test/grsecurity-2.2.2-3.1.4-201112021740.patch
1diff -urNp linux-3.1.4/arch/alpha/include/asm/elf.h linux-3.1.4/arch/alpha/include/asm/elf.h
2--- linux-3.1.4/arch/alpha/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
3+++ linux-3.1.4/arch/alpha/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
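
Aside (not part of the patch): the PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN values added above are counts of random bits. Below is a minimal sketch of the resulting randomisation spans, assuming the usual PaX convention that the delta is a random number of that many bits shifted left by PAGE_SHIFT before being added to the base; the PAGE_SHIFT value (8 KB pages on Alpha) is an assumption of the demo, not taken from this hunk.

/*
 * Hypothetical demo: how many bytes of mmap-base jitter the Alpha
 * PAX_DELTA_MMAP_LEN values above translate to, assuming the usual
 * "random_bits << PAGE_SHIFT" application.
 */
#include <stdio.h>

#define PAGE_SHIFT 13UL   /* assumed: 8 KB pages on Alpha */

static unsigned long span_bytes(unsigned long delta_len)
{
    return (1UL << delta_len) << PAGE_SHIFT;
}

int main(void)
{
    printf("32-bit personality (14 bits): %lu MiB of jitter\n",
           span_bytes(14) >> 20);   /* 128 MiB */
    printf("64-bit personality (28 bits): %lu GiB of jitter\n",
           span_bytes(28) >> 30);   /* 2048 GiB */
    return 0;
}
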
18diff -urNp linux-3.1.4/arch/alpha/include/asm/pgtable.h linux-3.1.4/arch/alpha/include/asm/pgtable.h
19--- linux-3.1.4/arch/alpha/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
20+++ linux-3.1.4/arch/alpha/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.1.4/arch/alpha/kernel/module.c linux-3.1.4/arch/alpha/kernel/module.c
40--- linux-3.1.4/arch/alpha/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
41+++ linux-3.1.4/arch/alpha/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
42@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.1.4/arch/alpha/kernel/osf_sys.c linux-3.1.4/arch/alpha/kernel/osf_sys.c
52--- linux-3.1.4/arch/alpha/kernel/osf_sys.c 2011-11-11 15:19:27.000000000 -0500
53+++ linux-3.1.4/arch/alpha/kernel/osf_sys.c 2011-11-16 18:39:07.000000000 -0500
54@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.1.4/arch/alpha/mm/fault.c linux-3.1.4/arch/alpha/mm/fault.c
86--- linux-3.1.4/arch/alpha/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
87+++ linux-3.1.4/arch/alpha/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
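
Aside (not part of the patch): the patched-PLT emulation above repeatedly applies the idiom ((x ^ 0x8000UL) + 0x8000UL) to a word that has been OR-ed with 0xFFFFFFFFFFFF0000UL. The standalone sketch below shows why that sign-extends the 16-bit displacement held in the low bits; the instruction constant is made up for the demo.

/*
 * Demo of the sign-extension idiom from pax_handle_fetch_fault() above.
 * After "insn | 0xFFFFFFFFFFFF0000UL" the upper 48 bits are all ones;
 * XOR-ing bit 15 and adding it back then yields the correctly
 * sign-extended 64-bit displacement for both negative and positive values.
 */
#include <stdio.h>

static unsigned long sext16(unsigned long x)
{
    return (x ^ 0x8000UL) + 0x8000UL;
}

int main(void)
{
    unsigned long insn  = 0xA77B0000UL | 0xFFFCUL;          /* displacement -4 */
    unsigned long addrl = insn | 0xFFFFFFFFFFFF0000UL;      /* as in the patch */

    printf("sign-extended displacement: %ld\n", (long)sext16(addrl));   /* -4 */
    return 0;
}
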
244diff -urNp linux-3.1.4/arch/arm/include/asm/elf.h linux-3.1.4/arch/arm/include/asm/elf.h
245--- linux-3.1.4/arch/arm/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
246+++ linux-3.1.4/arch/arm/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.1.4/arch/arm/include/asm/kmap_types.h linux-3.1.4/arch/arm/include/asm/kmap_types.h
275--- linux-3.1.4/arch/arm/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
276+++ linux-3.1.4/arch/arm/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.1.4/arch/arm/include/asm/uaccess.h linux-3.1.4/arch/arm/include/asm/uaccess.h
286--- linux-3.1.4/arch/arm/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
287+++ linux-3.1.4/arch/arm/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
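
Aside (not part of the patch): the copy_{to,from}_user() hunks above add two things — a check_object_size() call for non-constant sizes (the helper is declared in this hunk but implemented elsewhere in the patch) and an early "(long)n < 0" bail-out. The user-space model below only illustrates the latter; bounded_copy() is a made-up stand-in, not the kernel API.

/*
 * A size_t that underflowed (e.g. len - hdr with hdr > len) becomes a huge
 * value that is negative when viewed as long; the guard refuses the copy
 * before any access_ok()-style checks or copying would happen, and the
 * caller sees "n bytes not copied", matching the kernel convention.
 */
#include <stdio.h>
#include <string.h>

static unsigned long bounded_copy(void *to, const void *from, unsigned long n)
{
    if ((long)n < 0)
        return n;              /* report everything as uncopied */
    memcpy(to, from, n);
    return 0;
}

int main(void)
{
    char src[16] = "payload", dst[16] = { 0 };
    unsigned long bad_len = 4UL - 8UL;     /* wraps to 0xff...fc */

    printf("good copy, bytes left: %lu\n", bounded_copy(dst, src, 8));
    printf("bad copy,  bytes left: %lu (requested %#lx)\n",
           bounded_copy(dst, src, bad_len), bad_len);
    return 0;
}
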
343diff -urNp linux-3.1.4/arch/arm/kernel/armksyms.c linux-3.1.4/arch/arm/kernel/armksyms.c
344--- linux-3.1.4/arch/arm/kernel/armksyms.c 2011-11-11 15:19:27.000000000 -0500
345+++ linux-3.1.4/arch/arm/kernel/armksyms.c 2011-11-16 18:39:07.000000000 -0500
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.1.4/arch/arm/kernel/process.c linux-3.1.4/arch/arm/kernel/process.c
358--- linux-3.1.4/arch/arm/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
359+++ linux-3.1.4/arch/arm/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366 #include <linux/cpuidle.h>
367
368@@ -481,12 +480,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.1.4/arch/arm/kernel/traps.c linux-3.1.4/arch/arm/kernel/traps.c
382--- linux-3.1.4/arch/arm/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
383+++ linux-3.1.4/arch/arm/kernel/traps.c 2011-11-16 18:40:08.000000000 -0500
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.1.4/arch/arm/lib/copy_from_user.S linux-3.1.4/arch/arm/lib/copy_from_user.S
404--- linux-3.1.4/arch/arm/lib/copy_from_user.S 2011-11-11 15:19:27.000000000 -0500
405+++ linux-3.1.4/arch/arm/lib/copy_from_user.S 2011-11-16 18:39:07.000000000 -0500
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.1.4/arch/arm/lib/copy_to_user.S linux-3.1.4/arch/arm/lib/copy_to_user.S
430--- linux-3.1.4/arch/arm/lib/copy_to_user.S 2011-11-11 15:19:27.000000000 -0500
431+++ linux-3.1.4/arch/arm/lib/copy_to_user.S 2011-11-16 18:39:07.000000000 -0500
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.1.4/arch/arm/lib/uaccess.S linux-3.1.4/arch/arm/lib/uaccess.S
456--- linux-3.1.4/arch/arm/lib/uaccess.S 2011-11-11 15:19:27.000000000 -0500
457+++ linux-3.1.4/arch/arm/lib/uaccess.S 2011-11-16 18:39:07.000000000 -0500
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.1.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.1.4/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.1.4/arch/arm/lib/uaccess_with_memcpy.c 2011-11-11 15:19:27.000000000 -0500
513+++ linux-3.1.4/arch/arm/lib/uaccess_with_memcpy.c 2011-11-16 18:39:07.000000000 -0500
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.1.4/arch/arm/mach-ux500/mbox-db5500.c linux-3.1.4/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.1.4/arch/arm/mach-ux500/mbox-db5500.c 2011-11-11 15:19:27.000000000 -0500
525+++ linux-3.1.4/arch/arm/mach-ux500/mbox-db5500.c 2011-11-16 18:40:08.000000000 -0500
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
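
Aside (not part of the patch): the mach-ux500 change above is a plain sysfs permission tightening on the "fifo" attribute. The octal arithmetic, using the standard values of these mode macros (redefined here so the demo builds in user space):

#include <stdio.h>

#define S_IRUGO 0444   /* readable by user, group, others */
#define S_IWUGO 0222   /* writable by user, group, others */
#define S_IWUSR 0200   /* writable by owner only */

int main(void)
{
    printf("before: %04o\n", S_IWUGO | S_IRUGO);   /* 0666 */
    printf("after:  %04o\n", S_IWUSR | S_IRUGO);   /* 0644 */
    return 0;
}
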
535diff -urNp linux-3.1.4/arch/arm/mm/fault.c linux-3.1.4/arch/arm/mm/fault.c
536--- linux-3.1.4/arch/arm/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
537+++ linux-3.1.4/arch/arm/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.1.4/arch/arm/mm/mmap.c linux-3.1.4/arch/arm/mm/mmap.c
587--- linux-3.1.4/arch/arm/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
588+++ linux-3.1.4/arch/arm/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
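
Aside (not part of the patch): this file and several others in the patch (alpha, frv, ia64, parisc, mips) replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(). The helper itself is defined elsewhere in the grsecurity patch and does not appear in this excerpt; the sketch below is only a guess at its likely shape — enforcing an extra guard gap below stack-style (VM_GROWSDOWN) mappings — and every constant and name in it is an assumption.

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN   0x0100UL
#define HEAP_STACK_GAP (64UL * 1024UL)   /* assumed guard size, not from the patch */

struct vma_model {                       /* stand-in for struct vm_area_struct */
    unsigned long vm_start;
    unsigned long vm_end;
    unsigned long vm_flags;
};

/* true means [addr, addr+len) fits below "vma" (or there is no vma above) */
static bool gap_ok(const struct vma_model *vma, unsigned long addr, unsigned long len)
{
    if (!vma)
        return true;
    if (addr + len > vma->vm_start)              /* plain overlap */
        return false;
    if (vma->vm_flags & VM_GROWSDOWN)            /* keep a gap below stacks */
        return addr + len + HEAP_STACK_GAP <= vma->vm_start;
    return true;
}

int main(void)
{
    struct vma_model stack = { 0x7f0000000000UL, 0x7f0000100000UL, VM_GROWSDOWN };

    printf("right below the stack: %d\n",
           gap_ok(&stack, stack.vm_start - 0x1000, 0x1000));                    /* 0 */
    printf("a full gap lower:      %d\n",
           gap_ok(&stack, stack.vm_start - 0x1000 - HEAP_STACK_GAP, 0x1000));   /* 1 */
    return 0;
}
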
638diff -urNp linux-3.1.4/arch/avr32/include/asm/elf.h linux-3.1.4/arch/avr32/include/asm/elf.h
639--- linux-3.1.4/arch/avr32/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
640+++ linux-3.1.4/arch/avr32/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.1.4/arch/avr32/include/asm/kmap_types.h linux-3.1.4/arch/avr32/include/asm/kmap_types.h
658--- linux-3.1.4/arch/avr32/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
659+++ linux-3.1.4/arch/avr32/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.1.4/arch/avr32/mm/fault.c linux-3.1.4/arch/avr32/mm/fault.c
671--- linux-3.1.4/arch/avr32/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
672+++ linux-3.1.4/arch/avr32/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.1.4/arch/frv/include/asm/kmap_types.h linux-3.1.4/arch/frv/include/asm/kmap_types.h
715--- linux-3.1.4/arch/frv/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
716+++ linux-3.1.4/arch/frv/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.1.4/arch/frv/mm/elf-fdpic.c linux-3.1.4/arch/frv/mm/elf-fdpic.c
726--- linux-3.1.4/arch/frv/mm/elf-fdpic.c 2011-11-11 15:19:27.000000000 -0500
727+++ linux-3.1.4/arch/frv/mm/elf-fdpic.c 2011-11-16 18:39:07.000000000 -0500
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.1.4/arch/ia64/include/asm/elf.h linux-3.1.4/arch/ia64/include/asm/elf.h
757--- linux-3.1.4/arch/ia64/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
758+++ linux-3.1.4/arch/ia64/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.1.4/arch/ia64/include/asm/pgtable.h linux-3.1.4/arch/ia64/include/asm/pgtable.h
774--- linux-3.1.4/arch/ia64/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
775+++ linux-3.1.4/arch/ia64/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.1.4/arch/ia64/include/asm/spinlock.h linux-3.1.4/arch/ia64/include/asm/spinlock.h
804--- linux-3.1.4/arch/ia64/include/asm/spinlock.h 2011-11-11 15:19:27.000000000 -0500
805+++ linux-3.1.4/arch/ia64/include/asm/spinlock.h 2011-11-16 18:39:07.000000000 -0500
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.1.4/arch/ia64/include/asm/uaccess.h linux-3.1.4/arch/ia64/include/asm/uaccess.h
816--- linux-3.1.4/arch/ia64/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
817+++ linux-3.1.4/arch/ia64/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.1.4/arch/ia64/kernel/module.c linux-3.1.4/arch/ia64/kernel/module.c
837--- linux-3.1.4/arch/ia64/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
838+++ linux-3.1.4/arch/ia64/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
839@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.1.4/arch/ia64/kernel/sys_ia64.c linux-3.1.4/arch/ia64/kernel/sys_ia64.c
928--- linux-3.1.4/arch/ia64/kernel/sys_ia64.c 2011-11-11 15:19:27.000000000 -0500
929+++ linux-3.1.4/arch/ia64/kernel/sys_ia64.c 2011-11-16 18:39:07.000000000 -0500
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.1.4/arch/ia64/kernel/vmlinux.lds.S linux-3.1.4/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.1.4/arch/ia64/kernel/vmlinux.lds.S 2011-11-11 15:19:27.000000000 -0500
964+++ linux-3.1.4/arch/ia64/kernel/vmlinux.lds.S 2011-11-16 18:39:07.000000000 -0500
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.1.4/arch/ia64/mm/fault.c linux-3.1.4/arch/ia64/mm/fault.c
975--- linux-3.1.4/arch/ia64/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
976+++ linux-3.1.4/arch/ia64/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.1.4/arch/ia64/mm/hugetlbpage.c linux-3.1.4/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.1.4/arch/ia64/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
1028+++ linux-3.1.4/arch/ia64/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.1.4/arch/ia64/mm/init.c linux-3.1.4/arch/ia64/mm/init.c
1039--- linux-3.1.4/arch/ia64/mm/init.c 2011-11-11 15:19:27.000000000 -0500
1040+++ linux-3.1.4/arch/ia64/mm/init.c 2011-11-16 18:39:07.000000000 -0500
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.1.4/arch/m32r/lib/usercopy.c linux-3.1.4/arch/m32r/lib/usercopy.c
1062--- linux-3.1.4/arch/m32r/lib/usercopy.c 2011-11-11 15:19:27.000000000 -0500
1063+++ linux-3.1.4/arch/m32r/lib/usercopy.c 2011-11-16 18:39:07.000000000 -0500
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.1.4/arch/mips/include/asm/elf.h linux-3.1.4/arch/mips/include/asm/elf.h
1085--- linux-3.1.4/arch/mips/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1086+++ linux-3.1.4/arch/mips/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.1.4/arch/mips/include/asm/page.h linux-3.1.4/arch/mips/include/asm/page.h
1109--- linux-3.1.4/arch/mips/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
1110+++ linux-3.1.4/arch/mips/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.1.4/arch/mips/include/asm/system.h linux-3.1.4/arch/mips/include/asm/system.h
1121--- linux-3.1.4/arch/mips/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
1122+++ linux-3.1.4/arch/mips/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
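
Aside (not part of the patch): the macro above replaces the randomising arch_align_stack() with a pure 16-byte round-down. The mask arithmetic in isolation:

#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
    unsigned long sp = 0x7fff12345679UL;
    printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));   /* ...5670 */
    return 0;
}
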
1131diff -urNp linux-3.1.4/arch/mips/kernel/binfmt_elfn32.c linux-3.1.4/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.1.4/arch/mips/kernel/binfmt_elfn32.c 2011-11-11 15:19:27.000000000 -0500
1133+++ linux-3.1.4/arch/mips/kernel/binfmt_elfn32.c 2011-11-16 18:39:07.000000000 -0500
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.1.4/arch/mips/kernel/binfmt_elfo32.c linux-3.1.4/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.1.4/arch/mips/kernel/binfmt_elfo32.c 2011-11-11 15:19:27.000000000 -0500
1150+++ linux-3.1.4/arch/mips/kernel/binfmt_elfo32.c 2011-11-16 18:39:07.000000000 -0500
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.1.4/arch/mips/kernel/process.c linux-3.1.4/arch/mips/kernel/process.c
1166--- linux-3.1.4/arch/mips/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
1167+++ linux-3.1.4/arch/mips/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
1168@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.1.4/arch/mips/mm/fault.c linux-3.1.4/arch/mips/mm/fault.c
1185--- linux-3.1.4/arch/mips/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1186+++ linux-3.1.4/arch/mips/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.1.4/arch/mips/mm/mmap.c linux-3.1.4/arch/mips/mm/mmap.c
1212--- linux-3.1.4/arch/mips/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
1213+++ linux-3.1.4/arch/mips/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
1214@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_a
1215 do_color_align = 1;
1216
1217 /* requesting a specific address */
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_a
1227 addr = PAGE_ALIGN(addr);
1228
1229 vma = find_vma(mm, addr);
1230- if (TASK_SIZE - len >= addr &&
1231- (!vma || addr + len <= vma->vm_start))
1232+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1233 return addr;
1234 }
1235
1236@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_a
1237 /* At this point: (!vma || addr < vma->vm_end). */
1238 if (TASK_SIZE - len < addr)
1239 return -ENOMEM;
1240- if (!vma || addr + len <= vma->vm_start)
1241+ if (check_heap_stack_gap(vmm, addr, len))
1242 return addr;
1243 addr = vma->vm_end;
1244 if (do_color_align)
1245@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_a
1246 /* make sure it can fit in the remaining address space */
1247 if (likely(addr > len)) {
1248 vma = find_vma(mm, addr - len);
1249- if (!vma || addr <= vma->vm_start) {
1250+ if (check_heap_stack_gap(vmm, addr - len, len))
1251 /* cache the address as a hint for next time */
1252 return mm->free_area_cache = addr - len;
1253 }
1254@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_a
1255 * return with success:
1256 */
1257 vma = find_vma(mm, addr);
1258- if (likely(!vma || addr + len <= vma->vm_start)) {
1259+ if (check_heap_stack_gap(vmm, addr, len)) {
1260 /* cache the address as a hint for next time */
1261 return mm->free_area_cache = addr;
1262 }
1263@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_str
1264 mm->unmap_area = arch_unmap_area_topdown;
1265 }
1266 }
1267-
1268-static inline unsigned long brk_rnd(void)
1269-{
1270- unsigned long rnd = get_random_int();
1271-
1272- rnd = rnd << PAGE_SHIFT;
1273- /* 8MB for 32bit, 256MB for 64bit */
1274- if (TASK_IS_32BIT_ADDR)
1275- rnd = rnd & 0x7ffffful;
1276- else
1277- rnd = rnd & 0xffffffful;
1278-
1279- return rnd;
1280-}
1281-
1282-unsigned long arch_randomize_brk(struct mm_struct *mm)
1283-{
1284- unsigned long base = mm->brk;
1285- unsigned long ret;
1286-
1287- ret = PAGE_ALIGN(base + brk_rnd());
1288-
1289- if (ret < mm->brk)
1290- return mm->brk;
1291-
1292- return ret;
1293-}
1294diff -urNp linux-3.1.4/arch/parisc/include/asm/elf.h linux-3.1.4/arch/parisc/include/asm/elf.h
1295--- linux-3.1.4/arch/parisc/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1296+++ linux-3.1.4/arch/parisc/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1297@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1298
1299 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1300
1301+#ifdef CONFIG_PAX_ASLR
1302+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1303+
1304+#define PAX_DELTA_MMAP_LEN 16
1305+#define PAX_DELTA_STACK_LEN 16
1306+#endif
1307+
1308 /* This yields a mask that user programs can use to figure out what
1309 instruction set this CPU supports. This could be done in user space,
1310 but it's not easy, and we've already done it here. */
1311diff -urNp linux-3.1.4/arch/parisc/include/asm/pgtable.h linux-3.1.4/arch/parisc/include/asm/pgtable.h
1312--- linux-3.1.4/arch/parisc/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
1313+++ linux-3.1.4/arch/parisc/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
1314@@ -210,6 +210,17 @@ struct vm_area_struct;
1315 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1316 #define PAGE_COPY PAGE_EXECREAD
1317 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1318+
1319+#ifdef CONFIG_PAX_PAGEEXEC
1320+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1321+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1322+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1323+#else
1324+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1325+# define PAGE_COPY_NOEXEC PAGE_COPY
1326+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1327+#endif
1328+
1329 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1330 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1331 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1332diff -urNp linux-3.1.4/arch/parisc/kernel/module.c linux-3.1.4/arch/parisc/kernel/module.c
1333--- linux-3.1.4/arch/parisc/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
1334+++ linux-3.1.4/arch/parisc/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
1335@@ -98,16 +98,38 @@
1336
1337 /* three functions to determine where in the module core
1338 * or init pieces the location is */
1339+static inline int in_init_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_init_rx &&
1342+ loc < (me->module_init_rx + me->init_size_rx));
1343+}
1344+
1345+static inline int in_init_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_init_rw &&
1348+ loc < (me->module_init_rw + me->init_size_rw));
1349+}
1350+
1351 static inline int in_init(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_init &&
1354- loc <= (me->module_init + me->init_size));
1355+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1356+}
1357+
1358+static inline int in_core_rx(struct module *me, void *loc)
1359+{
1360+ return (loc >= me->module_core_rx &&
1361+ loc < (me->module_core_rx + me->core_size_rx));
1362+}
1363+
1364+static inline int in_core_rw(struct module *me, void *loc)
1365+{
1366+ return (loc >= me->module_core_rw &&
1367+ loc < (me->module_core_rw + me->core_size_rw));
1368 }
1369
1370 static inline int in_core(struct module *me, void *loc)
1371 {
1372- return (loc >= me->module_core &&
1373- loc <= (me->module_core + me->core_size));
1374+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1375 }
1376
1377 static inline int in_local(struct module *me, void *loc)
1378@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1379 }
1380
1381 /* align things a bit */
1382- me->core_size = ALIGN(me->core_size, 16);
1383- me->arch.got_offset = me->core_size;
1384- me->core_size += gots * sizeof(struct got_entry);
1385-
1386- me->core_size = ALIGN(me->core_size, 16);
1387- me->arch.fdesc_offset = me->core_size;
1388- me->core_size += fdescs * sizeof(Elf_Fdesc);
1389+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1390+ me->arch.got_offset = me->core_size_rw;
1391+ me->core_size_rw += gots * sizeof(struct got_entry);
1392+
1393+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1394+ me->arch.fdesc_offset = me->core_size_rw;
1395+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1396
1397 me->arch.got_max = gots;
1398 me->arch.fdesc_max = fdescs;
1399@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1400
1401 BUG_ON(value == 0);
1402
1403- got = me->module_core + me->arch.got_offset;
1404+ got = me->module_core_rw + me->arch.got_offset;
1405 for (i = 0; got[i].addr; i++)
1406 if (got[i].addr == value)
1407 goto out;
1408@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1409 #ifdef CONFIG_64BIT
1410 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1411 {
1412- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1413+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1414
1415 if (!value) {
1416 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1417@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1418
1419 /* Create new one */
1420 fdesc->addr = value;
1421- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1422+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1423 return (Elf_Addr)fdesc;
1424 }
1425 #endif /* CONFIG_64BIT */
1426@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1427
1428 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1429 end = table + sechdrs[me->arch.unwind_section].sh_size;
1430- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1431+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1432
1433 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1434 me->arch.unwind_section, table, end, gp);
1435diff -urNp linux-3.1.4/arch/parisc/kernel/sys_parisc.c linux-3.1.4/arch/parisc/kernel/sys_parisc.c
1436--- linux-3.1.4/arch/parisc/kernel/sys_parisc.c 2011-11-11 15:19:27.000000000 -0500
1437+++ linux-3.1.4/arch/parisc/kernel/sys_parisc.c 2011-11-16 18:39:07.000000000 -0500
1438@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1439 /* At this point: (!vma || addr < vma->vm_end). */
1440 if (TASK_SIZE - len < addr)
1441 return -ENOMEM;
1442- if (!vma || addr + len <= vma->vm_start)
1443+ if (check_heap_stack_gap(vma, addr, len))
1444 return addr;
1445 addr = vma->vm_end;
1446 }
1447@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1448 /* At this point: (!vma || addr < vma->vm_end). */
1449 if (TASK_SIZE - len < addr)
1450 return -ENOMEM;
1451- if (!vma || addr + len <= vma->vm_start)
1452+ if (check_heap_stack_gap(vma, addr, len))
1453 return addr;
1454 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1455 if (addr < vma->vm_end) /* handle wraparound */
1456@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1457 if (flags & MAP_FIXED)
1458 return addr;
1459 if (!addr)
1460- addr = TASK_UNMAPPED_BASE;
1461+ addr = current->mm->mmap_base;
1462
1463 if (filp) {
1464 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1465diff -urNp linux-3.1.4/arch/parisc/kernel/traps.c linux-3.1.4/arch/parisc/kernel/traps.c
1466--- linux-3.1.4/arch/parisc/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
1467+++ linux-3.1.4/arch/parisc/kernel/traps.c 2011-11-16 18:39:07.000000000 -0500
1468@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1469
1470 down_read(&current->mm->mmap_sem);
1471 vma = find_vma(current->mm,regs->iaoq[0]);
1472- if (vma && (regs->iaoq[0] >= vma->vm_start)
1473- && (vma->vm_flags & VM_EXEC)) {
1474-
1475+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1476 fault_address = regs->iaoq[0];
1477 fault_space = regs->iasq[0];
1478
1479diff -urNp linux-3.1.4/arch/parisc/mm/fault.c linux-3.1.4/arch/parisc/mm/fault.c
1480--- linux-3.1.4/arch/parisc/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1481+++ linux-3.1.4/arch/parisc/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1482@@ -15,6 +15,7 @@
1483 #include <linux/sched.h>
1484 #include <linux/interrupt.h>
1485 #include <linux/module.h>
1486+#include <linux/unistd.h>
1487
1488 #include <asm/uaccess.h>
1489 #include <asm/traps.h>
1490@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1491 static unsigned long
1492 parisc_acctyp(unsigned long code, unsigned int inst)
1493 {
1494- if (code == 6 || code == 16)
1495+ if (code == 6 || code == 7 || code == 16)
1496 return VM_EXEC;
1497
1498 switch (inst & 0xf0000000) {
1499@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1500 }
1501 #endif
1502
1503+#ifdef CONFIG_PAX_PAGEEXEC
1504+/*
1505+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1506+ *
1507+ * returns 1 when task should be killed
1508+ * 2 when rt_sigreturn trampoline was detected
1509+ * 3 when unpatched PLT trampoline was detected
1510+ */
1511+static int pax_handle_fetch_fault(struct pt_regs *regs)
1512+{
1513+
1514+#ifdef CONFIG_PAX_EMUPLT
1515+ int err;
1516+
1517+ do { /* PaX: unpatched PLT emulation */
1518+ unsigned int bl, depwi;
1519+
1520+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1521+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1522+
1523+ if (err)
1524+ break;
1525+
1526+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1527+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1528+
1529+ err = get_user(ldw, (unsigned int *)addr);
1530+ err |= get_user(bv, (unsigned int *)(addr+4));
1531+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1532+
1533+ if (err)
1534+ break;
1535+
1536+ if (ldw == 0x0E801096U &&
1537+ bv == 0xEAC0C000U &&
1538+ ldw2 == 0x0E881095U)
1539+ {
1540+ unsigned int resolver, map;
1541+
1542+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1543+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1544+ if (err)
1545+ break;
1546+
1547+ regs->gr[20] = instruction_pointer(regs)+8;
1548+ regs->gr[21] = map;
1549+ regs->gr[22] = resolver;
1550+ regs->iaoq[0] = resolver | 3UL;
1551+ regs->iaoq[1] = regs->iaoq[0] + 4;
1552+ return 3;
1553+ }
1554+ }
1555+ } while (0);
1556+#endif
1557+
1558+#ifdef CONFIG_PAX_EMUTRAMP
1559+
1560+#ifndef CONFIG_PAX_EMUSIGRT
1561+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1562+ return 1;
1563+#endif
1564+
1565+ do { /* PaX: rt_sigreturn emulation */
1566+ unsigned int ldi1, ldi2, bel, nop;
1567+
1568+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1569+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1570+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1571+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1572+
1573+ if (err)
1574+ break;
1575+
1576+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1577+ ldi2 == 0x3414015AU &&
1578+ bel == 0xE4008200U &&
1579+ nop == 0x08000240U)
1580+ {
1581+ regs->gr[25] = (ldi1 & 2) >> 1;
1582+ regs->gr[20] = __NR_rt_sigreturn;
1583+ regs->gr[31] = regs->iaoq[1] + 16;
1584+ regs->sr[0] = regs->iasq[1];
1585+ regs->iaoq[0] = 0x100UL;
1586+ regs->iaoq[1] = regs->iaoq[0] + 4;
1587+ regs->iasq[0] = regs->sr[2];
1588+ regs->iasq[1] = regs->sr[2];
1589+ return 2;
1590+ }
1591+ } while (0);
1592+#endif
1593+
1594+ return 1;
1595+}
1596+
1597+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1598+{
1599+ unsigned long i;
1600+
1601+ printk(KERN_ERR "PAX: bytes at PC: ");
1602+ for (i = 0; i < 5; i++) {
1603+ unsigned int c;
1604+ if (get_user(c, (unsigned int *)pc+i))
1605+ printk(KERN_CONT "???????? ");
1606+ else
1607+ printk(KERN_CONT "%08x ", c);
1608+ }
1609+ printk("\n");
1610+}
1611+#endif
1612+
1613 int fixup_exception(struct pt_regs *regs)
1614 {
1615 const struct exception_table_entry *fix;
1616@@ -192,8 +303,33 @@ good_area:
1617
1618 acc_type = parisc_acctyp(code,regs->iir);
1619
1620- if ((vma->vm_flags & acc_type) != acc_type)
1621+ if ((vma->vm_flags & acc_type) != acc_type) {
1622+
1623+#ifdef CONFIG_PAX_PAGEEXEC
1624+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1625+ (address & ~3UL) == instruction_pointer(regs))
1626+ {
1627+ up_read(&mm->mmap_sem);
1628+ switch (pax_handle_fetch_fault(regs)) {
1629+
1630+#ifdef CONFIG_PAX_EMUPLT
1631+ case 3:
1632+ return;
1633+#endif
1634+
1635+#ifdef CONFIG_PAX_EMUTRAMP
1636+ case 2:
1637+ return;
1638+#endif
1639+
1640+ }
1641+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1642+ do_group_exit(SIGKILL);
1643+ }
1644+#endif
1645+
1646 goto bad_area;
1647+ }
1648
1649 /*
1650 * If for any reason at all we couldn't handle the fault, make
1651diff -urNp linux-3.1.4/arch/powerpc/include/asm/elf.h linux-3.1.4/arch/powerpc/include/asm/elf.h
1652--- linux-3.1.4/arch/powerpc/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1653+++ linux-3.1.4/arch/powerpc/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1654@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1655 the loader. We need to make sure that it is out of the way of the program
1656 that it will "exec", and that there is sufficient room for the brk. */
1657
1658-extern unsigned long randomize_et_dyn(unsigned long base);
1659-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1660+#define ELF_ET_DYN_BASE (0x20000000)
1661+
1662+#ifdef CONFIG_PAX_ASLR
1663+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1664+
1665+#ifdef __powerpc64__
1666+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1667+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1668+#else
1669+#define PAX_DELTA_MMAP_LEN 15
1670+#define PAX_DELTA_STACK_LEN 15
1671+#endif
1672+#endif
1673
1674 /*
1675 * Our registers are always unsigned longs, whether we're a 32 bit
1676@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1677 (0x7ff >> (PAGE_SHIFT - 12)) : \
1678 (0x3ffff >> (PAGE_SHIFT - 12)))
1679
1680-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1681-#define arch_randomize_brk arch_randomize_brk
1682-
1683 #endif /* __KERNEL__ */
1684
1685 /*
1686diff -urNp linux-3.1.4/arch/powerpc/include/asm/kmap_types.h linux-3.1.4/arch/powerpc/include/asm/kmap_types.h
1687--- linux-3.1.4/arch/powerpc/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
1688+++ linux-3.1.4/arch/powerpc/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
1689@@ -27,6 +27,7 @@ enum km_type {
1690 KM_PPC_SYNC_PAGE,
1691 KM_PPC_SYNC_ICACHE,
1692 KM_KDB,
1693+ KM_CLEARPAGE,
1694 KM_TYPE_NR
1695 };
1696
1697diff -urNp linux-3.1.4/arch/powerpc/include/asm/mman.h linux-3.1.4/arch/powerpc/include/asm/mman.h
1698--- linux-3.1.4/arch/powerpc/include/asm/mman.h 2011-11-11 15:19:27.000000000 -0500
1699+++ linux-3.1.4/arch/powerpc/include/asm/mman.h 2011-11-16 18:39:07.000000000 -0500
1700@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1701 }
1702 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1703
1704-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1705+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1706 {
1707 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1708 }
1709diff -urNp linux-3.1.4/arch/powerpc/include/asm/page_64.h linux-3.1.4/arch/powerpc/include/asm/page_64.h
1710--- linux-3.1.4/arch/powerpc/include/asm/page_64.h 2011-11-11 15:19:27.000000000 -0500
1711+++ linux-3.1.4/arch/powerpc/include/asm/page_64.h 2011-11-16 18:39:07.000000000 -0500
1712@@ -155,15 +155,18 @@ do { \
1713 * stack by default, so in the absence of a PT_GNU_STACK program header
1714 * we turn execute permission off.
1715 */
1716-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1717- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1718+#define VM_STACK_DEFAULT_FLAGS32 \
1719+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1720+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1721
1722 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1723 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1724
1725+#ifndef CONFIG_PAX_PAGEEXEC
1726 #define VM_STACK_DEFAULT_FLAGS \
1727 (is_32bit_task() ? \
1728 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1729+#endif
1730
1731 #include <asm-generic/getorder.h>
1732
1733diff -urNp linux-3.1.4/arch/powerpc/include/asm/page.h linux-3.1.4/arch/powerpc/include/asm/page.h
1734--- linux-3.1.4/arch/powerpc/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
1735+++ linux-3.1.4/arch/powerpc/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
1736@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1737 * and needs to be executable. This means the whole heap ends
1738 * up being executable.
1739 */
1740-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1741- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1742+#define VM_DATA_DEFAULT_FLAGS32 \
1743+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1744+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1745
1746 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1747 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1748@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1749 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1750 #endif
1751
1752+#define ktla_ktva(addr) (addr)
1753+#define ktva_ktla(addr) (addr)
1754+
1755 #ifndef __ASSEMBLY__
1756
1757 #undef STRICT_MM_TYPECHECKS
1758diff -urNp linux-3.1.4/arch/powerpc/include/asm/pgtable.h linux-3.1.4/arch/powerpc/include/asm/pgtable.h
1759--- linux-3.1.4/arch/powerpc/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
1760+++ linux-3.1.4/arch/powerpc/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
1761@@ -2,6 +2,7 @@
1762 #define _ASM_POWERPC_PGTABLE_H
1763 #ifdef __KERNEL__
1764
1765+#include <linux/const.h>
1766 #ifndef __ASSEMBLY__
1767 #include <asm/processor.h> /* For TASK_SIZE */
1768 #include <asm/mmu.h>
1769diff -urNp linux-3.1.4/arch/powerpc/include/asm/pte-hash32.h linux-3.1.4/arch/powerpc/include/asm/pte-hash32.h
1770--- linux-3.1.4/arch/powerpc/include/asm/pte-hash32.h 2011-11-11 15:19:27.000000000 -0500
1771+++ linux-3.1.4/arch/powerpc/include/asm/pte-hash32.h 2011-11-16 18:39:07.000000000 -0500
1772@@ -21,6 +21,7 @@
1773 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1774 #define _PAGE_USER 0x004 /* usermode access allowed */
1775 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1776+#define _PAGE_EXEC _PAGE_GUARDED
1777 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1778 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1779 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1780diff -urNp linux-3.1.4/arch/powerpc/include/asm/reg.h linux-3.1.4/arch/powerpc/include/asm/reg.h
1781--- linux-3.1.4/arch/powerpc/include/asm/reg.h 2011-11-11 15:19:27.000000000 -0500
1782+++ linux-3.1.4/arch/powerpc/include/asm/reg.h 2011-11-16 18:39:07.000000000 -0500
1783@@ -212,6 +212,7 @@
1784 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1785 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1786 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1787+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1788 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1789 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1790 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1791diff -urNp linux-3.1.4/arch/powerpc/include/asm/system.h linux-3.1.4/arch/powerpc/include/asm/system.h
1792--- linux-3.1.4/arch/powerpc/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
1793+++ linux-3.1.4/arch/powerpc/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
1794@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1795 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1796 #endif
1797
1798-extern unsigned long arch_align_stack(unsigned long sp);
1799+#define arch_align_stack(x) ((x) & ~0xfUL)
1800
1801 /* Used in very early kernel initialization. */
1802 extern unsigned long reloc_offset(void);
1803diff -urNp linux-3.1.4/arch/powerpc/include/asm/uaccess.h linux-3.1.4/arch/powerpc/include/asm/uaccess.h
1804--- linux-3.1.4/arch/powerpc/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
1805+++ linux-3.1.4/arch/powerpc/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
1806@@ -13,6 +13,8 @@
1807 #define VERIFY_READ 0
1808 #define VERIFY_WRITE 1
1809
1810+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1811+
1812 /*
1813 * The fs value determines whether argument validity checking should be
1814 * performed or not. If get_fs() == USER_DS, checking is performed, with
1815@@ -327,52 +329,6 @@ do { \
1816 extern unsigned long __copy_tofrom_user(void __user *to,
1817 const void __user *from, unsigned long size);
1818
1819-#ifndef __powerpc64__
1820-
1821-static inline unsigned long copy_from_user(void *to,
1822- const void __user *from, unsigned long n)
1823-{
1824- unsigned long over;
1825-
1826- if (access_ok(VERIFY_READ, from, n))
1827- return __copy_tofrom_user((__force void __user *)to, from, n);
1828- if ((unsigned long)from < TASK_SIZE) {
1829- over = (unsigned long)from + n - TASK_SIZE;
1830- return __copy_tofrom_user((__force void __user *)to, from,
1831- n - over) + over;
1832- }
1833- return n;
1834-}
1835-
1836-static inline unsigned long copy_to_user(void __user *to,
1837- const void *from, unsigned long n)
1838-{
1839- unsigned long over;
1840-
1841- if (access_ok(VERIFY_WRITE, to, n))
1842- return __copy_tofrom_user(to, (__force void __user *)from, n);
1843- if ((unsigned long)to < TASK_SIZE) {
1844- over = (unsigned long)to + n - TASK_SIZE;
1845- return __copy_tofrom_user(to, (__force void __user *)from,
1846- n - over) + over;
1847- }
1848- return n;
1849-}
1850-
1851-#else /* __powerpc64__ */
1852-
1853-#define __copy_in_user(to, from, size) \
1854- __copy_tofrom_user((to), (from), (size))
1855-
1856-extern unsigned long copy_from_user(void *to, const void __user *from,
1857- unsigned long n);
1858-extern unsigned long copy_to_user(void __user *to, const void *from,
1859- unsigned long n);
1860-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1861- unsigned long n);
1862-
1863-#endif /* __powerpc64__ */
1864-
1865 static inline unsigned long __copy_from_user_inatomic(void *to,
1866 const void __user *from, unsigned long n)
1867 {
1868@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1869 if (ret == 0)
1870 return 0;
1871 }
1872+
1873+ if (!__builtin_constant_p(n))
1874+ check_object_size(to, n, false);
1875+
1876 return __copy_tofrom_user((__force void __user *)to, from, n);
1877 }
1878
1879@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1880 if (ret == 0)
1881 return 0;
1882 }
1883+
1884+ if (!__builtin_constant_p(n))
1885+ check_object_size(from, n, true);
1886+
1887 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1888 }
1889
1890@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1891 return __copy_to_user_inatomic(to, from, size);
1892 }
1893
1894+#ifndef __powerpc64__
1895+
1896+static inline unsigned long __must_check copy_from_user(void *to,
1897+ const void __user *from, unsigned long n)
1898+{
1899+ unsigned long over;
1900+
1901+ if ((long)n < 0)
1902+ return n;
1903+
1904+ if (access_ok(VERIFY_READ, from, n)) {
1905+ if (!__builtin_constant_p(n))
1906+ check_object_size(to, n, false);
1907+ return __copy_tofrom_user((__force void __user *)to, from, n);
1908+ }
1909+ if ((unsigned long)from < TASK_SIZE) {
1910+ over = (unsigned long)from + n - TASK_SIZE;
1911+ if (!__builtin_constant_p(n - over))
1912+ check_object_size(to, n - over, false);
1913+ return __copy_tofrom_user((__force void __user *)to, from,
1914+ n - over) + over;
1915+ }
1916+ return n;
1917+}
1918+
1919+static inline unsigned long __must_check copy_to_user(void __user *to,
1920+ const void *from, unsigned long n)
1921+{
1922+ unsigned long over;
1923+
1924+ if ((long)n < 0)
1925+ return n;
1926+
1927+ if (access_ok(VERIFY_WRITE, to, n)) {
1928+ if (!__builtin_constant_p(n))
1929+ check_object_size(from, n, true);
1930+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1931+ }
1932+ if ((unsigned long)to < TASK_SIZE) {
1933+ over = (unsigned long)to + n - TASK_SIZE;
1934+ if (!__builtin_constant_p(n))
1935+ check_object_size(from, n - over, true);
1936+ return __copy_tofrom_user(to, (__force void __user *)from,
1937+ n - over) + over;
1938+ }
1939+ return n;
1940+}
1941+
1942+#else /* __powerpc64__ */
1943+
1944+#define __copy_in_user(to, from, size) \
1945+ __copy_tofrom_user((to), (from), (size))
1946+
1947+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1948+{
1949+ if ((long)n < 0 || n > INT_MAX)
1950+ return n;
1951+
1952+ if (!__builtin_constant_p(n))
1953+ check_object_size(to, n, false);
1954+
1955+ if (likely(access_ok(VERIFY_READ, from, n)))
1956+ n = __copy_from_user(to, from, n);
1957+ else
1958+ memset(to, 0, n);
1959+ return n;
1960+}
1961+
1962+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1963+{
1964+ if ((long)n < 0 || n > INT_MAX)
1965+ return n;
1966+
1967+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1968+ if (!__builtin_constant_p(n))
1969+ check_object_size(from, n, true);
1970+ n = __copy_to_user(to, from, n);
1971+ }
1972+ return n;
1973+}
1974+
1975+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1976+ unsigned long n);
1977+
1978+#endif /* __powerpc64__ */
1979+
1980 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1981
1982 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1983diff -urNp linux-3.1.4/arch/powerpc/kernel/exceptions-64e.S linux-3.1.4/arch/powerpc/kernel/exceptions-64e.S
1984--- linux-3.1.4/arch/powerpc/kernel/exceptions-64e.S 2011-11-11 15:19:27.000000000 -0500
1985+++ linux-3.1.4/arch/powerpc/kernel/exceptions-64e.S 2011-11-16 18:39:07.000000000 -0500
1986@@ -587,6 +587,7 @@ storage_fault_common:
1987 std r14,_DAR(r1)
1988 std r15,_DSISR(r1)
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1990+ bl .save_nvgprs
1991 mr r4,r14
1992 mr r5,r15
1993 ld r14,PACA_EXGEN+EX_R14(r13)
1994@@ -596,8 +597,7 @@ storage_fault_common:
1995 cmpdi r3,0
1996 bne- 1f
1997 b .ret_from_except_lite
1998-1: bl .save_nvgprs
1999- mr r5,r3
2000+1: mr r5,r3
2001 addi r3,r1,STACK_FRAME_OVERHEAD
2002 ld r4,_DAR(r1)
2003 bl .bad_page_fault
2004diff -urNp linux-3.1.4/arch/powerpc/kernel/exceptions-64s.S linux-3.1.4/arch/powerpc/kernel/exceptions-64s.S
2005--- linux-3.1.4/arch/powerpc/kernel/exceptions-64s.S 2011-11-11 15:19:27.000000000 -0500
2006+++ linux-3.1.4/arch/powerpc/kernel/exceptions-64s.S 2011-11-16 18:39:07.000000000 -0500
2007@@ -1014,10 +1014,10 @@ handle_page_fault:
2008 11: ld r4,_DAR(r1)
2009 ld r5,_DSISR(r1)
2010 addi r3,r1,STACK_FRAME_OVERHEAD
2011+ bl .save_nvgprs
2012 bl .do_page_fault
2013 cmpdi r3,0
2014 beq+ 13f
2015- bl .save_nvgprs
2016 mr r5,r3
2017 addi r3,r1,STACK_FRAME_OVERHEAD
2018 lwz r4,_DAR(r1)
2019diff -urNp linux-3.1.4/arch/powerpc/kernel/module_32.c linux-3.1.4/arch/powerpc/kernel/module_32.c
2020--- linux-3.1.4/arch/powerpc/kernel/module_32.c 2011-11-11 15:19:27.000000000 -0500
2021+++ linux-3.1.4/arch/powerpc/kernel/module_32.c 2011-11-16 18:39:07.000000000 -0500
2022@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2023 me->arch.core_plt_section = i;
2024 }
2025 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2026- printk("Module doesn't contain .plt or .init.plt sections.\n");
2027+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2028 return -ENOEXEC;
2029 }
2030
2031@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *locati
2032
2033 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2034 /* Init, or core PLT? */
2035- if (location >= mod->module_core
2036- && location < mod->module_core + mod->core_size)
2037+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2038+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2039 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2040- else
2041+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2042+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2043 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2044+ else {
2045+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2046+ return ~0UL;
2047+ }
2048
2049 /* Find this entry, or if that fails, the next avail. entry */
2050 while (entry->jump[0]) {
2051diff -urNp linux-3.1.4/arch/powerpc/kernel/process.c linux-3.1.4/arch/powerpc/kernel/process.c
2052--- linux-3.1.4/arch/powerpc/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2053+++ linux-3.1.4/arch/powerpc/kernel/process.c 2011-11-16 18:40:08.000000000 -0500
2054@@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs)
2055 * Lookup NIP late so we have the best change of getting the
2056 * above info out without failing
2057 */
2058- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2059- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2060+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2061+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2062 #endif
2063 show_stack(current, (unsigned long *) regs->gpr[1]);
2064 if (!user_mode(regs))
2065@@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk,
2066 newsp = stack[0];
2067 ip = stack[STACK_FRAME_LR_SAVE];
2068 if (!firstframe || ip != lr) {
2069- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2070+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2071 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2072 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2073- printk(" (%pS)",
2074+ printk(" (%pA)",
2075 (void *)current->ret_stack[curr_frame].ret);
2076 curr_frame--;
2077 }
2078@@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk,
2079 struct pt_regs *regs = (struct pt_regs *)
2080 (sp + STACK_FRAME_OVERHEAD);
2081 lr = regs->link;
2082- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2083+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2084 regs->trap, (void *)regs->nip, (void *)lr);
2085 firstframe = 1;
2086 }
2087@@ -1285,58 +1285,3 @@ void thread_info_cache_init(void)
2088 }
2089
2090 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2091-
2092-unsigned long arch_align_stack(unsigned long sp)
2093-{
2094- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2095- sp -= get_random_int() & ~PAGE_MASK;
2096- return sp & ~0xf;
2097-}
2098-
2099-static inline unsigned long brk_rnd(void)
2100-{
2101- unsigned long rnd = 0;
2102-
2103- /* 8MB for 32bit, 1GB for 64bit */
2104- if (is_32bit_task())
2105- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2106- else
2107- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2108-
2109- return rnd << PAGE_SHIFT;
2110-}
2111-
2112-unsigned long arch_randomize_brk(struct mm_struct *mm)
2113-{
2114- unsigned long base = mm->brk;
2115- unsigned long ret;
2116-
2117-#ifdef CONFIG_PPC_STD_MMU_64
2118- /*
2119- * If we are using 1TB segments and we are allowed to randomise
2120- * the heap, we can put it above 1TB so it is backed by a 1TB
2121- * segment. Otherwise the heap will be in the bottom 1TB
2122- * which always uses 256MB segments and this may result in a
2123- * performance penalty.
2124- */
2125- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2126- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2127-#endif
2128-
2129- ret = PAGE_ALIGN(base + brk_rnd());
2130-
2131- if (ret < mm->brk)
2132- return mm->brk;
2133-
2134- return ret;
2135-}
2136-
2137-unsigned long randomize_et_dyn(unsigned long base)
2138-{
2139- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2140-
2141- if (ret < base)
2142- return base;
2143-
2144- return ret;
2145-}
2146diff -urNp linux-3.1.4/arch/powerpc/kernel/signal_32.c linux-3.1.4/arch/powerpc/kernel/signal_32.c
2147--- linux-3.1.4/arch/powerpc/kernel/signal_32.c 2011-11-11 15:19:27.000000000 -0500
2148+++ linux-3.1.4/arch/powerpc/kernel/signal_32.c 2011-11-16 18:39:07.000000000 -0500
2149@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2150 /* Save user registers on the stack */
2151 frame = &rt_sf->uc.uc_mcontext;
2152 addr = frame;
2153- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2154+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2155 if (save_user_regs(regs, frame, 0, 1))
2156 goto badframe;
2157 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2158diff -urNp linux-3.1.4/arch/powerpc/kernel/signal_64.c linux-3.1.4/arch/powerpc/kernel/signal_64.c
2159--- linux-3.1.4/arch/powerpc/kernel/signal_64.c 2011-11-11 15:19:27.000000000 -0500
2160+++ linux-3.1.4/arch/powerpc/kernel/signal_64.c 2011-11-16 18:39:07.000000000 -0500
2161@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2162 current->thread.fpscr.val = 0;
2163
2164 /* Set up to return from userspace. */
2165- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2166+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2167 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2168 } else {
2169 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2170diff -urNp linux-3.1.4/arch/powerpc/kernel/traps.c linux-3.1.4/arch/powerpc/kernel/traps.c
2171--- linux-3.1.4/arch/powerpc/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
2172+++ linux-3.1.4/arch/powerpc/kernel/traps.c 2011-11-16 18:40:08.000000000 -0500
2173@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2174 static inline void pmac_backlight_unblank(void) { }
2175 #endif
2176
2177+extern void gr_handle_kernel_exploit(void);
2178+
2179 int die(const char *str, struct pt_regs *regs, long err)
2180 {
2181 static struct {
2182@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2183 if (panic_on_oops)
2184 panic("Fatal exception");
2185
2186+ gr_handle_kernel_exploit();
2187+
2188 oops_exit();
2189 do_exit(err);
2190
2191diff -urNp linux-3.1.4/arch/powerpc/kernel/vdso.c linux-3.1.4/arch/powerpc/kernel/vdso.c
2192--- linux-3.1.4/arch/powerpc/kernel/vdso.c 2011-11-11 15:19:27.000000000 -0500
2193+++ linux-3.1.4/arch/powerpc/kernel/vdso.c 2011-11-16 18:39:07.000000000 -0500
2194@@ -36,6 +36,7 @@
2195 #include <asm/firmware.h>
2196 #include <asm/vdso.h>
2197 #include <asm/vdso_datapage.h>
2198+#include <asm/mman.h>
2199
2200 #include "setup.h"
2201
2202@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2203 vdso_base = VDSO32_MBASE;
2204 #endif
2205
2206- current->mm->context.vdso_base = 0;
2207+ current->mm->context.vdso_base = ~0UL;
2208
2209 /* vDSO has a problem and was disabled, just don't "enable" it for the
2210 * process
2211@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2212 vdso_base = get_unmapped_area(NULL, vdso_base,
2213 (vdso_pages << PAGE_SHIFT) +
2214 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2215- 0, 0);
2216+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2217 if (IS_ERR_VALUE(vdso_base)) {
2218 rc = vdso_base;
2219 goto fail_mmapsem;
2220diff -urNp linux-3.1.4/arch/powerpc/lib/usercopy_64.c linux-3.1.4/arch/powerpc/lib/usercopy_64.c
2221--- linux-3.1.4/arch/powerpc/lib/usercopy_64.c 2011-11-11 15:19:27.000000000 -0500
2222+++ linux-3.1.4/arch/powerpc/lib/usercopy_64.c 2011-11-16 18:39:07.000000000 -0500
2223@@ -9,22 +9,6 @@
2224 #include <linux/module.h>
2225 #include <asm/uaccess.h>
2226
2227-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2228-{
2229- if (likely(access_ok(VERIFY_READ, from, n)))
2230- n = __copy_from_user(to, from, n);
2231- else
2232- memset(to, 0, n);
2233- return n;
2234-}
2235-
2236-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2237-{
2238- if (likely(access_ok(VERIFY_WRITE, to, n)))
2239- n = __copy_to_user(to, from, n);
2240- return n;
2241-}
2242-
2243 unsigned long copy_in_user(void __user *to, const void __user *from,
2244 unsigned long n)
2245 {
2246@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2247 return n;
2248 }
2249
2250-EXPORT_SYMBOL(copy_from_user);
2251-EXPORT_SYMBOL(copy_to_user);
2252 EXPORT_SYMBOL(copy_in_user);
2253
2254diff -urNp linux-3.1.4/arch/powerpc/mm/fault.c linux-3.1.4/arch/powerpc/mm/fault.c
2255--- linux-3.1.4/arch/powerpc/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
2256+++ linux-3.1.4/arch/powerpc/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
2257@@ -32,6 +32,10 @@
2258 #include <linux/perf_event.h>
2259 #include <linux/magic.h>
2260 #include <linux/ratelimit.h>
2261+#include <linux/slab.h>
2262+#include <linux/pagemap.h>
2263+#include <linux/compiler.h>
2264+#include <linux/unistd.h>
2265
2266 #include <asm/firmware.h>
2267 #include <asm/page.h>
2268@@ -43,6 +47,7 @@
2269 #include <asm/tlbflush.h>
2270 #include <asm/siginfo.h>
2271 #include <mm/mmu_decl.h>
2272+#include <asm/ptrace.h>
2273
2274 #ifdef CONFIG_KPROBES
2275 static inline int notify_page_fault(struct pt_regs *regs)
2276@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2277 }
2278 #endif
2279
2280+#ifdef CONFIG_PAX_PAGEEXEC
2281+/*
2282+ * PaX: decide what to do with offenders (regs->nip = fault address)
2283+ *
2284+ * returns 1 when task should be killed
2285+ */
2286+static int pax_handle_fetch_fault(struct pt_regs *regs)
2287+{
2288+ return 1;
2289+}
2290+
2291+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2292+{
2293+ unsigned long i;
2294+
2295+ printk(KERN_ERR "PAX: bytes at PC: ");
2296+ for (i = 0; i < 5; i++) {
2297+ unsigned int c;
2298+ if (get_user(c, (unsigned int __user *)pc+i))
2299+ printk(KERN_CONT "???????? ");
2300+ else
2301+ printk(KERN_CONT "%08x ", c);
2302+ }
2303+ printk("\n");
2304+}
2305+#endif
2306+
2307 /*
2308 * Check whether the instruction at regs->nip is a store using
2309 * an update addressing form which will update r1.
2310@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2311 * indicate errors in DSISR but can validly be set in SRR1.
2312 */
2313 if (trap == 0x400)
2314- error_code &= 0x48200000;
2315+ error_code &= 0x58200000;
2316 else
2317 is_write = error_code & DSISR_ISSTORE;
2318 #else
2319@@ -259,7 +291,7 @@ good_area:
2320 * "undefined". Of those that can be set, this is the only
2321 * one which seems bad.
2322 */
2323- if (error_code & 0x10000000)
2324+ if (error_code & DSISR_GUARDED)
2325 /* Guarded storage error. */
2326 goto bad_area;
2327 #endif /* CONFIG_8xx */
2328@@ -274,7 +306,7 @@ good_area:
2329 * processors use the same I/D cache coherency mechanism
2330 * as embedded.
2331 */
2332- if (error_code & DSISR_PROTFAULT)
2333+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2334 goto bad_area;
2335 #endif /* CONFIG_PPC_STD_MMU */
2336
2337@@ -343,6 +375,23 @@ bad_area:
2338 bad_area_nosemaphore:
2339 /* User mode accesses cause a SIGSEGV */
2340 if (user_mode(regs)) {
2341+
2342+#ifdef CONFIG_PAX_PAGEEXEC
2343+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2344+#ifdef CONFIG_PPC_STD_MMU
2345+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2346+#else
2347+ if (is_exec && regs->nip == address) {
2348+#endif
2349+ switch (pax_handle_fetch_fault(regs)) {
2350+ }
2351+
2352+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2353+ do_group_exit(SIGKILL);
2354+ }
2355+ }
2356+#endif
2357+
2358 _exception(SIGSEGV, regs, code, address);
2359 return 0;
2360 }
2361diff -urNp linux-3.1.4/arch/powerpc/mm/mmap_64.c linux-3.1.4/arch/powerpc/mm/mmap_64.c
2362--- linux-3.1.4/arch/powerpc/mm/mmap_64.c 2011-11-11 15:19:27.000000000 -0500
2363+++ linux-3.1.4/arch/powerpc/mm/mmap_64.c 2011-11-16 18:39:07.000000000 -0500
2364@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2365 */
2366 if (mmap_is_legacy()) {
2367 mm->mmap_base = TASK_UNMAPPED_BASE;
2368+
2369+#ifdef CONFIG_PAX_RANDMMAP
2370+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2371+ mm->mmap_base += mm->delta_mmap;
2372+#endif
2373+
2374 mm->get_unmapped_area = arch_get_unmapped_area;
2375 mm->unmap_area = arch_unmap_area;
2376 } else {
2377 mm->mmap_base = mmap_base();
2378+
2379+#ifdef CONFIG_PAX_RANDMMAP
2380+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2381+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2382+#endif
2383+
2384 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2385 mm->unmap_area = arch_unmap_area_topdown;
2386 }
2387diff -urNp linux-3.1.4/arch/powerpc/mm/slice.c linux-3.1.4/arch/powerpc/mm/slice.c
2388--- linux-3.1.4/arch/powerpc/mm/slice.c 2011-11-11 15:19:27.000000000 -0500
2389+++ linux-3.1.4/arch/powerpc/mm/slice.c 2011-11-16 18:39:07.000000000 -0500
2390@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2391 if ((mm->task_size - len) < addr)
2392 return 0;
2393 vma = find_vma(mm, addr);
2394- return (!vma || (addr + len) <= vma->vm_start);
2395+ return check_heap_stack_gap(vma, addr, len);
2396 }
2397
2398 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2399@@ -256,7 +256,7 @@ full_search:
2400 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2401 continue;
2402 }
2403- if (!vma || addr + len <= vma->vm_start) {
2404+ if (check_heap_stack_gap(vma, addr, len)) {
2405 /*
2406 * Remember the place where we stopped the search:
2407 */
2408@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2409 }
2410 }
2411
2412- addr = mm->mmap_base;
2413- while (addr > len) {
2414+ if (mm->mmap_base < len)
2415+ addr = -ENOMEM;
2416+ else
2417+ addr = mm->mmap_base - len;
2418+
2419+ while (!IS_ERR_VALUE(addr)) {
2420 /* Go down by chunk size */
2421- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2422+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2423
2424 /* Check for hit with different page size */
2425 mask = slice_range_to_mask(addr, len);
2426@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2427 * return with success:
2428 */
2429 vma = find_vma(mm, addr);
2430- if (!vma || (addr + len) <= vma->vm_start) {
2431+ if (check_heap_stack_gap(vma, addr, len)) {
2432 /* remember the address as a hint for next time */
2433 if (use_cache)
2434 mm->free_area_cache = addr;
2435@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2436 mm->cached_hole_size = vma->vm_start - addr;
2437
2438 /* try just below the current vma->vm_start */
2439- addr = vma->vm_start;
2440+ addr = skip_heap_stack_gap(vma, len);
2441 }
2442
2443 /*
2444@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2445 if (fixed && addr > (mm->task_size - len))
2446 return -EINVAL;
2447
2448+#ifdef CONFIG_PAX_RANDMMAP
2449+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2450+ addr = 0;
2451+#endif
2452+
2453 /* If hint, make sure it matches our alignment restrictions */
2454 if (!fixed && addr) {
2455 addr = _ALIGN_UP(addr, 1ul << pshift);
2456diff -urNp linux-3.1.4/arch/s390/include/asm/elf.h linux-3.1.4/arch/s390/include/asm/elf.h
2457--- linux-3.1.4/arch/s390/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
2458+++ linux-3.1.4/arch/s390/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
2459@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2460 the loader. We need to make sure that it is out of the way of the program
2461 that it will "exec", and that there is sufficient room for the brk. */
2462
2463-extern unsigned long randomize_et_dyn(unsigned long base);
2464-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2465+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2466+
2467+#ifdef CONFIG_PAX_ASLR
2468+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2469+
2470+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2471+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2472+#endif
2473
2474 /* This yields a mask that user programs can use to figure out what
2475 instruction set this CPU supports. */
2476@@ -211,7 +217,4 @@ struct linux_binprm;
2477 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2478 int arch_setup_additional_pages(struct linux_binprm *, int);
2479
2480-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2481-#define arch_randomize_brk arch_randomize_brk
2482-
2483 #endif
2484diff -urNp linux-3.1.4/arch/s390/include/asm/system.h linux-3.1.4/arch/s390/include/asm/system.h
2485--- linux-3.1.4/arch/s390/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
2486+++ linux-3.1.4/arch/s390/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
2487@@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *co
2488 extern void (*_machine_halt)(void);
2489 extern void (*_machine_power_off)(void);
2490
2491-extern unsigned long arch_align_stack(unsigned long sp);
2492+#define arch_align_stack(x) ((x) & ~0xfUL)
2493
2494 static inline int tprot(unsigned long addr)
2495 {
2496diff -urNp linux-3.1.4/arch/s390/include/asm/uaccess.h linux-3.1.4/arch/s390/include/asm/uaccess.h
2497--- linux-3.1.4/arch/s390/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
2498+++ linux-3.1.4/arch/s390/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
2499@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2500 copy_to_user(void __user *to, const void *from, unsigned long n)
2501 {
2502 might_fault();
2503+
2504+ if ((long)n < 0)
2505+ return n;
2506+
2507 if (access_ok(VERIFY_WRITE, to, n))
2508 n = __copy_to_user(to, from, n);
2509 return n;
2510@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2511 static inline unsigned long __must_check
2512 __copy_from_user(void *to, const void __user *from, unsigned long n)
2513 {
2514+ if ((long)n < 0)
2515+ return n;
2516+
2517 if (__builtin_constant_p(n) && (n <= 256))
2518 return uaccess.copy_from_user_small(n, from, to);
2519 else
2520@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2521 unsigned int sz = __compiletime_object_size(to);
2522
2523 might_fault();
2524+
2525+ if ((long)n < 0)
2526+ return n;
2527+
2528 if (unlikely(sz != -1 && sz < n)) {
2529 copy_from_user_overflow();
2530 return n;
2531diff -urNp linux-3.1.4/arch/s390/kernel/module.c linux-3.1.4/arch/s390/kernel/module.c
2532--- linux-3.1.4/arch/s390/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
2533+++ linux-3.1.4/arch/s390/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
2534@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2535
2536 /* Increase core size by size of got & plt and set start
2537 offsets for got and plt. */
2538- me->core_size = ALIGN(me->core_size, 4);
2539- me->arch.got_offset = me->core_size;
2540- me->core_size += me->arch.got_size;
2541- me->arch.plt_offset = me->core_size;
2542- me->core_size += me->arch.plt_size;
2543+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2544+ me->arch.got_offset = me->core_size_rw;
2545+ me->core_size_rw += me->arch.got_size;
2546+ me->arch.plt_offset = me->core_size_rx;
2547+ me->core_size_rx += me->arch.plt_size;
2548 return 0;
2549 }
2550
2551@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2552 if (info->got_initialized == 0) {
2553 Elf_Addr *gotent;
2554
2555- gotent = me->module_core + me->arch.got_offset +
2556+ gotent = me->module_core_rw + me->arch.got_offset +
2557 info->got_offset;
2558 *gotent = val;
2559 info->got_initialized = 1;
2560@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2561 else if (r_type == R_390_GOTENT ||
2562 r_type == R_390_GOTPLTENT)
2563 *(unsigned int *) loc =
2564- (val + (Elf_Addr) me->module_core - loc) >> 1;
2565+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2566 else if (r_type == R_390_GOT64 ||
2567 r_type == R_390_GOTPLT64)
2568 *(unsigned long *) loc = val;
2569@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2570 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2571 if (info->plt_initialized == 0) {
2572 unsigned int *ip;
2573- ip = me->module_core + me->arch.plt_offset +
2574+ ip = me->module_core_rx + me->arch.plt_offset +
2575 info->plt_offset;
2576 #ifndef CONFIG_64BIT
2577 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2578@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2579 val - loc + 0xffffUL < 0x1ffffeUL) ||
2580 (r_type == R_390_PLT32DBL &&
2581 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2582- val = (Elf_Addr) me->module_core +
2583+ val = (Elf_Addr) me->module_core_rx +
2584 me->arch.plt_offset +
2585 info->plt_offset;
2586 val += rela->r_addend - loc;
2587@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2588 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2589 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2590 val = val + rela->r_addend -
2591- ((Elf_Addr) me->module_core + me->arch.got_offset);
2592+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2593 if (r_type == R_390_GOTOFF16)
2594 *(unsigned short *) loc = val;
2595 else if (r_type == R_390_GOTOFF32)
2596@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2597 break;
2598 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2599 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2600- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2601+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2602 rela->r_addend - loc;
2603 if (r_type == R_390_GOTPC)
2604 *(unsigned int *) loc = val;
2605diff -urNp linux-3.1.4/arch/s390/kernel/process.c linux-3.1.4/arch/s390/kernel/process.c
2606--- linux-3.1.4/arch/s390/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2607+++ linux-3.1.4/arch/s390/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
2608@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2609 }
2610 return 0;
2611 }
2612-
2613-unsigned long arch_align_stack(unsigned long sp)
2614-{
2615- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2616- sp -= get_random_int() & ~PAGE_MASK;
2617- return sp & ~0xf;
2618-}
2619-
2620-static inline unsigned long brk_rnd(void)
2621-{
2622- /* 8MB for 32bit, 1GB for 64bit */
2623- if (is_32bit_task())
2624- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2625- else
2626- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2627-}
2628-
2629-unsigned long arch_randomize_brk(struct mm_struct *mm)
2630-{
2631- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2632-
2633- if (ret < mm->brk)
2634- return mm->brk;
2635- return ret;
2636-}
2637-
2638-unsigned long randomize_et_dyn(unsigned long base)
2639-{
2640- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2641-
2642- if (!(current->flags & PF_RANDOMIZE))
2643- return base;
2644- if (ret < base)
2645- return base;
2646- return ret;
2647-}
2648diff -urNp linux-3.1.4/arch/s390/kernel/setup.c linux-3.1.4/arch/s390/kernel/setup.c
2649--- linux-3.1.4/arch/s390/kernel/setup.c 2011-11-11 15:19:27.000000000 -0500
2650+++ linux-3.1.4/arch/s390/kernel/setup.c 2011-11-16 18:39:07.000000000 -0500
2651@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2652 }
2653 early_param("mem", early_parse_mem);
2654
2655-unsigned int user_mode = HOME_SPACE_MODE;
2656+unsigned int user_mode = SECONDARY_SPACE_MODE;
2657 EXPORT_SYMBOL_GPL(user_mode);
2658
2659 static int set_amode_and_uaccess(unsigned long user_amode,
2660diff -urNp linux-3.1.4/arch/s390/mm/mmap.c linux-3.1.4/arch/s390/mm/mmap.c
2661--- linux-3.1.4/arch/s390/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
2662+++ linux-3.1.4/arch/s390/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
2663@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2664 */
2665 if (mmap_is_legacy()) {
2666 mm->mmap_base = TASK_UNMAPPED_BASE;
2667+
2668+#ifdef CONFIG_PAX_RANDMMAP
2669+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2670+ mm->mmap_base += mm->delta_mmap;
2671+#endif
2672+
2673 mm->get_unmapped_area = arch_get_unmapped_area;
2674 mm->unmap_area = arch_unmap_area;
2675 } else {
2676 mm->mmap_base = mmap_base();
2677+
2678+#ifdef CONFIG_PAX_RANDMMAP
2679+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2680+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2681+#endif
2682+
2683 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2684 mm->unmap_area = arch_unmap_area_topdown;
2685 }
2686@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = s390_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709diff -urNp linux-3.1.4/arch/score/include/asm/system.h linux-3.1.4/arch/score/include/asm/system.h
2710--- linux-3.1.4/arch/score/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
2711+++ linux-3.1.4/arch/score/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
2712@@ -17,7 +17,7 @@ do { \
2713 #define finish_arch_switch(prev) do {} while (0)
2714
2715 typedef void (*vi_handler_t)(void);
2716-extern unsigned long arch_align_stack(unsigned long sp);
2717+#define arch_align_stack(x) (x)
2718
2719 #define mb() barrier()
2720 #define rmb() barrier()
2721diff -urNp linux-3.1.4/arch/score/kernel/process.c linux-3.1.4/arch/score/kernel/process.c
2722--- linux-3.1.4/arch/score/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2723+++ linux-3.1.4/arch/score/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
2724@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2725
2726 return task_pt_regs(task)->cp0_epc;
2727 }
2728-
2729-unsigned long arch_align_stack(unsigned long sp)
2730-{
2731- return sp;
2732-}
2733diff -urNp linux-3.1.4/arch/sh/mm/mmap.c linux-3.1.4/arch/sh/mm/mmap.c
2734--- linux-3.1.4/arch/sh/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
2735+++ linux-3.1.4/arch/sh/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
2736@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2737 addr = PAGE_ALIGN(addr);
2738
2739 vma = find_vma(mm, addr);
2740- if (TASK_SIZE - len >= addr &&
2741- (!vma || addr + len <= vma->vm_start))
2742+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2743 return addr;
2744 }
2745
2746@@ -106,7 +105,7 @@ full_search:
2747 }
2748 return -ENOMEM;
2749 }
2750- if (likely(!vma || addr + len <= vma->vm_start)) {
2751+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2752 /*
2753 * Remember the place where we stopped the search:
2754 */
2755@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2756 addr = PAGE_ALIGN(addr);
2757
2758 vma = find_vma(mm, addr);
2759- if (TASK_SIZE - len >= addr &&
2760- (!vma || addr + len <= vma->vm_start))
2761+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2762 return addr;
2763 }
2764
2765@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2766 /* make sure it can fit in the remaining address space */
2767 if (likely(addr > len)) {
2768 vma = find_vma(mm, addr-len);
2769- if (!vma || addr <= vma->vm_start) {
2770+ if (check_heap_stack_gap(vma, addr - len, len)) {
2771 /* remember the address as a hint for next time */
2772 return (mm->free_area_cache = addr-len);
2773 }
2774@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2775 if (unlikely(mm->mmap_base < len))
2776 goto bottomup;
2777
2778- addr = mm->mmap_base-len;
2779- if (do_colour_align)
2780- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2781+ addr = mm->mmap_base - len;
2782
2783 do {
2784+ if (do_colour_align)
2785+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2786 /*
2787 * Lookup failure means no vma is above this address,
2788 * else if new region fits below vma->vm_start,
2789 * return with success:
2790 */
2791 vma = find_vma(mm, addr);
2792- if (likely(!vma || addr+len <= vma->vm_start)) {
2793+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr);
2796 }
2797@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2798 mm->cached_hole_size = vma->vm_start - addr;
2799
2800 /* try just below the current vma->vm_start */
2801- addr = vma->vm_start-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804- } while (likely(len < vma->vm_start));
2805+ addr = skip_heap_stack_gap(vma, len);
2806+ } while (!IS_ERR_VALUE(addr));
2807
2808 bottomup:
2809 /*
2810diff -urNp linux-3.1.4/arch/sparc/include/asm/atomic_64.h linux-3.1.4/arch/sparc/include/asm/atomic_64.h
2811--- linux-3.1.4/arch/sparc/include/asm/atomic_64.h 2011-11-11 15:19:27.000000000 -0500
2812+++ linux-3.1.4/arch/sparc/include/asm/atomic_64.h 2011-11-16 18:39:07.000000000 -0500
2813@@ -14,18 +14,40 @@
2814 #define ATOMIC64_INIT(i) { (i) }
2815
2816 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2817+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2818+{
2819+ return v->counter;
2820+}
2821 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2822+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2823+{
2824+ return v->counter;
2825+}
2826
2827 #define atomic_set(v, i) (((v)->counter) = i)
2828+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2829+{
2830+ v->counter = i;
2831+}
2832 #define atomic64_set(v, i) (((v)->counter) = i)
2833+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2834+{
2835+ v->counter = i;
2836+}
2837
2838 extern void atomic_add(int, atomic_t *);
2839+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2840 extern void atomic64_add(long, atomic64_t *);
2841+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2842 extern void atomic_sub(int, atomic_t *);
2843+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2844 extern void atomic64_sub(long, atomic64_t *);
2845+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2846
2847 extern int atomic_add_ret(int, atomic_t *);
2848+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2849 extern long atomic64_add_ret(long, atomic64_t *);
2850+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2851 extern int atomic_sub_ret(int, atomic_t *);
2852 extern long atomic64_sub_ret(long, atomic64_t *);
2853
2854@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2855 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2856
2857 #define atomic_inc_return(v) atomic_add_ret(1, v)
2858+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2859+{
2860+ return atomic_add_ret_unchecked(1, v);
2861+}
2862 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2863+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2864+{
2865+ return atomic64_add_ret_unchecked(1, v);
2866+}
2867
2868 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2869 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2870
2871 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2872+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2873+{
2874+ return atomic_add_ret_unchecked(i, v);
2875+}
2876 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2877+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2878+{
2879+ return atomic64_add_ret_unchecked(i, v);
2880+}
2881
2882 /*
2883 * atomic_inc_and_test - increment and test
2884@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2885 * other cases.
2886 */
2887 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2888+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2889+{
2890+ return atomic_inc_return_unchecked(v) == 0;
2891+}
2892 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2893
2894 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2895@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomi
2896 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2897
2898 #define atomic_inc(v) atomic_add(1, v)
2899+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2900+{
2901+ atomic_add_unchecked(1, v);
2902+}
2903 #define atomic64_inc(v) atomic64_add(1, v)
2904+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2905+{
2906+ atomic64_add_unchecked(1, v);
2907+}
2908
2909 #define atomic_dec(v) atomic_sub(1, v)
2910+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2911+{
2912+ atomic_sub_unchecked(1, v);
2913+}
2914 #define atomic64_dec(v) atomic64_sub(1, v)
2915+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2916+{
2917+ atomic64_sub_unchecked(1, v);
2918+}
2919
2920 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2921 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2922
2923 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2924+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2925+{
2926+ return cmpxchg(&v->counter, old, new);
2927+}
2928 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2929+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2930+{
2931+ return xchg(&v->counter, new);
2932+}
2933
2934 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
2935 {
2936- int c, old;
2937+ int c, old, new;
2938 c = atomic_read(v);
2939 for (;;) {
2940- if (unlikely(c == (u)))
2941+ if (unlikely(c == u))
2942 break;
2943- old = atomic_cmpxchg((v), c, c + (a));
2944+
2945+ asm volatile("addcc %2, %0, %0\n"
2946+
2947+#ifdef CONFIG_PAX_REFCOUNT
2948+ "tvs %%icc, 6\n"
2949+#endif
2950+
2951+ : "=r" (new)
2952+ : "0" (c), "ir" (a)
2953+ : "cc");
2954+
2955+ old = atomic_cmpxchg(v, c, new);
2956 if (likely(old == c))
2957 break;
2958 c = old;
2959@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(at
2960 #define atomic64_cmpxchg(v, o, n) \
2961 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2962 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2963+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2964+{
2965+ return xchg(&v->counter, new);
2966+}
2967
2968 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2969 {
2970- long c, old;
2971+ long c, old, new;
2972 c = atomic64_read(v);
2973 for (;;) {
2974- if (unlikely(c == (u)))
2975+ if (unlikely(c == u))
2976 break;
2977- old = atomic64_cmpxchg((v), c, c + (a));
2978+
2979+ asm volatile("addcc %2, %0, %0\n"
2980+
2981+#ifdef CONFIG_PAX_REFCOUNT
2982+ "tvs %%xcc, 6\n"
2983+#endif
2984+
2985+ : "=r" (new)
2986+ : "0" (c), "ir" (a)
2987+ : "cc");
2988+
2989+ old = atomic64_cmpxchg(v, c, new);
2990 if (likely(old == c))
2991 break;
2992 c = old;
2993 }
2994- return c != (u);
2995+ return c != u;
2996 }
2997
2998 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2999diff -urNp linux-3.1.4/arch/sparc/include/asm/cache.h linux-3.1.4/arch/sparc/include/asm/cache.h
3000--- linux-3.1.4/arch/sparc/include/asm/cache.h 2011-11-11 15:19:27.000000000 -0500
3001+++ linux-3.1.4/arch/sparc/include/asm/cache.h 2011-11-16 18:39:07.000000000 -0500
3002@@ -10,7 +10,7 @@
3003 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3004
3005 #define L1_CACHE_SHIFT 5
3006-#define L1_CACHE_BYTES 32
3007+#define L1_CACHE_BYTES 32UL
3008
3009 #ifdef CONFIG_SPARC32
3010 #define SMP_CACHE_BYTES_SHIFT 5
3011diff -urNp linux-3.1.4/arch/sparc/include/asm/elf_32.h linux-3.1.4/arch/sparc/include/asm/elf_32.h
3012--- linux-3.1.4/arch/sparc/include/asm/elf_32.h 2011-11-11 15:19:27.000000000 -0500
3013+++ linux-3.1.4/arch/sparc/include/asm/elf_32.h 2011-11-16 18:39:07.000000000 -0500
3014@@ -114,6 +114,13 @@ typedef struct {
3015
3016 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3017
3018+#ifdef CONFIG_PAX_ASLR
3019+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3020+
3021+#define PAX_DELTA_MMAP_LEN 16
3022+#define PAX_DELTA_STACK_LEN 16
3023+#endif
3024+
3025 /* This yields a mask that user programs can use to figure out what
3026 instruction set this cpu supports. This can NOT be done in userspace
3027 on Sparc. */
3028diff -urNp linux-3.1.4/arch/sparc/include/asm/elf_64.h linux-3.1.4/arch/sparc/include/asm/elf_64.h
3029--- linux-3.1.4/arch/sparc/include/asm/elf_64.h 2011-11-11 15:19:27.000000000 -0500
3030+++ linux-3.1.4/arch/sparc/include/asm/elf_64.h 2011-11-16 18:39:07.000000000 -0500
3031@@ -180,6 +180,13 @@ typedef struct {
3032 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3033 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3034
3035+#ifdef CONFIG_PAX_ASLR
3036+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3037+
3038+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3039+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3040+#endif
3041+
3042 extern unsigned long sparc64_elf_hwcap;
3043 #define ELF_HWCAP sparc64_elf_hwcap
3044
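The PAX_DELTA_* values above are bit counts of randomization. Assuming the delta is applied in page-sized units as elsewhere in PaX (that code is not part of this hunk), the 64-bit values translate into the spans below; a quick back-of-the-envelope check:

#include <stdio.h>

int main(void)
{
	/* Bit counts from the sparc64 (non-compat) defines above. */
	unsigned mmap_bits  = 28;
	unsigned stack_bits = 29;
	unsigned long page  = 8192UL;	/* sparc64 uses 8 KiB base pages */

	printf("mmap  base: %lu possible slots, ~%lu GiB of spread\n",
	       1UL << mmap_bits, ((1UL << mmap_bits) * page) >> 30);
	printf("stack base: %lu possible slots, ~%lu GiB of spread\n",
	       1UL << stack_bits, ((1UL << stack_bits) * page) >> 30);
	return 0;
}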
3045diff -urNp linux-3.1.4/arch/sparc/include/asm/pgtable_32.h linux-3.1.4/arch/sparc/include/asm/pgtable_32.h
3046--- linux-3.1.4/arch/sparc/include/asm/pgtable_32.h 2011-11-11 15:19:27.000000000 -0500
3047+++ linux-3.1.4/arch/sparc/include/asm/pgtable_32.h 2011-11-16 18:39:07.000000000 -0500
3048@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3049 BTFIXUPDEF_INT(page_none)
3050 BTFIXUPDEF_INT(page_copy)
3051 BTFIXUPDEF_INT(page_readonly)
3052+
3053+#ifdef CONFIG_PAX_PAGEEXEC
3054+BTFIXUPDEF_INT(page_shared_noexec)
3055+BTFIXUPDEF_INT(page_copy_noexec)
3056+BTFIXUPDEF_INT(page_readonly_noexec)
3057+#endif
3058+
3059 BTFIXUPDEF_INT(page_kernel)
3060
3061 #define PMD_SHIFT SUN4C_PMD_SHIFT
3062@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3063 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3064 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3065
3066+#ifdef CONFIG_PAX_PAGEEXEC
3067+extern pgprot_t PAGE_SHARED_NOEXEC;
3068+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3069+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3070+#else
3071+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3072+# define PAGE_COPY_NOEXEC PAGE_COPY
3073+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3074+#endif
3075+
3076 extern unsigned long page_kernel;
3077
3078 #ifdef MODULE
3079diff -urNp linux-3.1.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.1.4/arch/sparc/include/asm/pgtsrmmu.h
3080--- linux-3.1.4/arch/sparc/include/asm/pgtsrmmu.h 2011-11-11 15:19:27.000000000 -0500
3081+++ linux-3.1.4/arch/sparc/include/asm/pgtsrmmu.h 2011-11-16 18:39:07.000000000 -0500
3082@@ -115,6 +115,13 @@
3083 SRMMU_EXEC | SRMMU_REF)
3084 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3085 SRMMU_EXEC | SRMMU_REF)
3086+
3087+#ifdef CONFIG_PAX_PAGEEXEC
3088+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3089+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3090+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3091+#endif
3092+
3093 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3094 SRMMU_DIRTY | SRMMU_REF)
3095
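The *_NOEXEC protections above are their normal counterparts with SRMMU_EXEC dropped, which is what lets PAX_PAGEEXEC take an instruction-fetch fault on data pages (the PLT-emulation code later in this patch relies on that fault). A toy check of the relationship, with placeholder bit values since the real SRMMU_* constants are defined earlier in this header and not visible in the hunk:

#include <stdio.h>

/* Placeholder values, NOT the real SRMMU bits. */
#define SRMMU_VALID 0x01UL
#define SRMMU_CACHE 0x80UL
#define SRMMU_REF   0x20UL
#define SRMMU_EXEC  0x08UL

int main(void)
{
	unsigned long rdonly        = SRMMU_VALID | SRMMU_CACHE | SRMMU_EXEC | SRMMU_REF;
	unsigned long rdonly_noexec = SRMMU_VALID | SRMMU_CACHE | SRMMU_REF;

	printf("bits dropped by the NOEXEC variant: %#lx (the exec bit)\n",
	       rdonly ^ rdonly_noexec);
	return 0;
}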
3096diff -urNp linux-3.1.4/arch/sparc/include/asm/spinlock_64.h linux-3.1.4/arch/sparc/include/asm/spinlock_64.h
3097--- linux-3.1.4/arch/sparc/include/asm/spinlock_64.h 2011-11-11 15:19:27.000000000 -0500
3098+++ linux-3.1.4/arch/sparc/include/asm/spinlock_64.h 2011-11-16 18:39:07.000000000 -0500
3099@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3100
3101 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3102
3103-static void inline arch_read_lock(arch_rwlock_t *lock)
3104+static inline void arch_read_lock(arch_rwlock_t *lock)
3105 {
3106 unsigned long tmp1, tmp2;
3107
3108 __asm__ __volatile__ (
3109 "1: ldsw [%2], %0\n"
3110 " brlz,pn %0, 2f\n"
3111-"4: add %0, 1, %1\n"
3112+"4: addcc %0, 1, %1\n"
3113+
3114+#ifdef CONFIG_PAX_REFCOUNT
3115+" tvs %%icc, 6\n"
3116+#endif
3117+
3118 " cas [%2], %0, %1\n"
3119 " cmp %0, %1\n"
3120 " bne,pn %%icc, 1b\n"
3121@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3122 " .previous"
3123 : "=&r" (tmp1), "=&r" (tmp2)
3124 : "r" (lock)
3125- : "memory");
3126+ : "memory", "cc");
3127 }
3128
3129-static int inline arch_read_trylock(arch_rwlock_t *lock)
3130+static inline int arch_read_trylock(arch_rwlock_t *lock)
3131 {
3132 int tmp1, tmp2;
3133
3134@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3135 "1: ldsw [%2], %0\n"
3136 " brlz,a,pn %0, 2f\n"
3137 " mov 0, %0\n"
3138-" add %0, 1, %1\n"
3139+" addcc %0, 1, %1\n"
3140+
3141+#ifdef CONFIG_PAX_REFCOUNT
3142+" tvs %%icc, 6\n"
3143+#endif
3144+
3145 " cas [%2], %0, %1\n"
3146 " cmp %0, %1\n"
3147 " bne,pn %%icc, 1b\n"
3148@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3149 return tmp1;
3150 }
3151
3152-static void inline arch_read_unlock(arch_rwlock_t *lock)
3153+static inline void arch_read_unlock(arch_rwlock_t *lock)
3154 {
3155 unsigned long tmp1, tmp2;
3156
3157 __asm__ __volatile__(
3158 "1: lduw [%2], %0\n"
3159-" sub %0, 1, %1\n"
3160+" subcc %0, 1, %1\n"
3161+
3162+#ifdef CONFIG_PAX_REFCOUNT
3163+" tvs %%icc, 6\n"
3164+#endif
3165+
3166 " cas [%2], %0, %1\n"
3167 " cmp %0, %1\n"
3168 " bne,pn %%xcc, 1b\n"
3169@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3170 : "memory");
3171 }
3172
3173-static void inline arch_write_lock(arch_rwlock_t *lock)
3174+static inline void arch_write_lock(arch_rwlock_t *lock)
3175 {
3176 unsigned long mask, tmp1, tmp2;
3177
3178@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3179 : "memory");
3180 }
3181
3182-static void inline arch_write_unlock(arch_rwlock_t *lock)
3183+static inline void arch_write_unlock(arch_rwlock_t *lock)
3184 {
3185 __asm__ __volatile__(
3186 " stw %%g0, [%0]"
3187@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3188 : "memory");
3189 }
3190
3191-static int inline arch_write_trylock(arch_rwlock_t *lock)
3192+static inline int arch_write_trylock(arch_rwlock_t *lock)
3193 {
3194 unsigned long mask, tmp1, tmp2, result;
3195
3196diff -urNp linux-3.1.4/arch/sparc/include/asm/thread_info_32.h linux-3.1.4/arch/sparc/include/asm/thread_info_32.h
3197--- linux-3.1.4/arch/sparc/include/asm/thread_info_32.h 2011-11-11 15:19:27.000000000 -0500
3198+++ linux-3.1.4/arch/sparc/include/asm/thread_info_32.h 2011-11-16 18:39:07.000000000 -0500
3199@@ -50,6 +50,8 @@ struct thread_info {
3200 unsigned long w_saved;
3201
3202 struct restart_block restart_block;
3203+
3204+ unsigned long lowest_stack;
3205 };
3206
3207 /*
3208diff -urNp linux-3.1.4/arch/sparc/include/asm/thread_info_64.h linux-3.1.4/arch/sparc/include/asm/thread_info_64.h
3209--- linux-3.1.4/arch/sparc/include/asm/thread_info_64.h 2011-11-11 15:19:27.000000000 -0500
3210+++ linux-3.1.4/arch/sparc/include/asm/thread_info_64.h 2011-11-16 18:39:07.000000000 -0500
3211@@ -63,6 +63,8 @@ struct thread_info {
3212 struct pt_regs *kern_una_regs;
3213 unsigned int kern_una_insn;
3214
3215+ unsigned long lowest_stack;
3216+
3217 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3218 };
3219
3220diff -urNp linux-3.1.4/arch/sparc/include/asm/uaccess_32.h linux-3.1.4/arch/sparc/include/asm/uaccess_32.h
3221--- linux-3.1.4/arch/sparc/include/asm/uaccess_32.h 2011-11-11 15:19:27.000000000 -0500
3222+++ linux-3.1.4/arch/sparc/include/asm/uaccess_32.h 2011-11-16 18:39:07.000000000 -0500
3223@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3224
3225 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3226 {
3227- if (n && __access_ok((unsigned long) to, n))
3228+ if ((long)n < 0)
3229+ return n;
3230+
3231+ if (n && __access_ok((unsigned long) to, n)) {
3232+ if (!__builtin_constant_p(n))
3233+ check_object_size(from, n, true);
3234 return __copy_user(to, (__force void __user *) from, n);
3235- else
3236+ } else
3237 return n;
3238 }
3239
3240 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3241 {
3242+ if ((long)n < 0)
3243+ return n;
3244+
3245+ if (!__builtin_constant_p(n))
3246+ check_object_size(from, n, true);
3247+
3248 return __copy_user(to, (__force void __user *) from, n);
3249 }
3250
3251 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3252 {
3253- if (n && __access_ok((unsigned long) from, n))
3254+ if ((long)n < 0)
3255+ return n;
3256+
3257+ if (n && __access_ok((unsigned long) from, n)) {
3258+ if (!__builtin_constant_p(n))
3259+ check_object_size(to, n, false);
3260 return __copy_user((__force void __user *) to, from, n);
3261- else
3262+ } else
3263 return n;
3264 }
3265
3266 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3267 {
3268+ if ((long)n < 0)
3269+ return n;
3270+
3271 return __copy_user((__force void __user *) to, from, n);
3272 }
3273
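The `(long)n < 0` guard added to each wrapper above rejects lengths with the high bit set before they ever reach __copy_user(), the usual symptom of a signed length that went negative and was then passed as unsigned. A two-line illustration of what it catches:

#include <stdio.h>

int main(void)
{
	int broken_len = -4;				/* e.g. an error code reused as a length */
	unsigned long n = (unsigned long)broken_len;	/* huge once reinterpreted */

	if ((long)n < 0)
		printf("rejected early: n = %lu looks negative\n", n);
	else
		printf("would have been handed to __copy_user()\n");
	return 0;
}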
3274diff -urNp linux-3.1.4/arch/sparc/include/asm/uaccess_64.h linux-3.1.4/arch/sparc/include/asm/uaccess_64.h
3275--- linux-3.1.4/arch/sparc/include/asm/uaccess_64.h 2011-11-11 15:19:27.000000000 -0500
3276+++ linux-3.1.4/arch/sparc/include/asm/uaccess_64.h 2011-11-16 18:39:07.000000000 -0500
3277@@ -10,6 +10,7 @@
3278 #include <linux/compiler.h>
3279 #include <linux/string.h>
3280 #include <linux/thread_info.h>
3281+#include <linux/kernel.h>
3282 #include <asm/asi.h>
3283 #include <asm/system.h>
3284 #include <asm/spitfire.h>
3285@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3286 static inline unsigned long __must_check
3287 copy_from_user(void *to, const void __user *from, unsigned long size)
3288 {
3289- unsigned long ret = ___copy_from_user(to, from, size);
3290+ unsigned long ret;
3291
3292+ if ((long)size < 0 || size > INT_MAX)
3293+ return size;
3294+
3295+ if (!__builtin_constant_p(size))
3296+ check_object_size(to, size, false);
3297+
3298+ ret = ___copy_from_user(to, from, size);
3299 if (unlikely(ret))
3300 ret = copy_from_user_fixup(to, from, size);
3301
3302@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3303 static inline unsigned long __must_check
3304 copy_to_user(void __user *to, const void *from, unsigned long size)
3305 {
3306- unsigned long ret = ___copy_to_user(to, from, size);
3307+ unsigned long ret;
3308+
3309+ if ((long)size < 0 || size > INT_MAX)
3310+ return size;
3311+
3312+ if (!__builtin_constant_p(size))
3313+ check_object_size(from, size, true);
3314
3315+ ret = ___copy_to_user(to, from, size);
3316 if (unlikely(ret))
3317 ret = copy_to_user_fixup(to, from, size);
3318 return ret;
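On sparc64 the guard is stricter: besides `(long)size < 0` it also rejects `size > INT_MAX`, which additionally catches a 32-bit negative length that was zero-extended into a 64-bit size (such a value is positive as a long but still absurdly large). For example:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int broken_len = -4;
	unsigned long size = (unsigned int)broken_len;	/* zero-extended: 0xFFFFFFFC */

	printf("(long)size < 0 : %d\n", (long)size < 0);	/* 0 - not caught */
	printf("size > INT_MAX : %d\n", size > INT_MAX);	/* 1 - caught */
	return 0;
}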
3319diff -urNp linux-3.1.4/arch/sparc/include/asm/uaccess.h linux-3.1.4/arch/sparc/include/asm/uaccess.h
3320--- linux-3.1.4/arch/sparc/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
3321+++ linux-3.1.4/arch/sparc/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
3322@@ -1,5 +1,13 @@
3323 #ifndef ___ASM_SPARC_UACCESS_H
3324 #define ___ASM_SPARC_UACCESS_H
3325+
3326+#ifdef __KERNEL__
3327+#ifndef __ASSEMBLY__
3328+#include <linux/types.h>
3329+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3330+#endif
3331+#endif
3332+
3333 #if defined(__sparc__) && defined(__arch64__)
3334 #include <asm/uaccess_64.h>
3335 #else
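check_object_size(), the PAX_USERCOPY checker declared here so that both the 32-bit and 64-bit uaccess headers can use it, is implemented elsewhere in the patch. The copy_*_user wrappers above only call it when the length is not a compile-time constant, presumably because constant sizes can be validated without a runtime check; the test they key off is just the compiler builtin:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long runtime_len = (unsigned long)rand();

	/* 1: known at compile time, so the wrappers skip check_object_size() */
	printf("sizeof(long) constant? %d\n", __builtin_constant_p(sizeof(long)));
	/* 0: only known at run time, so the wrappers do call it */
	printf("runtime_len constant?  %d\n", __builtin_constant_p(runtime_len));
	return 0;
}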
3336diff -urNp linux-3.1.4/arch/sparc/kernel/Makefile linux-3.1.4/arch/sparc/kernel/Makefile
3337--- linux-3.1.4/arch/sparc/kernel/Makefile 2011-11-11 15:19:27.000000000 -0500
3338+++ linux-3.1.4/arch/sparc/kernel/Makefile 2011-11-16 18:39:07.000000000 -0500
3339@@ -3,7 +3,7 @@
3340 #
3341
3342 asflags-y := -ansi
3343-ccflags-y := -Werror
3344+#ccflags-y := -Werror
3345
3346 extra-y := head_$(BITS).o
3347 extra-y += init_task.o
3348diff -urNp linux-3.1.4/arch/sparc/kernel/process_32.c linux-3.1.4/arch/sparc/kernel/process_32.c
3349--- linux-3.1.4/arch/sparc/kernel/process_32.c 2011-11-11 15:19:27.000000000 -0500
3350+++ linux-3.1.4/arch/sparc/kernel/process_32.c 2011-11-16 18:40:08.000000000 -0500
3351@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3352 rw->ins[4], rw->ins[5],
3353 rw->ins[6],
3354 rw->ins[7]);
3355- printk("%pS\n", (void *) rw->ins[7]);
3356+ printk("%pA\n", (void *) rw->ins[7]);
3357 rw = (struct reg_window32 *) rw->ins[6];
3358 }
3359 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3360@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3361
3362 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3363 r->psr, r->pc, r->npc, r->y, print_tainted());
3364- printk("PC: <%pS>\n", (void *) r->pc);
3365+ printk("PC: <%pA>\n", (void *) r->pc);
3366 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3367 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3368 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3369 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3370 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3371 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3372- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3373+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3374
3375 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3376 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3377@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3378 rw = (struct reg_window32 *) fp;
3379 pc = rw->ins[7];
3380 printk("[%08lx : ", pc);
3381- printk("%pS ] ", (void *) pc);
3382+ printk("%pA ] ", (void *) pc);
3383 fp = rw->ins[6];
3384 } while (++count < 16);
3385 printk("\n");
3386diff -urNp linux-3.1.4/arch/sparc/kernel/process_64.c linux-3.1.4/arch/sparc/kernel/process_64.c
3387--- linux-3.1.4/arch/sparc/kernel/process_64.c 2011-11-11 15:19:27.000000000 -0500
3388+++ linux-3.1.4/arch/sparc/kernel/process_64.c 2011-11-16 18:40:08.000000000 -0500
3389@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3390 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3391 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3392 if (regs->tstate & TSTATE_PRIV)
3393- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3394+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3395 }
3396
3397 void show_regs(struct pt_regs *regs)
3398 {
3399 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3400 regs->tpc, regs->tnpc, regs->y, print_tainted());
3401- printk("TPC: <%pS>\n", (void *) regs->tpc);
3402+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3403 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3404 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3405 regs->u_regs[3]);
3406@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3407 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3408 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3409 regs->u_regs[15]);
3410- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3411+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3412 show_regwindow(regs);
3413 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3414 }
3415@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3416 ((tp && tp->task) ? tp->task->pid : -1));
3417
3418 if (gp->tstate & TSTATE_PRIV) {
3419- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3420+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3421 (void *) gp->tpc,
3422 (void *) gp->o7,
3423 (void *) gp->i7,
3424diff -urNp linux-3.1.4/arch/sparc/kernel/sys_sparc_32.c linux-3.1.4/arch/sparc/kernel/sys_sparc_32.c
3425--- linux-3.1.4/arch/sparc/kernel/sys_sparc_32.c 2011-11-11 15:19:27.000000000 -0500
3426+++ linux-3.1.4/arch/sparc/kernel/sys_sparc_32.c 2011-11-16 18:39:07.000000000 -0500
3427@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3428 if (ARCH_SUN4C && len > 0x20000000)
3429 return -ENOMEM;
3430 if (!addr)
3431- addr = TASK_UNMAPPED_BASE;
3432+ addr = current->mm->mmap_base;
3433
3434 if (flags & MAP_SHARED)
3435 addr = COLOUR_ALIGN(addr);
3436@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3437 }
3438 if (TASK_SIZE - PAGE_SIZE - len < addr)
3439 return -ENOMEM;
3440- if (!vmm || addr + len <= vmm->vm_start)
3441+ if (check_heap_stack_gap(vmm, addr, len))
3442 return addr;
3443 addr = vmm->vm_end;
3444 if (flags & MAP_SHARED)
3445diff -urNp linux-3.1.4/arch/sparc/kernel/sys_sparc_64.c linux-3.1.4/arch/sparc/kernel/sys_sparc_64.c
3446--- linux-3.1.4/arch/sparc/kernel/sys_sparc_64.c 2011-11-11 15:19:27.000000000 -0500
3447+++ linux-3.1.4/arch/sparc/kernel/sys_sparc_64.c 2011-11-16 18:39:07.000000000 -0500
3448@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3449 /* We do not accept a shared mapping if it would violate
3450 * cache aliasing constraints.
3451 */
3452- if ((flags & MAP_SHARED) &&
3453+ if ((filp || (flags & MAP_SHARED)) &&
3454 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3455 return -EINVAL;
3456 return addr;
3457@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3458 if (filp || (flags & MAP_SHARED))
3459 do_color_align = 1;
3460
3461+#ifdef CONFIG_PAX_RANDMMAP
3462+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3463+#endif
3464+
3465 if (addr) {
3466 if (do_color_align)
3467 addr = COLOUR_ALIGN(addr, pgoff);
3468@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3469 addr = PAGE_ALIGN(addr);
3470
3471 vma = find_vma(mm, addr);
3472- if (task_size - len >= addr &&
3473- (!vma || addr + len <= vma->vm_start))
3474+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3475 return addr;
3476 }
3477
3478 if (len > mm->cached_hole_size) {
3479- start_addr = addr = mm->free_area_cache;
3480+ start_addr = addr = mm->free_area_cache;
3481 } else {
3482- start_addr = addr = TASK_UNMAPPED_BASE;
3483+ start_addr = addr = mm->mmap_base;
3484 mm->cached_hole_size = 0;
3485 }
3486
3487@@ -174,14 +177,14 @@ full_search:
3488 vma = find_vma(mm, VA_EXCLUDE_END);
3489 }
3490 if (unlikely(task_size < addr)) {
3491- if (start_addr != TASK_UNMAPPED_BASE) {
3492- start_addr = addr = TASK_UNMAPPED_BASE;
3493+ if (start_addr != mm->mmap_base) {
3494+ start_addr = addr = mm->mmap_base;
3495 mm->cached_hole_size = 0;
3496 goto full_search;
3497 }
3498 return -ENOMEM;
3499 }
3500- if (likely(!vma || addr + len <= vma->vm_start)) {
3501+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3502 /*
3503 * Remember the place where we stopped the search:
3504 */
3505@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3506 /* We do not accept a shared mapping if it would violate
3507 * cache aliasing constraints.
3508 */
3509- if ((flags & MAP_SHARED) &&
3510+ if ((filp || (flags & MAP_SHARED)) &&
3511 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3512 return -EINVAL;
3513 return addr;
3514@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3515 addr = PAGE_ALIGN(addr);
3516
3517 vma = find_vma(mm, addr);
3518- if (task_size - len >= addr &&
3519- (!vma || addr + len <= vma->vm_start))
3520+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3521 return addr;
3522 }
3523
3524@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3525 /* make sure it can fit in the remaining address space */
3526 if (likely(addr > len)) {
3527 vma = find_vma(mm, addr-len);
3528- if (!vma || addr <= vma->vm_start) {
3529+ if (check_heap_stack_gap(vma, addr - len, len)) {
3530 /* remember the address as a hint for next time */
3531 return (mm->free_area_cache = addr-len);
3532 }
3533@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3534 if (unlikely(mm->mmap_base < len))
3535 goto bottomup;
3536
3537- addr = mm->mmap_base-len;
3538- if (do_color_align)
3539- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3540+ addr = mm->mmap_base - len;
3541
3542 do {
3543+ if (do_color_align)
3544+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3545 /*
3546 * Lookup failure means no vma is above this address,
3547 * else if new region fits below vma->vm_start,
3548 * return with success:
3549 */
3550 vma = find_vma(mm, addr);
3551- if (likely(!vma || addr+len <= vma->vm_start)) {
3552+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3553 /* remember the address as a hint for next time */
3554 return (mm->free_area_cache = addr);
3555 }
3556@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3557 mm->cached_hole_size = vma->vm_start - addr;
3558
3559 /* try just below the current vma->vm_start */
3560- addr = vma->vm_start-len;
3561- if (do_color_align)
3562- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3563- } while (likely(len < vma->vm_start));
3564+ addr = skip_heap_stack_gap(vma, len);
3565+ } while (!IS_ERR_VALUE(addr));
3566
3567 bottomup:
3568 /*
3569@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3570 gap == RLIM_INFINITY ||
3571 sysctl_legacy_va_layout) {
3572 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3573+
3574+#ifdef CONFIG_PAX_RANDMMAP
3575+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3576+ mm->mmap_base += mm->delta_mmap;
3577+#endif
3578+
3579 mm->get_unmapped_area = arch_get_unmapped_area;
3580 mm->unmap_area = arch_unmap_area;
3581 } else {
3582@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3583 gap = (task_size / 6 * 5);
3584
3585 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3586+
3587+#ifdef CONFIG_PAX_RANDMMAP
3588+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3589+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3590+#endif
3591+
3592 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3593 mm->unmap_area = arch_unmap_area_topdown;
3594 }
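check_heap_stack_gap(), which replaces the open-coded `!vma || addr + len <= vma->vm_start` tests throughout this file, is defined elsewhere in the patch. A plausible shape of it, assuming it simply adds a configurable guard gap when the neighbouring mapping is a downward-growing stack (the real helper may differ in detail):

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins so the sketch builds outside the kernel. */
#define VM_GROWSDOWN 0x0100UL
struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_flags;
};

static unsigned long heap_stack_gap = 64UL * 1024;	/* hypothetical gap size */

static bool check_heap_stack_gap(const struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;			/* nothing above: fits */
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep a gap below a stack */
		return addr + len + heap_stack_gap <= vma->vm_start;
	return addr + len <= vma->vm_start;	/* the old open-coded test */
}

int main(void)
{
	struct vm_area_struct stack = {
		.vm_start = 0x7f0000000000UL,
		.vm_flags = VM_GROWSDOWN,
	};

	/* Ends exactly at the stack: fine without a gap, rejected with one. */
	printf("allowed? %d\n",
	       check_heap_stack_gap(&stack, 0x7effffff0000UL, 0x10000UL));
	return 0;
}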
3595diff -urNp linux-3.1.4/arch/sparc/kernel/traps_32.c linux-3.1.4/arch/sparc/kernel/traps_32.c
3596--- linux-3.1.4/arch/sparc/kernel/traps_32.c 2011-11-11 15:19:27.000000000 -0500
3597+++ linux-3.1.4/arch/sparc/kernel/traps_32.c 2011-11-16 18:40:08.000000000 -0500
3598@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3599 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3600 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3601
3602+extern void gr_handle_kernel_exploit(void);
3603+
3604 void die_if_kernel(char *str, struct pt_regs *regs)
3605 {
3606 static int die_counter;
3607@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3608 count++ < 30 &&
3609 (((unsigned long) rw) >= PAGE_OFFSET) &&
3610 !(((unsigned long) rw) & 0x7)) {
3611- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3612+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3613 (void *) rw->ins[7]);
3614 rw = (struct reg_window32 *)rw->ins[6];
3615 }
3616 }
3617 printk("Instruction DUMP:");
3618 instruction_dump ((unsigned long *) regs->pc);
3619- if(regs->psr & PSR_PS)
3620+ if(regs->psr & PSR_PS) {
3621+ gr_handle_kernel_exploit();
3622 do_exit(SIGKILL);
3623+ }
3624 do_exit(SIGSEGV);
3625 }
3626
3627diff -urNp linux-3.1.4/arch/sparc/kernel/traps_64.c linux-3.1.4/arch/sparc/kernel/traps_64.c
3628--- linux-3.1.4/arch/sparc/kernel/traps_64.c 2011-11-11 15:19:27.000000000 -0500
3629+++ linux-3.1.4/arch/sparc/kernel/traps_64.c 2011-11-16 18:40:08.000000000 -0500
3630@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3631 i + 1,
3632 p->trapstack[i].tstate, p->trapstack[i].tpc,
3633 p->trapstack[i].tnpc, p->trapstack[i].tt);
3634- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3635+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3636 }
3637 }
3638
3639@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3640
3641 lvl -= 0x100;
3642 if (regs->tstate & TSTATE_PRIV) {
3643+
3644+#ifdef CONFIG_PAX_REFCOUNT
3645+ if (lvl == 6)
3646+ pax_report_refcount_overflow(regs);
3647+#endif
3648+
3649 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3650 die_if_kernel(buffer, regs);
3651 }
3652@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3653 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3654 {
3655 char buffer[32];
3656-
3657+
3658 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3659 0, lvl, SIGTRAP) == NOTIFY_STOP)
3660 return;
3661
3662+#ifdef CONFIG_PAX_REFCOUNT
3663+ if (lvl == 6)
3664+ pax_report_refcount_overflow(regs);
3665+#endif
3666+
3667 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3668
3669 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3670@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3671 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3672 printk("%s" "ERROR(%d): ",
3673 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3674- printk("TPC<%pS>\n", (void *) regs->tpc);
3675+ printk("TPC<%pA>\n", (void *) regs->tpc);
3676 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3677 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3678 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3679@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3680 smp_processor_id(),
3681 (type & 0x1) ? 'I' : 'D',
3682 regs->tpc);
3683- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3684+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3685 panic("Irrecoverable Cheetah+ parity error.");
3686 }
3687
3688@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3689 smp_processor_id(),
3690 (type & 0x1) ? 'I' : 'D',
3691 regs->tpc);
3692- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3693+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3694 }
3695
3696 struct sun4v_error_entry {
3697@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3698
3699 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3700 regs->tpc, tl);
3701- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3702+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3703 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3704- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3705+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3706 (void *) regs->u_regs[UREG_I7]);
3707 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3708 "pte[%lx] error[%lx]\n",
3709@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3710
3711 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3712 regs->tpc, tl);
3713- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3714+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3715 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3716- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3717+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3718 (void *) regs->u_regs[UREG_I7]);
3719 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3720 "pte[%lx] error[%lx]\n",
3721@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3722 fp = (unsigned long)sf->fp + STACK_BIAS;
3723 }
3724
3725- printk(" [%016lx] %pS\n", pc, (void *) pc);
3726+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3727 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3728 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3729 int index = tsk->curr_ret_stack;
3730 if (tsk->ret_stack && index >= graph) {
3731 pc = tsk->ret_stack[index - graph].ret;
3732- printk(" [%016lx] %pS\n", pc, (void *) pc);
3733+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3734 graph++;
3735 }
3736 }
3737@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3738 return (struct reg_window *) (fp + STACK_BIAS);
3739 }
3740
3741+extern void gr_handle_kernel_exploit(void);
3742+
3743 void die_if_kernel(char *str, struct pt_regs *regs)
3744 {
3745 static int die_counter;
3746@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3747 while (rw &&
3748 count++ < 30 &&
3749 kstack_valid(tp, (unsigned long) rw)) {
3750- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3751+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3752 (void *) rw->ins[7]);
3753
3754 rw = kernel_stack_up(rw);
3755@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3756 }
3757 user_instruction_dump ((unsigned int __user *) regs->tpc);
3758 }
3759- if (regs->tstate & TSTATE_PRIV)
3760+ if (regs->tstate & TSTATE_PRIV) {
3761+ gr_handle_kernel_exploit();
3762 do_exit(SIGKILL);
3763+ }
3764 do_exit(SIGSEGV);
3765 }
3766 EXPORT_SYMBOL(die_if_kernel);
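This is where the REFCOUNT side of the patch closes the loop: "tvs %icc/%xcc, 6" in the atomics takes software trap 6, which (assuming the usual sparc64 convention that software trap n arrives as trap type 0x100 + n, consistent with the existing `lvl -= 0x100` in bad_trap()) lands in the new `lvl == 6` branches and calls pax_report_refcount_overflow(); a privileged die_if_kernel() additionally calls gr_handle_kernel_exploit() before exiting. The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
	int sw_trap   = 6;			/* the number used by "tvs ..., 6" */
	int trap_type = 0x100 + sw_trap;	/* assumed delivery as 0x100 + n */
	int lvl       = trap_type - 0x100;	/* what bad_trap() computes */

	if (lvl == 6)
		printf("-> pax_report_refcount_overflow(regs)\n");
	return 0;
}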
3767diff -urNp linux-3.1.4/arch/sparc/kernel/unaligned_64.c linux-3.1.4/arch/sparc/kernel/unaligned_64.c
3768--- linux-3.1.4/arch/sparc/kernel/unaligned_64.c 2011-11-11 15:19:27.000000000 -0500
3769+++ linux-3.1.4/arch/sparc/kernel/unaligned_64.c 2011-11-16 18:40:08.000000000 -0500
3770@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3771 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3772
3773 if (__ratelimit(&ratelimit)) {
3774- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3775+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3776 regs->tpc, (void *) regs->tpc);
3777 }
3778 }
3779diff -urNp linux-3.1.4/arch/sparc/lib/atomic_64.S linux-3.1.4/arch/sparc/lib/atomic_64.S
3780--- linux-3.1.4/arch/sparc/lib/atomic_64.S 2011-11-11 15:19:27.000000000 -0500
3781+++ linux-3.1.4/arch/sparc/lib/atomic_64.S 2011-11-16 18:39:07.000000000 -0500
3782@@ -18,7 +18,12 @@
3783 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3784 BACKOFF_SETUP(%o2)
3785 1: lduw [%o1], %g1
3786- add %g1, %o0, %g7
3787+ addcc %g1, %o0, %g7
3788+
3789+#ifdef CONFIG_PAX_REFCOUNT
3790+ tvs %icc, 6
3791+#endif
3792+
3793 cas [%o1], %g1, %g7
3794 cmp %g1, %g7
3795 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3796@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3797 2: BACKOFF_SPIN(%o2, %o3, 1b)
3798 .size atomic_add, .-atomic_add
3799
3800+ .globl atomic_add_unchecked
3801+ .type atomic_add_unchecked,#function
3802+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3803+ BACKOFF_SETUP(%o2)
3804+1: lduw [%o1], %g1
3805+ add %g1, %o0, %g7
3806+ cas [%o1], %g1, %g7
3807+ cmp %g1, %g7
3808+ bne,pn %icc, 2f
3809+ nop
3810+ retl
3811+ nop
3812+2: BACKOFF_SPIN(%o2, %o3, 1b)
3813+ .size atomic_add_unchecked, .-atomic_add_unchecked
3814+
3815 .globl atomic_sub
3816 .type atomic_sub,#function
3817 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3818 BACKOFF_SETUP(%o2)
3819 1: lduw [%o1], %g1
3820- sub %g1, %o0, %g7
3821+ subcc %g1, %o0, %g7
3822+
3823+#ifdef CONFIG_PAX_REFCOUNT
3824+ tvs %icc, 6
3825+#endif
3826+
3827 cas [%o1], %g1, %g7
3828 cmp %g1, %g7
3829 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3830@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3831 2: BACKOFF_SPIN(%o2, %o3, 1b)
3832 .size atomic_sub, .-atomic_sub
3833
3834+ .globl atomic_sub_unchecked
3835+ .type atomic_sub_unchecked,#function
3836+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3837+ BACKOFF_SETUP(%o2)
3838+1: lduw [%o1], %g1
3839+ sub %g1, %o0, %g7
3840+ cas [%o1], %g1, %g7
3841+ cmp %g1, %g7
3842+ bne,pn %icc, 2f
3843+ nop
3844+ retl
3845+ nop
3846+2: BACKOFF_SPIN(%o2, %o3, 1b)
3847+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3848+
3849 .globl atomic_add_ret
3850 .type atomic_add_ret,#function
3851 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3852 BACKOFF_SETUP(%o2)
3853 1: lduw [%o1], %g1
3854- add %g1, %o0, %g7
3855+ addcc %g1, %o0, %g7
3856+
3857+#ifdef CONFIG_PAX_REFCOUNT
3858+ tvs %icc, 6
3859+#endif
3860+
3861 cas [%o1], %g1, %g7
3862 cmp %g1, %g7
3863 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3864@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3865 2: BACKOFF_SPIN(%o2, %o3, 1b)
3866 .size atomic_add_ret, .-atomic_add_ret
3867
3868+ .globl atomic_add_ret_unchecked
3869+ .type atomic_add_ret_unchecked,#function
3870+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3871+ BACKOFF_SETUP(%o2)
3872+1: lduw [%o1], %g1
3873+ addcc %g1, %o0, %g7
3874+ cas [%o1], %g1, %g7
3875+ cmp %g1, %g7
3876+ bne,pn %icc, 2f
3877+ add %g7, %o0, %g7
3878+ sra %g7, 0, %o0
3879+ retl
3880+ nop
3881+2: BACKOFF_SPIN(%o2, %o3, 1b)
3882+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3883+
3884 .globl atomic_sub_ret
3885 .type atomic_sub_ret,#function
3886 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3887 BACKOFF_SETUP(%o2)
3888 1: lduw [%o1], %g1
3889- sub %g1, %o0, %g7
3890+ subcc %g1, %o0, %g7
3891+
3892+#ifdef CONFIG_PAX_REFCOUNT
3893+ tvs %icc, 6
3894+#endif
3895+
3896 cas [%o1], %g1, %g7
3897 cmp %g1, %g7
3898 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3899@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3900 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3901 BACKOFF_SETUP(%o2)
3902 1: ldx [%o1], %g1
3903- add %g1, %o0, %g7
3904+ addcc %g1, %o0, %g7
3905+
3906+#ifdef CONFIG_PAX_REFCOUNT
3907+ tvs %xcc, 6
3908+#endif
3909+
3910 casx [%o1], %g1, %g7
3911 cmp %g1, %g7
3912 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3913@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3914 2: BACKOFF_SPIN(%o2, %o3, 1b)
3915 .size atomic64_add, .-atomic64_add
3916
3917+ .globl atomic64_add_unchecked
3918+ .type atomic64_add_unchecked,#function
3919+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3920+ BACKOFF_SETUP(%o2)
3921+1: ldx [%o1], %g1
3922+ addcc %g1, %o0, %g7
3923+ casx [%o1], %g1, %g7
3924+ cmp %g1, %g7
3925+ bne,pn %xcc, 2f
3926+ nop
3927+ retl
3928+ nop
3929+2: BACKOFF_SPIN(%o2, %o3, 1b)
3930+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3931+
3932 .globl atomic64_sub
3933 .type atomic64_sub,#function
3934 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3935 BACKOFF_SETUP(%o2)
3936 1: ldx [%o1], %g1
3937- sub %g1, %o0, %g7
3938+ subcc %g1, %o0, %g7
3939+
3940+#ifdef CONFIG_PAX_REFCOUNT
3941+ tvs %xcc, 6
3942+#endif
3943+
3944 casx [%o1], %g1, %g7
3945 cmp %g1, %g7
3946 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3947@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3948 2: BACKOFF_SPIN(%o2, %o3, 1b)
3949 .size atomic64_sub, .-atomic64_sub
3950
3951+ .globl atomic64_sub_unchecked
3952+ .type atomic64_sub_unchecked,#function
3953+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3954+ BACKOFF_SETUP(%o2)
3955+1: ldx [%o1], %g1
3956+ subcc %g1, %o0, %g7
3957+ casx [%o1], %g1, %g7
3958+ cmp %g1, %g7
3959+ bne,pn %xcc, 2f
3960+ nop
3961+ retl
3962+ nop
3963+2: BACKOFF_SPIN(%o2, %o3, 1b)
3964+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3965+
3966 .globl atomic64_add_ret
3967 .type atomic64_add_ret,#function
3968 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3969 BACKOFF_SETUP(%o2)
3970 1: ldx [%o1], %g1
3971- add %g1, %o0, %g7
3972+ addcc %g1, %o0, %g7
3973+
3974+#ifdef CONFIG_PAX_REFCOUNT
3975+ tvs %xcc, 6
3976+#endif
3977+
3978 casx [%o1], %g1, %g7
3979 cmp %g1, %g7
3980 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3981@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
3982 2: BACKOFF_SPIN(%o2, %o3, 1b)
3983 .size atomic64_add_ret, .-atomic64_add_ret
3984
3985+ .globl atomic64_add_ret_unchecked
3986+ .type atomic64_add_ret_unchecked,#function
3987+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3988+ BACKOFF_SETUP(%o2)
3989+1: ldx [%o1], %g1
3990+ addcc %g1, %o0, %g7
3991+ casx [%o1], %g1, %g7
3992+ cmp %g1, %g7
3993+ bne,pn %xcc, 2f
3994+ add %g7, %o0, %g7
3995+ mov %g7, %o0
3996+ retl
3997+ nop
3998+2: BACKOFF_SPIN(%o2, %o3, 1b)
3999+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4000+
4001 .globl atomic64_sub_ret
4002 .type atomic64_sub_ret,#function
4003 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4004 BACKOFF_SETUP(%o2)
4005 1: ldx [%o1], %g1
4006- sub %g1, %o0, %g7
4007+ subcc %g1, %o0, %g7
4008+
4009+#ifdef CONFIG_PAX_REFCOUNT
4010+ tvs %xcc, 6
4011+#endif
4012+
4013 casx [%o1], %g1, %g7
4014 cmp %g1, %g7
4015 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4016diff -urNp linux-3.1.4/arch/sparc/lib/ksyms.c linux-3.1.4/arch/sparc/lib/ksyms.c
4017--- linux-3.1.4/arch/sparc/lib/ksyms.c 2011-11-11 15:19:27.000000000 -0500
4018+++ linux-3.1.4/arch/sparc/lib/ksyms.c 2011-11-16 18:39:07.000000000 -0500
4019@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4020
4021 /* Atomic counter implementation. */
4022 EXPORT_SYMBOL(atomic_add);
4023+EXPORT_SYMBOL(atomic_add_unchecked);
4024 EXPORT_SYMBOL(atomic_add_ret);
4025+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4026 EXPORT_SYMBOL(atomic_sub);
4027+EXPORT_SYMBOL(atomic_sub_unchecked);
4028 EXPORT_SYMBOL(atomic_sub_ret);
4029 EXPORT_SYMBOL(atomic64_add);
4030+EXPORT_SYMBOL(atomic64_add_unchecked);
4031 EXPORT_SYMBOL(atomic64_add_ret);
4032+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4033 EXPORT_SYMBOL(atomic64_sub);
4034+EXPORT_SYMBOL(atomic64_sub_unchecked);
4035 EXPORT_SYMBOL(atomic64_sub_ret);
4036
4037 /* Atomic bit operations. */
4038diff -urNp linux-3.1.4/arch/sparc/lib/Makefile linux-3.1.4/arch/sparc/lib/Makefile
4039--- linux-3.1.4/arch/sparc/lib/Makefile 2011-11-11 15:19:27.000000000 -0500
4040+++ linux-3.1.4/arch/sparc/lib/Makefile 2011-11-16 18:39:07.000000000 -0500
4041@@ -2,7 +2,7 @@
4042 #
4043
4044 asflags-y := -ansi -DST_DIV0=0x02
4045-ccflags-y := -Werror
4046+#ccflags-y := -Werror
4047
4048 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4049 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4050diff -urNp linux-3.1.4/arch/sparc/Makefile linux-3.1.4/arch/sparc/Makefile
4051--- linux-3.1.4/arch/sparc/Makefile 2011-11-11 15:19:27.000000000 -0500
4052+++ linux-3.1.4/arch/sparc/Makefile 2011-11-16 18:40:08.000000000 -0500
4053@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4054 # Export what is needed by arch/sparc/boot/Makefile
4055 export VMLINUX_INIT VMLINUX_MAIN
4056 VMLINUX_INIT := $(head-y) $(init-y)
4057-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4058+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4059 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4060 VMLINUX_MAIN += $(drivers-y) $(net-y)
4061
4062diff -urNp linux-3.1.4/arch/sparc/mm/fault_32.c linux-3.1.4/arch/sparc/mm/fault_32.c
4063--- linux-3.1.4/arch/sparc/mm/fault_32.c 2011-11-11 15:19:27.000000000 -0500
4064+++ linux-3.1.4/arch/sparc/mm/fault_32.c 2011-11-16 18:39:07.000000000 -0500
4065@@ -22,6 +22,9 @@
4066 #include <linux/interrupt.h>
4067 #include <linux/module.h>
4068 #include <linux/kdebug.h>
4069+#include <linux/slab.h>
4070+#include <linux/pagemap.h>
4071+#include <linux/compiler.h>
4072
4073 #include <asm/system.h>
4074 #include <asm/page.h>
4075@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4076 return safe_compute_effective_address(regs, insn);
4077 }
4078
4079+#ifdef CONFIG_PAX_PAGEEXEC
4080+#ifdef CONFIG_PAX_DLRESOLVE
4081+static void pax_emuplt_close(struct vm_area_struct *vma)
4082+{
4083+ vma->vm_mm->call_dl_resolve = 0UL;
4084+}
4085+
4086+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4087+{
4088+ unsigned int *kaddr;
4089+
4090+ vmf->page = alloc_page(GFP_HIGHUSER);
4091+ if (!vmf->page)
4092+ return VM_FAULT_OOM;
4093+
4094+ kaddr = kmap(vmf->page);
4095+ memset(kaddr, 0, PAGE_SIZE);
4096+ kaddr[0] = 0x9DE3BFA8U; /* save */
4097+ flush_dcache_page(vmf->page);
4098+ kunmap(vmf->page);
4099+ return VM_FAULT_MAJOR;
4100+}
4101+
4102+static const struct vm_operations_struct pax_vm_ops = {
4103+ .close = pax_emuplt_close,
4104+ .fault = pax_emuplt_fault
4105+};
4106+
4107+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4108+{
4109+ int ret;
4110+
4111+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4112+ vma->vm_mm = current->mm;
4113+ vma->vm_start = addr;
4114+ vma->vm_end = addr + PAGE_SIZE;
4115+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4116+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4117+ vma->vm_ops = &pax_vm_ops;
4118+
4119+ ret = insert_vm_struct(current->mm, vma);
4120+ if (ret)
4121+ return ret;
4122+
4123+ ++current->mm->total_vm;
4124+ return 0;
4125+}
4126+#endif
4127+
4128+/*
4129+ * PaX: decide what to do with offenders (regs->pc = fault address)
4130+ *
4131+ * returns 1 when task should be killed
4132+ * 2 when patched PLT trampoline was detected
4133+ * 3 when unpatched PLT trampoline was detected
4134+ */
4135+static int pax_handle_fetch_fault(struct pt_regs *regs)
4136+{
4137+
4138+#ifdef CONFIG_PAX_EMUPLT
4139+ int err;
4140+
4141+ do { /* PaX: patched PLT emulation #1 */
4142+ unsigned int sethi1, sethi2, jmpl;
4143+
4144+ err = get_user(sethi1, (unsigned int *)regs->pc);
4145+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4146+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4147+
4148+ if (err)
4149+ break;
4150+
4151+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4152+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4153+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4154+ {
4155+ unsigned int addr;
4156+
4157+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4158+ addr = regs->u_regs[UREG_G1];
4159+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4160+ regs->pc = addr;
4161+ regs->npc = addr+4;
4162+ return 2;
4163+ }
4164+ } while (0);
4165+
4166+ { /* PaX: patched PLT emulation #2 */
4167+ unsigned int ba;
4168+
4169+ err = get_user(ba, (unsigned int *)regs->pc);
4170+
4171+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4172+ unsigned int addr;
4173+
4174+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4175+ regs->pc = addr;
4176+ regs->npc = addr+4;
4177+ return 2;
4178+ }
4179+ }
4180+
4181+ do { /* PaX: patched PLT emulation #3 */
4182+ unsigned int sethi, jmpl, nop;
4183+
4184+ err = get_user(sethi, (unsigned int *)regs->pc);
4185+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4186+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4187+
4188+ if (err)
4189+ break;
4190+
4191+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4192+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4193+ nop == 0x01000000U)
4194+ {
4195+ unsigned int addr;
4196+
4197+ addr = (sethi & 0x003FFFFFU) << 10;
4198+ regs->u_regs[UREG_G1] = addr;
4199+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4200+ regs->pc = addr;
4201+ regs->npc = addr+4;
4202+ return 2;
4203+ }
4204+ } while (0);
4205+
4206+ do { /* PaX: unpatched PLT emulation step 1 */
4207+ unsigned int sethi, ba, nop;
4208+
4209+ err = get_user(sethi, (unsigned int *)regs->pc);
4210+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4211+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4212+
4213+ if (err)
4214+ break;
4215+
4216+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4217+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4218+ nop == 0x01000000U)
4219+ {
4220+ unsigned int addr, save, call;
4221+
4222+ if ((ba & 0xFFC00000U) == 0x30800000U)
4223+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4224+ else
4225+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4226+
4227+ err = get_user(save, (unsigned int *)addr);
4228+ err |= get_user(call, (unsigned int *)(addr+4));
4229+ err |= get_user(nop, (unsigned int *)(addr+8));
4230+ if (err)
4231+ break;
4232+
4233+#ifdef CONFIG_PAX_DLRESOLVE
4234+ if (save == 0x9DE3BFA8U &&
4235+ (call & 0xC0000000U) == 0x40000000U &&
4236+ nop == 0x01000000U)
4237+ {
4238+ struct vm_area_struct *vma;
4239+ unsigned long call_dl_resolve;
4240+
4241+ down_read(&current->mm->mmap_sem);
4242+ call_dl_resolve = current->mm->call_dl_resolve;
4243+ up_read(&current->mm->mmap_sem);
4244+ if (likely(call_dl_resolve))
4245+ goto emulate;
4246+
4247+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4248+
4249+ down_write(&current->mm->mmap_sem);
4250+ if (current->mm->call_dl_resolve) {
4251+ call_dl_resolve = current->mm->call_dl_resolve;
4252+ up_write(&current->mm->mmap_sem);
4253+ if (vma)
4254+ kmem_cache_free(vm_area_cachep, vma);
4255+ goto emulate;
4256+ }
4257+
4258+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4259+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4260+ up_write(&current->mm->mmap_sem);
4261+ if (vma)
4262+ kmem_cache_free(vm_area_cachep, vma);
4263+ return 1;
4264+ }
4265+
4266+ if (pax_insert_vma(vma, call_dl_resolve)) {
4267+ up_write(&current->mm->mmap_sem);
4268+ kmem_cache_free(vm_area_cachep, vma);
4269+ return 1;
4270+ }
4271+
4272+ current->mm->call_dl_resolve = call_dl_resolve;
4273+ up_write(&current->mm->mmap_sem);
4274+
4275+emulate:
4276+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4277+ regs->pc = call_dl_resolve;
4278+ regs->npc = addr+4;
4279+ return 3;
4280+ }
4281+#endif
4282+
4283+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4284+ if ((save & 0xFFC00000U) == 0x05000000U &&
4285+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4286+ nop == 0x01000000U)
4287+ {
4288+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4289+ regs->u_regs[UREG_G2] = addr + 4;
4290+ addr = (save & 0x003FFFFFU) << 10;
4291+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4292+ regs->pc = addr;
4293+ regs->npc = addr+4;
4294+ return 3;
4295+ }
4296+ }
4297+ } while (0);
4298+
4299+ do { /* PaX: unpatched PLT emulation step 2 */
4300+ unsigned int save, call, nop;
4301+
4302+ err = get_user(save, (unsigned int *)(regs->pc-4));
4303+ err |= get_user(call, (unsigned int *)regs->pc);
4304+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4305+ if (err)
4306+ break;
4307+
4308+ if (save == 0x9DE3BFA8U &&
4309+ (call & 0xC0000000U) == 0x40000000U &&
4310+ nop == 0x01000000U)
4311+ {
4312+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4313+
4314+ regs->u_regs[UREG_RETPC] = regs->pc;
4315+ regs->pc = dl_resolve;
4316+ regs->npc = dl_resolve+4;
4317+ return 3;
4318+ }
4319+ } while (0);
4320+#endif
4321+
4322+ return 1;
4323+}
4324+
4325+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4326+{
4327+ unsigned long i;
4328+
4329+ printk(KERN_ERR "PAX: bytes at PC: ");
4330+ for (i = 0; i < 8; i++) {
4331+ unsigned int c;
4332+ if (get_user(c, (unsigned int *)pc+i))
4333+ printk(KERN_CONT "???????? ");
4334+ else
4335+ printk(KERN_CONT "%08x ", c);
4336+ }
4337+ printk("\n");
4338+}
4339+#endif
4340+
4341 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4342 int text_fault)
4343 {
4344@@ -281,6 +546,24 @@ good_area:
4345 if(!(vma->vm_flags & VM_WRITE))
4346 goto bad_area;
4347 } else {
4348+
4349+#ifdef CONFIG_PAX_PAGEEXEC
4350+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4351+ up_read(&mm->mmap_sem);
4352+ switch (pax_handle_fetch_fault(regs)) {
4353+
4354+#ifdef CONFIG_PAX_EMUPLT
4355+ case 2:
4356+ case 3:
4357+ return;
4358+#endif
4359+
4360+ }
4361+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4362+ do_group_exit(SIGKILL);
4363+ }
4364+#endif
4365+
4366 /* Allow reads even for write-only mappings */
4367 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4368 goto bad_area;
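The PLT-emulation code above decodes user instructions by hand: a sethi immediate becomes `(insn & 0x003FFFFF) << 10`, and branch displacements are sign-extended with an or/xor/add idiom such as `(((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U)`. A small host-side check (hypothetical helper names) that the idiom really is 22-bit sign extension:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Straightforward sign extension of a branch word's low 22 bits. */
static uint32_t sext22(uint32_t insn)
{
	uint32_t field = insn & 0x003FFFFFU;

	return (field & 0x00200000U) ? (field | 0xFFC00000U) : field;
}

/* The or/xor/add idiom used by pax_handle_fetch_fault() above. */
static uint32_t pax_idiom(uint32_t insn)
{
	return ((insn | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U;
}

int main(void)
{
	/* Branch words in the 0x30800000 form matched by the handler,
	 * with positive and negative displacements. */
	uint32_t samples[] = { 0x30800000U, 0x30800001U, 0x309FFFFFU,
			       0x30A00000U, 0x30BFFFFFU };
	uint32_t sethi = 0x03000000U | 0x00123456U;	/* sethi %hi(X), %g1 */
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(sext22(samples[i]) == pax_idiom(samples[i]));

	/* And the sethi immediate: bits 21..0 shifted into bits 31..10. */
	printf("sethi immediate -> %#x\n", (sethi & 0x003FFFFFU) << 10);
	printf("all displacement checks passed\n");
	return 0;
}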
4369diff -urNp linux-3.1.4/arch/sparc/mm/fault_64.c linux-3.1.4/arch/sparc/mm/fault_64.c
4370--- linux-3.1.4/arch/sparc/mm/fault_64.c 2011-11-11 15:19:27.000000000 -0500
4371+++ linux-3.1.4/arch/sparc/mm/fault_64.c 2011-11-16 18:40:08.000000000 -0500
4372@@ -21,6 +21,9 @@
4373 #include <linux/kprobes.h>
4374 #include <linux/kdebug.h>
4375 #include <linux/percpu.h>
4376+#include <linux/slab.h>
4377+#include <linux/pagemap.h>
4378+#include <linux/compiler.h>
4379
4380 #include <asm/page.h>
4381 #include <asm/pgtable.h>
4382@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4383 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4384 regs->tpc);
4385 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4386- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4387+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4388 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4389 dump_stack();
4390 unhandled_fault(regs->tpc, current, regs);
4391@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4392 show_regs(regs);
4393 }
4394
4395+#ifdef CONFIG_PAX_PAGEEXEC
4396+#ifdef CONFIG_PAX_DLRESOLVE
4397+static void pax_emuplt_close(struct vm_area_struct *vma)
4398+{
4399+ vma->vm_mm->call_dl_resolve = 0UL;
4400+}
4401+
4402+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4403+{
4404+ unsigned int *kaddr;
4405+
4406+ vmf->page = alloc_page(GFP_HIGHUSER);
4407+ if (!vmf->page)
4408+ return VM_FAULT_OOM;
4409+
4410+ kaddr = kmap(vmf->page);
4411+ memset(kaddr, 0, PAGE_SIZE);
4412+ kaddr[0] = 0x9DE3BFA8U; /* save */
4413+ flush_dcache_page(vmf->page);
4414+ kunmap(vmf->page);
4415+ return VM_FAULT_MAJOR;
4416+}
4417+
4418+static const struct vm_operations_struct pax_vm_ops = {
4419+ .close = pax_emuplt_close,
4420+ .fault = pax_emuplt_fault
4421+};
4422+
4423+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4424+{
4425+ int ret;
4426+
4427+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4428+ vma->vm_mm = current->mm;
4429+ vma->vm_start = addr;
4430+ vma->vm_end = addr + PAGE_SIZE;
4431+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4432+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4433+ vma->vm_ops = &pax_vm_ops;
4434+
4435+ ret = insert_vm_struct(current->mm, vma);
4436+ if (ret)
4437+ return ret;
4438+
4439+ ++current->mm->total_vm;
4440+ return 0;
4441+}
4442+#endif
4443+
4444+/*
4445+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4446+ *
4447+ * returns 1 when task should be killed
4448+ * 2 when patched PLT trampoline was detected
4449+ * 3 when unpatched PLT trampoline was detected
4450+ */
4451+static int pax_handle_fetch_fault(struct pt_regs *regs)
4452+{
4453+
4454+#ifdef CONFIG_PAX_EMUPLT
4455+ int err;
4456+
4457+ do { /* PaX: patched PLT emulation #1 */
4458+ unsigned int sethi1, sethi2, jmpl;
4459+
4460+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4461+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4462+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4463+
4464+ if (err)
4465+ break;
4466+
4467+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4468+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4469+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4470+ {
4471+ unsigned long addr;
4472+
4473+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4474+ addr = regs->u_regs[UREG_G1];
4475+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4476+
4477+ if (test_thread_flag(TIF_32BIT))
4478+ addr &= 0xFFFFFFFFUL;
4479+
4480+ regs->tpc = addr;
4481+ regs->tnpc = addr+4;
4482+ return 2;
4483+ }
4484+ } while (0);
4485+
4486+ { /* PaX: patched PLT emulation #2 */
4487+ unsigned int ba;
4488+
4489+ err = get_user(ba, (unsigned int *)regs->tpc);
4490+
4491+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4492+ unsigned long addr;
4493+
4494+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4495+
4496+ if (test_thread_flag(TIF_32BIT))
4497+ addr &= 0xFFFFFFFFUL;
4498+
4499+ regs->tpc = addr;
4500+ regs->tnpc = addr+4;
4501+ return 2;
4502+ }
4503+ }
4504+
4505+ do { /* PaX: patched PLT emulation #3 */
4506+ unsigned int sethi, jmpl, nop;
4507+
4508+ err = get_user(sethi, (unsigned int *)regs->tpc);
4509+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4510+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4511+
4512+ if (err)
4513+ break;
4514+
4515+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4516+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4517+ nop == 0x01000000U)
4518+ {
4519+ unsigned long addr;
4520+
4521+ addr = (sethi & 0x003FFFFFU) << 10;
4522+ regs->u_regs[UREG_G1] = addr;
4523+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ } while (0);
4533+
4534+ do { /* PaX: patched PLT emulation #4 */
4535+ unsigned int sethi, mov1, call, mov2;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4540+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4541+
4542+ if (err)
4543+ break;
4544+
4545+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4546+ mov1 == 0x8210000FU &&
4547+ (call & 0xC0000000U) == 0x40000000U &&
4548+ mov2 == 0x9E100001U)
4549+ {
4550+ unsigned long addr;
4551+
4552+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4553+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4554+
4555+ if (test_thread_flag(TIF_32BIT))
4556+ addr &= 0xFFFFFFFFUL;
4557+
4558+ regs->tpc = addr;
4559+ regs->tnpc = addr+4;
4560+ return 2;
4561+ }
4562+ } while (0);
4563+
4564+ do { /* PaX: patched PLT emulation #5 */
4565+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4566+
4567+ err = get_user(sethi, (unsigned int *)regs->tpc);
4568+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4569+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4570+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4571+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4572+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4573+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4574+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4575+
4576+ if (err)
4577+ break;
4578+
4579+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4580+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4581+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4582+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4583+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4584+ sllx == 0x83287020U &&
4585+ jmpl == 0x81C04005U &&
4586+ nop == 0x01000000U)
4587+ {
4588+ unsigned long addr;
4589+
4590+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4591+ regs->u_regs[UREG_G1] <<= 32;
4592+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4593+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4594+ regs->tpc = addr;
4595+ regs->tnpc = addr+4;
4596+ return 2;
4597+ }
4598+ } while (0);
4599+
4600+ do { /* PaX: patched PLT emulation #6 */
4601+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4602+
4603+ err = get_user(sethi, (unsigned int *)regs->tpc);
4604+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4605+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4606+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4607+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4608+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4609+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4610+
4611+ if (err)
4612+ break;
4613+
4614+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4615+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4616+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4617+ sllx == 0x83287020U &&
4618+ (or & 0xFFFFE000U) == 0x8A116000U &&
4619+ jmpl == 0x81C04005U &&
4620+ nop == 0x01000000U)
4621+ {
4622+ unsigned long addr;
4623+
4624+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4625+ regs->u_regs[UREG_G1] <<= 32;
4626+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4627+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4628+ regs->tpc = addr;
4629+ regs->tnpc = addr+4;
4630+ return 2;
4631+ }
4632+ } while (0);
4633+
4634+ do { /* PaX: unpatched PLT emulation step 1 */
4635+ unsigned int sethi, ba, nop;
4636+
4637+ err = get_user(sethi, (unsigned int *)regs->tpc);
4638+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4639+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4640+
4641+ if (err)
4642+ break;
4643+
4644+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4645+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4646+ nop == 0x01000000U)
4647+ {
4648+ unsigned long addr;
4649+ unsigned int save, call;
4650+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4651+
4652+ if ((ba & 0xFFC00000U) == 0x30800000U)
4653+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4654+ else
4655+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4656+
4657+ if (test_thread_flag(TIF_32BIT))
4658+ addr &= 0xFFFFFFFFUL;
4659+
4660+ err = get_user(save, (unsigned int *)addr);
4661+ err |= get_user(call, (unsigned int *)(addr+4));
4662+ err |= get_user(nop, (unsigned int *)(addr+8));
4663+ if (err)
4664+ break;
4665+
4666+#ifdef CONFIG_PAX_DLRESOLVE
4667+ if (save == 0x9DE3BFA8U &&
4668+ (call & 0xC0000000U) == 0x40000000U &&
4669+ nop == 0x01000000U)
4670+ {
4671+ struct vm_area_struct *vma;
4672+ unsigned long call_dl_resolve;
4673+
4674+ down_read(&current->mm->mmap_sem);
4675+ call_dl_resolve = current->mm->call_dl_resolve;
4676+ up_read(&current->mm->mmap_sem);
4677+ if (likely(call_dl_resolve))
4678+ goto emulate;
4679+
4680+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4681+
4682+ down_write(&current->mm->mmap_sem);
4683+ if (current->mm->call_dl_resolve) {
4684+ call_dl_resolve = current->mm->call_dl_resolve;
4685+ up_write(&current->mm->mmap_sem);
4686+ if (vma)
4687+ kmem_cache_free(vm_area_cachep, vma);
4688+ goto emulate;
4689+ }
4690+
4691+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4692+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4693+ up_write(&current->mm->mmap_sem);
4694+ if (vma)
4695+ kmem_cache_free(vm_area_cachep, vma);
4696+ return 1;
4697+ }
4698+
4699+ if (pax_insert_vma(vma, call_dl_resolve)) {
4700+ up_write(&current->mm->mmap_sem);
4701+ kmem_cache_free(vm_area_cachep, vma);
4702+ return 1;
4703+ }
4704+
4705+ current->mm->call_dl_resolve = call_dl_resolve;
4706+ up_write(&current->mm->mmap_sem);
4707+
4708+emulate:
4709+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4710+ regs->tpc = call_dl_resolve;
4711+ regs->tnpc = addr+4;
4712+ return 3;
4713+ }
4714+#endif
4715+
4716+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4717+ if ((save & 0xFFC00000U) == 0x05000000U &&
4718+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4719+ nop == 0x01000000U)
4720+ {
4721+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4722+ regs->u_regs[UREG_G2] = addr + 4;
4723+ addr = (save & 0x003FFFFFU) << 10;
4724+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4725+
4726+ if (test_thread_flag(TIF_32BIT))
4727+ addr &= 0xFFFFFFFFUL;
4728+
4729+ regs->tpc = addr;
4730+ regs->tnpc = addr+4;
4731+ return 3;
4732+ }
4733+
4734+ /* PaX: 64-bit PLT stub */
4735+ err = get_user(sethi1, (unsigned int *)addr);
4736+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4737+ err |= get_user(or1, (unsigned int *)(addr+8));
4738+ err |= get_user(or2, (unsigned int *)(addr+12));
4739+ err |= get_user(sllx, (unsigned int *)(addr+16));
4740+ err |= get_user(add, (unsigned int *)(addr+20));
4741+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4742+ err |= get_user(nop, (unsigned int *)(addr+28));
4743+ if (err)
4744+ break;
4745+
4746+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4747+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4748+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4749+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4750+ sllx == 0x89293020U &&
4751+ add == 0x8A010005U &&
4752+ jmpl == 0x89C14000U &&
4753+ nop == 0x01000000U)
4754+ {
4755+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4756+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4757+ regs->u_regs[UREG_G4] <<= 32;
4758+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4759+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4760+ regs->u_regs[UREG_G4] = addr + 24;
4761+ addr = regs->u_regs[UREG_G5];
4762+ regs->tpc = addr;
4763+ regs->tnpc = addr+4;
4764+ return 3;
4765+ }
4766+ }
4767+ } while (0);
4768+
4769+#ifdef CONFIG_PAX_DLRESOLVE
4770+ do { /* PaX: unpatched PLT emulation step 2 */
4771+ unsigned int save, call, nop;
4772+
4773+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4774+ err |= get_user(call, (unsigned int *)regs->tpc);
4775+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4776+ if (err)
4777+ break;
4778+
4779+ if (save == 0x9DE3BFA8U &&
4780+ (call & 0xC0000000U) == 0x40000000U &&
4781+ nop == 0x01000000U)
4782+ {
4783+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4784+
4785+ if (test_thread_flag(TIF_32BIT))
4786+ dl_resolve &= 0xFFFFFFFFUL;
4787+
4788+ regs->u_regs[UREG_RETPC] = regs->tpc;
4789+ regs->tpc = dl_resolve;
4790+ regs->tnpc = dl_resolve+4;
4791+ return 3;
4792+ }
4793+ } while (0);
4794+#endif
4795+
4796+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4797+ unsigned int sethi, ba, nop;
4798+
4799+ err = get_user(sethi, (unsigned int *)regs->tpc);
4800+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4801+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4802+
4803+ if (err)
4804+ break;
4805+
4806+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4807+ (ba & 0xFFF00000U) == 0x30600000U &&
4808+ nop == 0x01000000U)
4809+ {
4810+ unsigned long addr;
4811+
4812+ addr = (sethi & 0x003FFFFFU) << 10;
4813+ regs->u_regs[UREG_G1] = addr;
4814+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4815+
4816+ if (test_thread_flag(TIF_32BIT))
4817+ addr &= 0xFFFFFFFFUL;
4818+
4819+ regs->tpc = addr;
4820+ regs->tnpc = addr+4;
4821+ return 2;
4822+ }
4823+ } while (0);
4824+
4825+#endif
4826+
4827+ return 1;
4828+}
4829+
4830+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4831+{
4832+ unsigned long i;
4833+
4834+ printk(KERN_ERR "PAX: bytes at PC: ");
4835+ for (i = 0; i < 8; i++) {
4836+ unsigned int c;
4837+ if (get_user(c, (unsigned int *)pc+i))
4838+ printk(KERN_CONT "???????? ");
4839+ else
4840+ printk(KERN_CONT "%08x ", c);
4841+ }
4842+ printk("\n");
4843+}
4844+#endif
4845+
4846 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4847 {
4848 struct mm_struct *mm = current->mm;
4849@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4850 if (!vma)
4851 goto bad_area;
4852
4853+#ifdef CONFIG_PAX_PAGEEXEC
4854+ /* PaX: detect ITLB misses on non-exec pages */
4855+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4856+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4857+ {
4858+ if (address != regs->tpc)
4859+ goto good_area;
4860+
4861+ up_read(&mm->mmap_sem);
4862+ switch (pax_handle_fetch_fault(regs)) {
4863+
4864+#ifdef CONFIG_PAX_EMUPLT
4865+ case 2:
4866+ case 3:
4867+ return;
4868+#endif
4869+
4870+ }
4871+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4872+ do_group_exit(SIGKILL);
4873+ }
4874+#endif
4875+
4876 /* Pure DTLB misses do not tell us whether the fault causing
4877 * load/store/atomic was a write or not, it only says that there
4878 * was no match. So in such a case we (carefully) read the
4879diff -urNp linux-3.1.4/arch/sparc/mm/hugetlbpage.c linux-3.1.4/arch/sparc/mm/hugetlbpage.c
4880--- linux-3.1.4/arch/sparc/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
4881+++ linux-3.1.4/arch/sparc/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
4882@@ -68,7 +68,7 @@ full_search:
4883 }
4884 return -ENOMEM;
4885 }
4886- if (likely(!vma || addr + len <= vma->vm_start)) {
4887+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4888 /*
4889 * Remember the place where we stopped the search:
4890 */
4891@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4892 /* make sure it can fit in the remaining address space */
4893 if (likely(addr > len)) {
4894 vma = find_vma(mm, addr-len);
4895- if (!vma || addr <= vma->vm_start) {
4896+ if (check_heap_stack_gap(vma, addr - len, len)) {
4897 /* remember the address as a hint for next time */
4898 return (mm->free_area_cache = addr-len);
4899 }
4900@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4901 if (unlikely(mm->mmap_base < len))
4902 goto bottomup;
4903
4904- addr = (mm->mmap_base-len) & HPAGE_MASK;
4905+ addr = mm->mmap_base - len;
4906
4907 do {
4908+ addr &= HPAGE_MASK;
4909 /*
4910 * Lookup failure means no vma is above this address,
4911 * else if new region fits below vma->vm_start,
4912 * return with success:
4913 */
4914 vma = find_vma(mm, addr);
4915- if (likely(!vma || addr+len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /* remember the address as a hint for next time */
4918 return (mm->free_area_cache = addr);
4919 }
4920@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4921 mm->cached_hole_size = vma->vm_start - addr;
4922
4923 /* try just below the current vma->vm_start */
4924- addr = (vma->vm_start-len) & HPAGE_MASK;
4925- } while (likely(len < vma->vm_start));
4926+ addr = skip_heap_stack_gap(vma, len);
4927+ } while (!IS_ERR_VALUE(addr));
4928
4929 bottomup:
4930 /*
4931@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4932 if (addr) {
4933 addr = ALIGN(addr, HPAGE_SIZE);
4934 vma = find_vma(mm, addr);
4935- if (task_size - len >= addr &&
4936- (!vma || addr + len <= vma->vm_start))
4937+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4938 return addr;
4939 }
4940 if (mm->get_unmapped_area == arch_get_unmapped_area)
4941diff -urNp linux-3.1.4/arch/sparc/mm/init_32.c linux-3.1.4/arch/sparc/mm/init_32.c
4942--- linux-3.1.4/arch/sparc/mm/init_32.c 2011-11-11 15:19:27.000000000 -0500
4943+++ linux-3.1.4/arch/sparc/mm/init_32.c 2011-11-16 18:39:07.000000000 -0500
4944@@ -316,6 +316,9 @@ extern void device_scan(void);
4945 pgprot_t PAGE_SHARED __read_mostly;
4946 EXPORT_SYMBOL(PAGE_SHARED);
4947
4948+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4949+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4950+
4951 void __init paging_init(void)
4952 {
4953 switch(sparc_cpu_model) {
4954@@ -344,17 +347,17 @@ void __init paging_init(void)
4955
4956 /* Initialize the protection map with non-constant, MMU dependent values. */
4957 protection_map[0] = PAGE_NONE;
4958- protection_map[1] = PAGE_READONLY;
4959- protection_map[2] = PAGE_COPY;
4960- protection_map[3] = PAGE_COPY;
4961+ protection_map[1] = PAGE_READONLY_NOEXEC;
4962+ protection_map[2] = PAGE_COPY_NOEXEC;
4963+ protection_map[3] = PAGE_COPY_NOEXEC;
4964 protection_map[4] = PAGE_READONLY;
4965 protection_map[5] = PAGE_READONLY;
4966 protection_map[6] = PAGE_COPY;
4967 protection_map[7] = PAGE_COPY;
4968 protection_map[8] = PAGE_NONE;
4969- protection_map[9] = PAGE_READONLY;
4970- protection_map[10] = PAGE_SHARED;
4971- protection_map[11] = PAGE_SHARED;
4972+ protection_map[9] = PAGE_READONLY_NOEXEC;
4973+ protection_map[10] = PAGE_SHARED_NOEXEC;
4974+ protection_map[11] = PAGE_SHARED_NOEXEC;
4975 protection_map[12] = PAGE_READONLY;
4976 protection_map[13] = PAGE_READONLY;
4977 protection_map[14] = PAGE_SHARED;
4978diff -urNp linux-3.1.4/arch/sparc/mm/Makefile linux-3.1.4/arch/sparc/mm/Makefile
4979--- linux-3.1.4/arch/sparc/mm/Makefile 2011-11-11 15:19:27.000000000 -0500
4980+++ linux-3.1.4/arch/sparc/mm/Makefile 2011-11-16 18:39:07.000000000 -0500
4981@@ -2,7 +2,7 @@
4982 #
4983
4984 asflags-y := -ansi
4985-ccflags-y := -Werror
4986+#ccflags-y := -Werror
4987
4988 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4989 obj-y += fault_$(BITS).o
4990diff -urNp linux-3.1.4/arch/sparc/mm/srmmu.c linux-3.1.4/arch/sparc/mm/srmmu.c
4991--- linux-3.1.4/arch/sparc/mm/srmmu.c 2011-11-11 15:19:27.000000000 -0500
4992+++ linux-3.1.4/arch/sparc/mm/srmmu.c 2011-11-16 18:39:07.000000000 -0500
4993@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
4994 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
4995 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
4996 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
4997+
4998+#ifdef CONFIG_PAX_PAGEEXEC
4999+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5000+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5001+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5002+#endif
5003+
5004 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5005 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5006
5007diff -urNp linux-3.1.4/arch/um/include/asm/kmap_types.h linux-3.1.4/arch/um/include/asm/kmap_types.h
5008--- linux-3.1.4/arch/um/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
5009+++ linux-3.1.4/arch/um/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
5010@@ -23,6 +23,7 @@ enum km_type {
5011 KM_IRQ1,
5012 KM_SOFTIRQ0,
5013 KM_SOFTIRQ1,
5014+ KM_CLEARPAGE,
5015 KM_TYPE_NR
5016 };
5017
5018diff -urNp linux-3.1.4/arch/um/include/asm/page.h linux-3.1.4/arch/um/include/asm/page.h
5019--- linux-3.1.4/arch/um/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
5020+++ linux-3.1.4/arch/um/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
5021@@ -14,6 +14,9 @@
5022 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5023 #define PAGE_MASK (~(PAGE_SIZE-1))
5024
5025+#define ktla_ktva(addr) (addr)
5026+#define ktva_ktla(addr) (addr)
5027+
5028 #ifndef __ASSEMBLY__
5029
5030 struct page;
5031diff -urNp linux-3.1.4/arch/um/kernel/process.c linux-3.1.4/arch/um/kernel/process.c
5032--- linux-3.1.4/arch/um/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
5033+++ linux-3.1.4/arch/um/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
5034@@ -404,22 +404,6 @@ int singlestepping(void * t)
5035 return 2;
5036 }
5037
5038-/*
5039- * Only x86 and x86_64 have an arch_align_stack().
5040- * All other arches have "#define arch_align_stack(x) (x)"
5041- * in their asm/system.h
5042- * As this is included in UML from asm-um/system-generic.h,
5043- * we can use it to behave as the subarch does.
5044- */
5045-#ifndef arch_align_stack
5046-unsigned long arch_align_stack(unsigned long sp)
5047-{
5048- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5049- sp -= get_random_int() % 8192;
5050- return sp & ~0xf;
5051-}
5052-#endif
5053-
5054 unsigned long get_wchan(struct task_struct *p)
5055 {
5056 unsigned long stack_page, sp, ip;
5057diff -urNp linux-3.1.4/arch/um/Makefile linux-3.1.4/arch/um/Makefile
5058--- linux-3.1.4/arch/um/Makefile 2011-11-11 15:19:27.000000000 -0500
5059+++ linux-3.1.4/arch/um/Makefile 2011-11-16 18:39:07.000000000 -0500
5060@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINE
5061 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5062 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5063
5064+ifdef CONSTIFY_PLUGIN
5065+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5066+endif
5067+
5068 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5069
5070 #This will adjust *FLAGS accordingly to the platform.
5071diff -urNp linux-3.1.4/arch/um/sys-i386/shared/sysdep/system.h linux-3.1.4/arch/um/sys-i386/shared/sysdep/system.h
5072--- linux-3.1.4/arch/um/sys-i386/shared/sysdep/system.h 2011-11-11 15:19:27.000000000 -0500
5073+++ linux-3.1.4/arch/um/sys-i386/shared/sysdep/system.h 2011-11-16 18:39:07.000000000 -0500
5074@@ -17,7 +17,7 @@
5075 # define AT_VECTOR_SIZE_ARCH 1
5076 #endif
5077
5078-extern unsigned long arch_align_stack(unsigned long sp);
5079+#define arch_align_stack(x) ((x) & ~0xfUL)
5080
5081 void default_idle(void);
5082
5083diff -urNp linux-3.1.4/arch/um/sys-i386/syscalls.c linux-3.1.4/arch/um/sys-i386/syscalls.c
5084--- linux-3.1.4/arch/um/sys-i386/syscalls.c 2011-11-11 15:19:27.000000000 -0500
5085+++ linux-3.1.4/arch/um/sys-i386/syscalls.c 2011-11-16 18:39:07.000000000 -0500
5086@@ -11,6 +11,21 @@
5087 #include "asm/uaccess.h"
5088 #include "asm/unistd.h"
5089
5090+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5091+{
5092+ unsigned long pax_task_size = TASK_SIZE;
5093+
5094+#ifdef CONFIG_PAX_SEGMEXEC
5095+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5096+ pax_task_size = SEGMEXEC_TASK_SIZE;
5097+#endif
5098+
5099+ if (len > pax_task_size || addr > pax_task_size - len)
5100+ return -EINVAL;
5101+
5102+ return 0;
5103+}
5104+
5105 /*
5106 * The prototype on i386 is:
5107 *
5108diff -urNp linux-3.1.4/arch/um/sys-x86_64/shared/sysdep/system.h linux-3.1.4/arch/um/sys-x86_64/shared/sysdep/system.h
5109--- linux-3.1.4/arch/um/sys-x86_64/shared/sysdep/system.h 2011-11-11 15:19:27.000000000 -0500
5110+++ linux-3.1.4/arch/um/sys-x86_64/shared/sysdep/system.h 2011-11-16 18:39:07.000000000 -0500
5111@@ -17,7 +17,7 @@
5112 # define AT_VECTOR_SIZE_ARCH 1
5113 #endif
5114
5115-extern unsigned long arch_align_stack(unsigned long sp);
5116+#define arch_align_stack(x) ((x) & ~0xfUL)
5117
5118 void default_idle(void);
5119
5120diff -urNp linux-3.1.4/arch/x86/boot/bitops.h linux-3.1.4/arch/x86/boot/bitops.h
5121--- linux-3.1.4/arch/x86/boot/bitops.h 2011-11-11 15:19:27.000000000 -0500
5122+++ linux-3.1.4/arch/x86/boot/bitops.h 2011-11-16 18:39:07.000000000 -0500
5123@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5124 u8 v;
5125 const u32 *p = (const u32 *)addr;
5126
5127- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5128+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5129 return v;
5130 }
5131
5132@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5133
5134 static inline void set_bit(int nr, void *addr)
5135 {
5136- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5137+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5138 }
5139
5140 #endif /* BOOT_BITOPS_H */
5141diff -urNp linux-3.1.4/arch/x86/boot/boot.h linux-3.1.4/arch/x86/boot/boot.h
5142--- linux-3.1.4/arch/x86/boot/boot.h 2011-11-11 15:19:27.000000000 -0500
5143+++ linux-3.1.4/arch/x86/boot/boot.h 2011-11-16 18:39:07.000000000 -0500
5144@@ -85,7 +85,7 @@ static inline void io_delay(void)
5145 static inline u16 ds(void)
5146 {
5147 u16 seg;
5148- asm("movw %%ds,%0" : "=rm" (seg));
5149+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5150 return seg;
5151 }
5152
5153@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5154 static inline int memcmp(const void *s1, const void *s2, size_t len)
5155 {
5156 u8 diff;
5157- asm("repe; cmpsb; setnz %0"
5158+ asm volatile("repe; cmpsb; setnz %0"
5159 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5160 return diff;
5161 }
5162diff -urNp linux-3.1.4/arch/x86/boot/compressed/head_32.S linux-3.1.4/arch/x86/boot/compressed/head_32.S
5163--- linux-3.1.4/arch/x86/boot/compressed/head_32.S 2011-11-11 15:19:27.000000000 -0500
5164+++ linux-3.1.4/arch/x86/boot/compressed/head_32.S 2011-11-16 18:39:07.000000000 -0500
5165@@ -76,7 +76,7 @@ ENTRY(startup_32)
5166 notl %eax
5167 andl %eax, %ebx
5168 #else
5169- movl $LOAD_PHYSICAL_ADDR, %ebx
5170+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5171 #endif
5172
5173 /* Target address to relocate to for decompression */
5174@@ -162,7 +162,7 @@ relocated:
5175 * and where it was actually loaded.
5176 */
5177 movl %ebp, %ebx
5178- subl $LOAD_PHYSICAL_ADDR, %ebx
5179+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5180 jz 2f /* Nothing to be done if loaded at compiled addr. */
5181 /*
5182 * Process relocations.
5183@@ -170,8 +170,7 @@ relocated:
5184
5185 1: subl $4, %edi
5186 movl (%edi), %ecx
5187- testl %ecx, %ecx
5188- jz 2f
5189+ jecxz 2f
5190 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5191 jmp 1b
5192 2:
5193diff -urNp linux-3.1.4/arch/x86/boot/compressed/head_64.S linux-3.1.4/arch/x86/boot/compressed/head_64.S
5194--- linux-3.1.4/arch/x86/boot/compressed/head_64.S 2011-11-11 15:19:27.000000000 -0500
5195+++ linux-3.1.4/arch/x86/boot/compressed/head_64.S 2011-11-16 18:39:07.000000000 -0500
5196@@ -91,7 +91,7 @@ ENTRY(startup_32)
5197 notl %eax
5198 andl %eax, %ebx
5199 #else
5200- movl $LOAD_PHYSICAL_ADDR, %ebx
5201+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205@@ -233,7 +233,7 @@ ENTRY(startup_64)
5206 notq %rax
5207 andq %rax, %rbp
5208 #else
5209- movq $LOAD_PHYSICAL_ADDR, %rbp
5210+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5211 #endif
5212
5213 /* Target address to relocate to for decompression */
5214diff -urNp linux-3.1.4/arch/x86/boot/compressed/Makefile linux-3.1.4/arch/x86/boot/compressed/Makefile
5215--- linux-3.1.4/arch/x86/boot/compressed/Makefile 2011-11-11 15:19:27.000000000 -0500
5216+++ linux-3.1.4/arch/x86/boot/compressed/Makefile 2011-11-16 18:39:07.000000000 -0500
5217@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5218 KBUILD_CFLAGS += $(cflags-y)
5219 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5220 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5221+ifdef CONSTIFY_PLUGIN
5222+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5223+endif
5224
5225 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5226 GCOV_PROFILE := n
5227diff -urNp linux-3.1.4/arch/x86/boot/compressed/misc.c linux-3.1.4/arch/x86/boot/compressed/misc.c
5228--- linux-3.1.4/arch/x86/boot/compressed/misc.c 2011-11-11 15:19:27.000000000 -0500
5229+++ linux-3.1.4/arch/x86/boot/compressed/misc.c 2011-11-16 18:39:07.000000000 -0500
5230@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5231 case PT_LOAD:
5232 #ifdef CONFIG_RELOCATABLE
5233 dest = output;
5234- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5235+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5236 #else
5237 dest = (void *)(phdr->p_paddr);
5238 #endif
5239@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5240 error("Destination address too large");
5241 #endif
5242 #ifndef CONFIG_RELOCATABLE
5243- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5244+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5245 error("Wrong destination address");
5246 #endif
5247
5248diff -urNp linux-3.1.4/arch/x86/boot/compressed/relocs.c linux-3.1.4/arch/x86/boot/compressed/relocs.c
5249--- linux-3.1.4/arch/x86/boot/compressed/relocs.c 2011-11-11 15:19:27.000000000 -0500
5250+++ linux-3.1.4/arch/x86/boot/compressed/relocs.c 2011-11-16 18:39:07.000000000 -0500
5251@@ -13,8 +13,11 @@
5252
5253 static void die(char *fmt, ...);
5254
5255+#include "../../../../include/generated/autoconf.h"
5256+
5257 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5258 static Elf32_Ehdr ehdr;
5259+static Elf32_Phdr *phdr;
5260 static unsigned long reloc_count, reloc_idx;
5261 static unsigned long *relocs;
5262
5263@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5264 }
5265 }
5266
5267+static void read_phdrs(FILE *fp)
5268+{
5269+ unsigned int i;
5270+
5271+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5272+ if (!phdr) {
5273+ die("Unable to allocate %d program headers\n",
5274+ ehdr.e_phnum);
5275+ }
5276+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5277+ die("Seek to %d failed: %s\n",
5278+ ehdr.e_phoff, strerror(errno));
5279+ }
5280+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5281+ die("Cannot read ELF program headers: %s\n",
5282+ strerror(errno));
5283+ }
5284+ for(i = 0; i < ehdr.e_phnum; i++) {
5285+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5286+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5287+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5288+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5289+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5290+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5291+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5292+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5293+ }
5294+
5295+}
5296+
5297 static void read_shdrs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 Elf32_Shdr shdr;
5302
5303 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5304@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5305
5306 static void read_strtabs(FILE *fp)
5307 {
5308- int i;
5309+ unsigned int i;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_STRTAB) {
5313@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5314
5315 static void read_symtabs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319 for (i = 0; i < ehdr.e_shnum; i++) {
5320 struct section *sec = &secs[i];
5321 if (sec->shdr.sh_type != SHT_SYMTAB) {
5322@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5323
5324 static void read_relocs(FILE *fp)
5325 {
5326- int i,j;
5327+ unsigned int i,j;
5328+ uint32_t base;
5329+
5330 for (i = 0; i < ehdr.e_shnum; i++) {
5331 struct section *sec = &secs[i];
5332 if (sec->shdr.sh_type != SHT_REL) {
5333@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5334 die("Cannot read symbol table: %s\n",
5335 strerror(errno));
5336 }
5337+ base = 0;
5338+ for (j = 0; j < ehdr.e_phnum; j++) {
5339+ if (phdr[j].p_type != PT_LOAD )
5340+ continue;
5341+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5342+ continue;
5343+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5344+ break;
5345+ }
5346 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5347 Elf32_Rel *rel = &sec->reltab[j];
5348- rel->r_offset = elf32_to_cpu(rel->r_offset);
5349+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5350 rel->r_info = elf32_to_cpu(rel->r_info);
5351 }
5352 }
5353@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5354
5355 static void print_absolute_symbols(void)
5356 {
5357- int i;
5358+ unsigned int i;
5359 printf("Absolute symbols\n");
5360 printf(" Num: Value Size Type Bind Visibility Name\n");
5361 for (i = 0; i < ehdr.e_shnum; i++) {
5362 struct section *sec = &secs[i];
5363 char *sym_strtab;
5364 Elf32_Sym *sh_symtab;
5365- int j;
5366+ unsigned int j;
5367
5368 if (sec->shdr.sh_type != SHT_SYMTAB) {
5369 continue;
5370@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5371
5372 static void print_absolute_relocs(void)
5373 {
5374- int i, printed = 0;
5375+ unsigned int i, printed = 0;
5376
5377 for (i = 0; i < ehdr.e_shnum; i++) {
5378 struct section *sec = &secs[i];
5379 struct section *sec_applies, *sec_symtab;
5380 char *sym_strtab;
5381 Elf32_Sym *sh_symtab;
5382- int j;
5383+ unsigned int j;
5384 if (sec->shdr.sh_type != SHT_REL) {
5385 continue;
5386 }
5387@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5388
5389 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5390 {
5391- int i;
5392+ unsigned int i;
5393 /* Walk through the relocations */
5394 for (i = 0; i < ehdr.e_shnum; i++) {
5395 char *sym_strtab;
5396 Elf32_Sym *sh_symtab;
5397 struct section *sec_applies, *sec_symtab;
5398- int j;
5399+ unsigned int j;
5400 struct section *sec = &secs[i];
5401
5402 if (sec->shdr.sh_type != SHT_REL) {
5403@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5404 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5405 continue;
5406 }
5407+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5408+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5409+ continue;
5410+
5411+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5412+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5413+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5414+ continue;
5415+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5416+ continue;
5417+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5418+ continue;
5419+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5420+ continue;
5421+#endif
5422+
5423 switch (r_type) {
5424 case R_386_NONE:
5425 case R_386_PC32:
5426@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5427
5428 static void emit_relocs(int as_text)
5429 {
5430- int i;
5431+ unsigned int i;
5432 /* Count how many relocations I have and allocate space for them. */
5433 reloc_count = 0;
5434 walk_relocs(count_reloc);
5435@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5436 fname, strerror(errno));
5437 }
5438 read_ehdr(fp);
5439+ read_phdrs(fp);
5440 read_shdrs(fp);
5441 read_strtabs(fp);
5442 read_symtabs(fp);
5443diff -urNp linux-3.1.4/arch/x86/boot/cpucheck.c linux-3.1.4/arch/x86/boot/cpucheck.c
5444--- linux-3.1.4/arch/x86/boot/cpucheck.c 2011-11-11 15:19:27.000000000 -0500
5445+++ linux-3.1.4/arch/x86/boot/cpucheck.c 2011-11-16 18:39:07.000000000 -0500
5446@@ -74,7 +74,7 @@ static int has_fpu(void)
5447 u16 fcw = -1, fsw = -1;
5448 u32 cr0;
5449
5450- asm("movl %%cr0,%0" : "=r" (cr0));
5451+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5452 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5453 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5454 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5455@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5456 {
5457 u32 f0, f1;
5458
5459- asm("pushfl ; "
5460+ asm volatile("pushfl ; "
5461 "pushfl ; "
5462 "popl %0 ; "
5463 "movl %0,%1 ; "
5464@@ -115,7 +115,7 @@ static void get_flags(void)
5465 set_bit(X86_FEATURE_FPU, cpu.flags);
5466
5467 if (has_eflag(X86_EFLAGS_ID)) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (max_intel_level),
5471 "=b" (cpu_vendor[0]),
5472 "=d" (cpu_vendor[1]),
5473@@ -124,7 +124,7 @@ static void get_flags(void)
5474
5475 if (max_intel_level >= 0x00000001 &&
5476 max_intel_level <= 0x0000ffff) {
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (tfms),
5480 "=c" (cpu.flags[4]),
5481 "=d" (cpu.flags[0])
5482@@ -136,7 +136,7 @@ static void get_flags(void)
5483 cpu.model += ((tfms >> 16) & 0xf) << 4;
5484 }
5485
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "=a" (max_amd_level)
5489 : "a" (0x80000000)
5490 : "ebx", "ecx", "edx");
5491@@ -144,7 +144,7 @@ static void get_flags(void)
5492 if (max_amd_level >= 0x80000001 &&
5493 max_amd_level <= 0x8000ffff) {
5494 u32 eax = 0x80000001;
5495- asm("cpuid"
5496+ asm volatile("cpuid"
5497 : "+a" (eax),
5498 "=c" (cpu.flags[6]),
5499 "=d" (cpu.flags[1])
5500@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5501 u32 ecx = MSR_K7_HWCR;
5502 u32 eax, edx;
5503
5504- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5505+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5506 eax &= ~(1 << 15);
5507- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5508+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5509
5510 get_flags(); /* Make sure it really did something */
5511 err = check_flags();
5512@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5513 u32 ecx = MSR_VIA_FCR;
5514 u32 eax, edx;
5515
5516- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5517+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5518 eax |= (1<<1)|(1<<7);
5519- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5520+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5521
5522 set_bit(X86_FEATURE_CX8, cpu.flags);
5523 err = check_flags();
5524@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5525 u32 eax, edx;
5526 u32 level = 1;
5527
5528- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5529- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5530- asm("cpuid"
5531+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5532+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5533+ asm volatile("cpuid"
5534 : "+a" (level), "=d" (cpu.flags[0])
5535 : : "ecx", "ebx");
5536- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5537+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5538
5539 err = check_flags();
5540 }
5541diff -urNp linux-3.1.4/arch/x86/boot/header.S linux-3.1.4/arch/x86/boot/header.S
5542--- linux-3.1.4/arch/x86/boot/header.S 2011-11-11 15:19:27.000000000 -0500
5543+++ linux-3.1.4/arch/x86/boot/header.S 2011-11-16 18:39:07.000000000 -0500
5544@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5545 # single linked list of
5546 # struct setup_data
5547
5548-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5549+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5550
5551 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5552 #define VO_INIT_SIZE (VO__end - VO__text)
5553diff -urNp linux-3.1.4/arch/x86/boot/Makefile linux-3.1.4/arch/x86/boot/Makefile
5554--- linux-3.1.4/arch/x86/boot/Makefile 2011-11-11 15:19:27.000000000 -0500
5555+++ linux-3.1.4/arch/x86/boot/Makefile 2011-11-16 18:39:07.000000000 -0500
5556@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5557 $(call cc-option, -fno-stack-protector) \
5558 $(call cc-option, -mpreferred-stack-boundary=2)
5559 KBUILD_CFLAGS += $(call cc-option, -m32)
5560+ifdef CONSTIFY_PLUGIN
5561+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5562+endif
5563 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5564 GCOV_PROFILE := n
5565
5566diff -urNp linux-3.1.4/arch/x86/boot/memory.c linux-3.1.4/arch/x86/boot/memory.c
5567--- linux-3.1.4/arch/x86/boot/memory.c 2011-11-11 15:19:27.000000000 -0500
5568+++ linux-3.1.4/arch/x86/boot/memory.c 2011-11-16 18:39:07.000000000 -0500
5569@@ -19,7 +19,7 @@
5570
5571 static int detect_memory_e820(void)
5572 {
5573- int count = 0;
5574+ unsigned int count = 0;
5575 struct biosregs ireg, oreg;
5576 struct e820entry *desc = boot_params.e820_map;
5577 static struct e820entry buf; /* static so it is zeroed */
5578diff -urNp linux-3.1.4/arch/x86/boot/video.c linux-3.1.4/arch/x86/boot/video.c
5579--- linux-3.1.4/arch/x86/boot/video.c 2011-11-11 15:19:27.000000000 -0500
5580+++ linux-3.1.4/arch/x86/boot/video.c 2011-11-16 18:39:07.000000000 -0500
5581@@ -96,7 +96,7 @@ static void store_mode_params(void)
5582 static unsigned int get_entry(void)
5583 {
5584 char entry_buf[4];
5585- int i, len = 0;
5586+ unsigned int i, len = 0;
5587 int key;
5588 unsigned int v;
5589
5590diff -urNp linux-3.1.4/arch/x86/boot/video-vesa.c linux-3.1.4/arch/x86/boot/video-vesa.c
5591--- linux-3.1.4/arch/x86/boot/video-vesa.c 2011-11-11 15:19:27.000000000 -0500
5592+++ linux-3.1.4/arch/x86/boot/video-vesa.c 2011-11-16 18:39:07.000000000 -0500
5593@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5594
5595 boot_params.screen_info.vesapm_seg = oreg.es;
5596 boot_params.screen_info.vesapm_off = oreg.di;
5597+ boot_params.screen_info.vesapm_size = oreg.cx;
5598 }
5599
5600 /*
5601diff -urNp linux-3.1.4/arch/x86/crypto/aesni-intel_asm.S linux-3.1.4/arch/x86/crypto/aesni-intel_asm.S
5602--- linux-3.1.4/arch/x86/crypto/aesni-intel_asm.S 2011-11-11 15:19:27.000000000 -0500
5603+++ linux-3.1.4/arch/x86/crypto/aesni-intel_asm.S 2011-12-02 17:38:47.000000000 -0500
5604@@ -31,6 +31,7 @@
5605
5606 #include <linux/linkage.h>
5607 #include <asm/inst.h>
5608+#include <asm/alternative-asm.h>
5609
5610 #ifdef __x86_64__
5611 .data
5612@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
5613 pop %r14
5614 pop %r13
5615 pop %r12
5616+ pax_force_retaddr 0, 1
5617 ret
5618+ENDPROC(aesni_gcm_dec)
5619
5620
5621 /*****************************************************************************
5622@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
5623 pop %r14
5624 pop %r13
5625 pop %r12
5626+ pax_force_retaddr 0, 1
5627 ret
5628+ENDPROC(aesni_gcm_enc)
5629
5630 #endif
5631
5632@@ -1714,6 +1719,7 @@ _key_expansion_256a:
5633 pxor %xmm1, %xmm0
5634 movaps %xmm0, (TKEYP)
5635 add $0x10, TKEYP
5636+ pax_force_retaddr_bts
5637 ret
5638
5639 .align 4
5640@@ -1738,6 +1744,7 @@ _key_expansion_192a:
5641 shufps $0b01001110, %xmm2, %xmm1
5642 movaps %xmm1, 0x10(TKEYP)
5643 add $0x20, TKEYP
5644+ pax_force_retaddr_bts
5645 ret
5646
5647 .align 4
5648@@ -1757,6 +1764,7 @@ _key_expansion_192b:
5649
5650 movaps %xmm0, (TKEYP)
5651 add $0x10, TKEYP
5652+ pax_force_retaddr_bts
5653 ret
5654
5655 .align 4
5656@@ -1769,6 +1777,7 @@ _key_expansion_256b:
5657 pxor %xmm1, %xmm2
5658 movaps %xmm2, (TKEYP)
5659 add $0x10, TKEYP
5660+ pax_force_retaddr_bts
5661 ret
5662
5663 /*
5664@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
5665 #ifndef __x86_64__
5666 popl KEYP
5667 #endif
5668+ pax_force_retaddr 0, 1
5669 ret
5670+ENDPROC(aesni_set_key)
5671
5672 /*
5673 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
5674@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
5675 popl KLEN
5676 popl KEYP
5677 #endif
5678+ pax_force_retaddr 0, 1
5679 ret
5680+ENDPROC(aesni_enc)
5681
5682 /*
5683 * _aesni_enc1: internal ABI
5684@@ -1959,6 +1972,7 @@ _aesni_enc1:
5685 AESENC KEY STATE
5686 movaps 0x70(TKEYP), KEY
5687 AESENCLAST KEY STATE
5688+ pax_force_retaddr_bts
5689 ret
5690
5691 /*
5692@@ -2067,6 +2081,7 @@ _aesni_enc4:
5693 AESENCLAST KEY STATE2
5694 AESENCLAST KEY STATE3
5695 AESENCLAST KEY STATE4
5696+ pax_force_retaddr_bts
5697 ret
5698
5699 /*
5700@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
5701 popl KLEN
5702 popl KEYP
5703 #endif
5704+ pax_force_retaddr 0, 1
5705 ret
5706+ENDPROC(aesni_dec)
5707
5708 /*
5709 * _aesni_dec1: internal ABI
5710@@ -2146,6 +2163,7 @@ _aesni_dec1:
5711 AESDEC KEY STATE
5712 movaps 0x70(TKEYP), KEY
5713 AESDECLAST KEY STATE
5714+ pax_force_retaddr_bts
5715 ret
5716
5717 /*
5718@@ -2254,6 +2272,7 @@ _aesni_dec4:
5719 AESDECLAST KEY STATE2
5720 AESDECLAST KEY STATE3
5721 AESDECLAST KEY STATE4
5722+ pax_force_retaddr_bts
5723 ret
5724
5725 /*
5726@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
5727 popl KEYP
5728 popl LEN
5729 #endif
5730+ pax_force_retaddr 0, 1
5731 ret
5732+ENDPROC(aesni_ecb_enc)
5733
5734 /*
5735 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
5736@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
5737 popl KEYP
5738 popl LEN
5739 #endif
5740+ pax_force_retaddr 0, 1
5741 ret
5742+ENDPROC(aesni_ecb_dec)
5743
5744 /*
5745 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
5746@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
5747 popl LEN
5748 popl IVP
5749 #endif
5750+ pax_force_retaddr 0, 1
5751 ret
5752+ENDPROC(aesni_cbc_enc)
5753
5754 /*
5755 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
5756@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
5757 popl LEN
5758 popl IVP
5759 #endif
5760+ pax_force_retaddr 0, 1
5761 ret
5762+ENDPROC(aesni_cbc_dec)
5763
5764 #ifdef __x86_64__
5765 .align 16
5766@@ -2524,6 +2551,7 @@ _aesni_inc_init:
5767 mov $1, TCTR_LOW
5768 MOVQ_R64_XMM TCTR_LOW INC
5769 MOVQ_R64_XMM CTR TCTR_LOW
5770+ pax_force_retaddr_bts
5771 ret
5772
5773 /*
5774@@ -2552,6 +2580,7 @@ _aesni_inc:
5775 .Linc_low:
5776 movaps CTR, IV
5777 PSHUFB_XMM BSWAP_MASK IV
5778+ pax_force_retaddr_bts
5779 ret
5780
5781 /*
5782@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
5783 .Lctr_enc_ret:
5784 movups IV, (IVP)
5785 .Lctr_enc_just_ret:
5786+ pax_force_retaddr 0, 1
5787 ret
5788+ENDPROC(aesni_ctr_enc)
5789 #endif
5790diff -urNp linux-3.1.4/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.1.4/arch/x86/crypto/aes-x86_64-asm_64.S
5791--- linux-3.1.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5792+++ linux-3.1.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-12-02 17:38:47.000000000 -0500
5793@@ -8,6 +8,8 @@
5794 * including this sentence is retained in full.
5795 */
5796
5797+#include <asm/alternative-asm.h>
5798+
5799 .extern crypto_ft_tab
5800 .extern crypto_it_tab
5801 .extern crypto_fl_tab
5802@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5803 je B192; \
5804 leaq 32(r9),r9;
5805
5806+#define ret pax_force_retaddr 0, 1; ret
5807+
5808 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5809 movq r1,r2; \
5810 movq r3,r4; \
5811diff -urNp linux-3.1.4/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.1.4/arch/x86/crypto/salsa20-x86_64-asm_64.S
5812--- linux-3.1.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5813+++ linux-3.1.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-12-02 17:38:47.000000000 -0500
5814@@ -1,3 +1,5 @@
5815+#include <asm/alternative-asm.h>
5816+
5817 # enter ECRYPT_encrypt_bytes
5818 .text
5819 .p2align 5
5820@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5821 add %r11,%rsp
5822 mov %rdi,%rax
5823 mov %rsi,%rdx
5824+ pax_force_retaddr 0, 1
5825 ret
5826 # bytesatleast65:
5827 ._bytesatleast65:
5828@@ -891,6 +894,7 @@ ECRYPT_keysetup:
5829 add %r11,%rsp
5830 mov %rdi,%rax
5831 mov %rsi,%rdx
5832+ pax_force_retaddr
5833 ret
5834 # enter ECRYPT_ivsetup
5835 .text
5836@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5837 add %r11,%rsp
5838 mov %rdi,%rax
5839 mov %rsi,%rdx
5840+ pax_force_retaddr
5841 ret
5842diff -urNp linux-3.1.4/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.1.4/arch/x86/crypto/twofish-x86_64-asm_64.S
5843--- linux-3.1.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5844+++ linux-3.1.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-12-02 17:38:47.000000000 -0500
5845@@ -21,6 +21,7 @@
5846 .text
5847
5848 #include <asm/asm-offsets.h>
5849+#include <asm/alternative-asm.h>
5850
5851 #define a_offset 0
5852 #define b_offset 4
5853@@ -269,6 +270,7 @@ twofish_enc_blk:
5854
5855 popq R1
5856 movq $1,%rax
5857+ pax_force_retaddr 0, 1
5858 ret
5859
5860 twofish_dec_blk:
5861@@ -321,4 +323,5 @@ twofish_dec_blk:
5862
5863 popq R1
5864 movq $1,%rax
5865+ pax_force_retaddr 0, 1
5866 ret
5867diff -urNp linux-3.1.4/arch/x86/ia32/ia32_aout.c linux-3.1.4/arch/x86/ia32/ia32_aout.c
5868--- linux-3.1.4/arch/x86/ia32/ia32_aout.c 2011-11-11 15:19:27.000000000 -0500
5869+++ linux-3.1.4/arch/x86/ia32/ia32_aout.c 2011-11-16 18:40:08.000000000 -0500
5870@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5871 unsigned long dump_start, dump_size;
5872 struct user32 dump;
5873
5874+ memset(&dump, 0, sizeof(dump));
5875+
5876 fs = get_fs();
5877 set_fs(KERNEL_DS);
5878 has_dumped = 1;
5879diff -urNp linux-3.1.4/arch/x86/ia32/ia32entry.S linux-3.1.4/arch/x86/ia32/ia32entry.S
5880--- linux-3.1.4/arch/x86/ia32/ia32entry.S 2011-11-11 15:19:27.000000000 -0500
5881+++ linux-3.1.4/arch/x86/ia32/ia32entry.S 2011-12-02 17:38:47.000000000 -0500
5882@@ -13,7 +13,9 @@
5883 #include <asm/thread_info.h>
5884 #include <asm/segment.h>
5885 #include <asm/irqflags.h>
5886+#include <asm/pgtable.h>
5887 #include <linux/linkage.h>
5888+#include <asm/alternative-asm.h>
5889
5890 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5891 #include <linux/elf-em.h>
5892@@ -95,6 +97,30 @@ ENTRY(native_irq_enable_sysexit)
5893 ENDPROC(native_irq_enable_sysexit)
5894 #endif
5895
5896+ .macro pax_enter_kernel_user
5897+ pax_set_fptr_mask
5898+#ifdef CONFIG_PAX_MEMORY_UDEREF
5899+ call pax_enter_kernel_user
5900+#endif
5901+ .endm
5902+
5903+ .macro pax_exit_kernel_user
5904+#ifdef CONFIG_PAX_MEMORY_UDEREF
5905+ call pax_exit_kernel_user
5906+#endif
5907+#ifdef CONFIG_PAX_RANDKSTACK
5908+ pushq %rax
5909+ call pax_randomize_kstack
5910+ popq %rax
5911+#endif
5912+ .endm
5913+
5914+.macro pax_erase_kstack
5915+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5916+ call pax_erase_kstack
5917+#endif
5918+.endm
5919+
5920 /*
5921 * 32bit SYSENTER instruction entry.
5922 *
5923@@ -121,12 +147,6 @@ ENTRY(ia32_sysenter_target)
5924 CFI_REGISTER rsp,rbp
5925 SWAPGS_UNSAFE_STACK
5926 movq PER_CPU_VAR(kernel_stack), %rsp
5927- addq $(KERNEL_STACK_OFFSET),%rsp
5928- /*
5929- * No need to follow this irqs on/off section: the syscall
5930- * disabled irqs, here we enable it straight after entry:
5931- */
5932- ENABLE_INTERRUPTS(CLBR_NONE)
5933 movl %ebp,%ebp /* zero extension */
5934 pushq_cfi $__USER32_DS
5935 /*CFI_REL_OFFSET ss,0*/
5936@@ -134,25 +154,38 @@ ENTRY(ia32_sysenter_target)
5937 CFI_REL_OFFSET rsp,0
5938 pushfq_cfi
5939 /*CFI_REL_OFFSET rflags,0*/
5940- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5941- CFI_REGISTER rip,r10
5942+ GET_THREAD_INFO(%r11)
5943+ movl TI_sysenter_return(%r11), %r11d
5944+ CFI_REGISTER rip,r11
5945 pushq_cfi $__USER32_CS
5946 /*CFI_REL_OFFSET cs,0*/
5947 movl %eax, %eax
5948- pushq_cfi %r10
5949+ pushq_cfi %r11
5950 CFI_REL_OFFSET rip,0
5951 pushq_cfi %rax
5952 cld
5953 SAVE_ARGS 0,1,0
5954+ pax_enter_kernel_user
5955+ /*
5956+ * No need to follow this irqs on/off section: the syscall
5957+ * disabled irqs, here we enable it straight after entry:
5958+ */
5959+ ENABLE_INTERRUPTS(CLBR_NONE)
5960 /* no need to do an access_ok check here because rbp has been
5961 32bit zero extended */
5962+
5963+#ifdef CONFIG_PAX_MEMORY_UDEREF
5964+ mov $PAX_USER_SHADOW_BASE,%r11
5965+ add %r11,%rbp
5966+#endif
5967+
5968 1: movl (%rbp),%ebp
5969 .section __ex_table,"a"
5970 .quad 1b,ia32_badarg
5971 .previous
5972- GET_THREAD_INFO(%r10)
5973- orl $TS_COMPAT,TI_status(%r10)
5974- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
5975+ GET_THREAD_INFO(%r11)
5976+ orl $TS_COMPAT,TI_status(%r11)
5977+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
5978 CFI_REMEMBER_STATE
5979 jnz sysenter_tracesys
5980 cmpq $(IA32_NR_syscalls-1),%rax
5981@@ -162,13 +195,15 @@ sysenter_do_call:
5982 sysenter_dispatch:
5983 call *ia32_sys_call_table(,%rax,8)
5984 movq %rax,RAX-ARGOFFSET(%rsp)
5985- GET_THREAD_INFO(%r10)
5986+ GET_THREAD_INFO(%r11)
5987 DISABLE_INTERRUPTS(CLBR_NONE)
5988 TRACE_IRQS_OFF
5989- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5990+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
5991 jnz sysexit_audit
5992 sysexit_from_sys_call:
5993- andl $~TS_COMPAT,TI_status(%r10)
5994+ pax_exit_kernel_user
5995+ pax_erase_kstack
5996+ andl $~TS_COMPAT,TI_status(%r11)
5997 /* clear IF, that popfq doesn't enable interrupts early */
5998 andl $~0x200,EFLAGS-R11(%rsp)
5999 movl RIP-R11(%rsp),%edx /* User %eip */
6000@@ -194,6 +229,9 @@ sysexit_from_sys_call:
6001 movl %eax,%esi /* 2nd arg: syscall number */
6002 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6003 call audit_syscall_entry
6004+
6005+ pax_erase_kstack
6006+
6007 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6008 cmpq $(IA32_NR_syscalls-1),%rax
6009 ja ia32_badsys
6010@@ -205,7 +243,7 @@ sysexit_from_sys_call:
6011 .endm
6012
6013 .macro auditsys_exit exit
6014- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6015+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6016 jnz ia32_ret_from_sys_call
6017 TRACE_IRQS_ON
6018 sti
6019@@ -215,12 +253,12 @@ sysexit_from_sys_call:
6020 movzbl %al,%edi /* zero-extend that into %edi */
6021 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6022 call audit_syscall_exit
6023- GET_THREAD_INFO(%r10)
6024+ GET_THREAD_INFO(%r11)
6025 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6026 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6027 cli
6028 TRACE_IRQS_OFF
6029- testl %edi,TI_flags(%r10)
6030+ testl %edi,TI_flags(%r11)
6031 jz \exit
6032 CLEAR_RREGS -ARGOFFSET
6033 jmp int_with_check
6034@@ -238,7 +276,7 @@ sysexit_audit:
6035
6036 sysenter_tracesys:
6037 #ifdef CONFIG_AUDITSYSCALL
6038- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6039+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6040 jz sysenter_auditsys
6041 #endif
6042 SAVE_REST
6043@@ -246,6 +284,9 @@ sysenter_tracesys:
6044 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6045 movq %rsp,%rdi /* &pt_regs -> arg1 */
6046 call syscall_trace_enter
6047+
6048+ pax_erase_kstack
6049+
6050 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6051 RESTORE_REST
6052 cmpq $(IA32_NR_syscalls-1),%rax
6053@@ -277,19 +318,20 @@ ENDPROC(ia32_sysenter_target)
6054 ENTRY(ia32_cstar_target)
6055 CFI_STARTPROC32 simple
6056 CFI_SIGNAL_FRAME
6057- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6058+ CFI_DEF_CFA rsp,0
6059 CFI_REGISTER rip,rcx
6060 /*CFI_REGISTER rflags,r11*/
6061 SWAPGS_UNSAFE_STACK
6062 movl %esp,%r8d
6063 CFI_REGISTER rsp,r8
6064 movq PER_CPU_VAR(kernel_stack),%rsp
6065+ SAVE_ARGS 8*6,0,0
6066+ pax_enter_kernel_user
6067 /*
6068 * No need to follow this irqs on/off section: the syscall
6069 * disabled irqs and here we enable it straight after entry:
6070 */
6071 ENABLE_INTERRUPTS(CLBR_NONE)
6072- SAVE_ARGS 8,0,0
6073 movl %eax,%eax /* zero extension */
6074 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6075 movq %rcx,RIP-ARGOFFSET(%rsp)
6076@@ -305,13 +347,19 @@ ENTRY(ia32_cstar_target)
6077 /* no need to do an access_ok check here because r8 has been
6078 32bit zero extended */
6079 /* hardware stack frame is complete now */
6080+
6081+#ifdef CONFIG_PAX_MEMORY_UDEREF
6082+ mov $PAX_USER_SHADOW_BASE,%r11
6083+ add %r11,%r8
6084+#endif
6085+
6086 1: movl (%r8),%r9d
6087 .section __ex_table,"a"
6088 .quad 1b,ia32_badarg
6089 .previous
6090- GET_THREAD_INFO(%r10)
6091- orl $TS_COMPAT,TI_status(%r10)
6092- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6093+ GET_THREAD_INFO(%r11)
6094+ orl $TS_COMPAT,TI_status(%r11)
6095+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6096 CFI_REMEMBER_STATE
6097 jnz cstar_tracesys
6098 cmpq $IA32_NR_syscalls-1,%rax
6099@@ -321,13 +369,15 @@ cstar_do_call:
6100 cstar_dispatch:
6101 call *ia32_sys_call_table(,%rax,8)
6102 movq %rax,RAX-ARGOFFSET(%rsp)
6103- GET_THREAD_INFO(%r10)
6104+ GET_THREAD_INFO(%r11)
6105 DISABLE_INTERRUPTS(CLBR_NONE)
6106 TRACE_IRQS_OFF
6107- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6108+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6109 jnz sysretl_audit
6110 sysretl_from_sys_call:
6111- andl $~TS_COMPAT,TI_status(%r10)
6112+ pax_exit_kernel_user
6113+ pax_erase_kstack
6114+ andl $~TS_COMPAT,TI_status(%r11)
6115 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6116 movl RIP-ARGOFFSET(%rsp),%ecx
6117 CFI_REGISTER rip,rcx
6118@@ -355,7 +405,7 @@ sysretl_audit:
6119
6120 cstar_tracesys:
6121 #ifdef CONFIG_AUDITSYSCALL
6122- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6123+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6124 jz cstar_auditsys
6125 #endif
6126 xchgl %r9d,%ebp
6127@@ -364,6 +414,9 @@ cstar_tracesys:
6128 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6129 movq %rsp,%rdi /* &pt_regs -> arg1 */
6130 call syscall_trace_enter
6131+
6132+ pax_erase_kstack
6133+
6134 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6135 RESTORE_REST
6136 xchgl %ebp,%r9d
6137@@ -409,20 +462,21 @@ ENTRY(ia32_syscall)
6138 CFI_REL_OFFSET rip,RIP-RIP
6139 PARAVIRT_ADJUST_EXCEPTION_FRAME
6140 SWAPGS
6141- /*
6142- * No need to follow this irqs on/off section: the syscall
6143- * disabled irqs and here we enable it straight after entry:
6144- */
6145- ENABLE_INTERRUPTS(CLBR_NONE)
6146 movl %eax,%eax
6147 pushq_cfi %rax
6148 cld
6149 /* note the registers are not zero extended to the sf.
6150 this could be a problem. */
6151 SAVE_ARGS 0,1,0
6152- GET_THREAD_INFO(%r10)
6153- orl $TS_COMPAT,TI_status(%r10)
6154- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6155+ pax_enter_kernel_user
6156+ /*
6157+ * No need to follow this irqs on/off section: the syscall
6158+ * disabled irqs and here we enable it straight after entry:
6159+ */
6160+ ENABLE_INTERRUPTS(CLBR_NONE)
6161+ GET_THREAD_INFO(%r11)
6162+ orl $TS_COMPAT,TI_status(%r11)
6163+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6164 jnz ia32_tracesys
6165 cmpq $(IA32_NR_syscalls-1),%rax
6166 ja ia32_badsys
6167@@ -441,6 +495,9 @@ ia32_tracesys:
6168 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6169 movq %rsp,%rdi /* &pt_regs -> arg1 */
6170 call syscall_trace_enter
6171+
6172+ pax_erase_kstack
6173+
6174 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6175 RESTORE_REST
6176 cmpq $(IA32_NR_syscalls-1),%rax
6177@@ -455,6 +512,7 @@ ia32_badsys:
6178
6179 quiet_ni_syscall:
6180 movq $-ENOSYS,%rax
6181+ pax_force_retaddr
6182 ret
6183 CFI_ENDPROC
6184
6185diff -urNp linux-3.1.4/arch/x86/ia32/ia32_signal.c linux-3.1.4/arch/x86/ia32/ia32_signal.c
6186--- linux-3.1.4/arch/x86/ia32/ia32_signal.c 2011-11-11 15:19:27.000000000 -0500
6187+++ linux-3.1.4/arch/x86/ia32/ia32_signal.c 2011-11-16 18:39:07.000000000 -0500
6188@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const
6189 }
6190 seg = get_fs();
6191 set_fs(KERNEL_DS);
6192- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6193+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6194 set_fs(seg);
6195 if (ret >= 0 && uoss_ptr) {
6196 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6197@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct
6198 */
6199 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6200 size_t frame_size,
6201- void **fpstate)
6202+ void __user **fpstate)
6203 {
6204 unsigned long sp;
6205
6206@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct
6207
6208 if (used_math()) {
6209 sp = sp - sig_xstate_ia32_size;
6210- *fpstate = (struct _fpstate_ia32 *) sp;
6211+ *fpstate = (struct _fpstate_ia32 __user *) sp;
6212 if (save_i387_xstate_ia32(*fpstate) < 0)
6213 return (void __user *) -1L;
6214 }
6215@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct
6216 sp -= frame_size;
6217 /* Align the stack pointer according to the i386 ABI,
6218 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6219- sp = ((sp + 4) & -16ul) - 4;
6220+ sp = ((sp - 12) & -16ul) - 4;
6221 return (void __user *) sp;
6222 }
6223
6224@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_s
6225 * These are actually not used anymore, but left because some
6226 * gdb versions depend on them as a marker.
6227 */
6228- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6229+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6230 } put_user_catch(err);
6231
6232 if (err)
6233@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct
6234 0xb8,
6235 __NR_ia32_rt_sigreturn,
6236 0x80cd,
6237- 0,
6238+ 0
6239 };
6240
6241 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6242@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct
6243
6244 if (ka->sa.sa_flags & SA_RESTORER)
6245 restorer = ka->sa.sa_restorer;
6246+ else if (current->mm->context.vdso)
6247+ /* Return stub is in 32bit vsyscall page */
6248+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6249 else
6250- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6251- rt_sigreturn);
6252+ restorer = &frame->retcode;
6253 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6254
6255 /*
6256 * Not actually used anymore, but left because some gdb
6257 * versions need it.
6258 */
6259- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6260+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6261 } put_user_catch(err);
6262
6263 if (err)
6264diff -urNp linux-3.1.4/arch/x86/ia32/sys_ia32.c linux-3.1.4/arch/x86/ia32/sys_ia32.c
6265--- linux-3.1.4/arch/x86/ia32/sys_ia32.c 2011-11-11 15:19:27.000000000 -0500
6266+++ linux-3.1.4/arch/x86/ia32/sys_ia32.c 2011-11-16 18:39:07.000000000 -0500
6267@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
6268 */
6269 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6270 {
6271- typeof(ubuf->st_uid) uid = 0;
6272- typeof(ubuf->st_gid) gid = 0;
6273+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
6274+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
6275 SET_UID(uid, stat->uid);
6276 SET_GID(gid, stat->gid);
6277 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
6278@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
6279 }
6280 set_fs(KERNEL_DS);
6281 ret = sys_rt_sigprocmask(how,
6282- set ? (sigset_t __user *)&s : NULL,
6283- oset ? (sigset_t __user *)&s : NULL,
6284+ set ? (sigset_t __force_user *)&s : NULL,
6285+ oset ? (sigset_t __force_user *)&s : NULL,
6286 sigsetsize);
6287 set_fs(old_fs);
6288 if (ret)
6289@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
6290 return alarm_setitimer(seconds);
6291 }
6292
6293-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
6294+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
6295 int options)
6296 {
6297 return compat_sys_wait4(pid, stat_addr, options, NULL);
6298@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
6299 mm_segment_t old_fs = get_fs();
6300
6301 set_fs(KERNEL_DS);
6302- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
6303+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
6304 set_fs(old_fs);
6305 if (put_compat_timespec(&t, interval))
6306 return -EFAULT;
6307@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
6308 mm_segment_t old_fs = get_fs();
6309
6310 set_fs(KERNEL_DS);
6311- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6312+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6313 set_fs(old_fs);
6314 if (!ret) {
6315 switch (_NSIG_WORDS) {
6316@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
6317 if (copy_siginfo_from_user32(&info, uinfo))
6318 return -EFAULT;
6319 set_fs(KERNEL_DS);
6320- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6321+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6322 set_fs(old_fs);
6323 return ret;
6324 }
6325@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6326 return -EFAULT;
6327
6328 set_fs(KERNEL_DS);
6329- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6330+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6331 count);
6332 set_fs(old_fs);
6333
6334diff -urNp linux-3.1.4/arch/x86/include/asm/alternative-asm.h linux-3.1.4/arch/x86/include/asm/alternative-asm.h
6335--- linux-3.1.4/arch/x86/include/asm/alternative-asm.h 2011-11-11 15:19:27.000000000 -0500
6336+++ linux-3.1.4/arch/x86/include/asm/alternative-asm.h 2011-12-02 17:40:13.000000000 -0500
6337@@ -15,6 +15,45 @@
6338 .endm
6339 #endif
6340
6341+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6342+ .macro pax_force_retaddr_bts rip=0
6343+ btsq $63,\rip(%rsp)
6344+ .endm
6345+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
6346+ .macro pax_force_retaddr rip=0, reload=0
6347+ btsq $63,\rip(%rsp)
6348+ .endm
6349+ .macro pax_force_fptr ptr
6350+ btsq $63,\ptr
6351+ .endm
6352+ .macro pax_set_fptr_mask
6353+ .endm
6354+#endif
6355+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
6356+ .macro pax_force_retaddr rip=0, reload=0
6357+ .if \reload
6358+ pax_set_fptr_mask
6359+ .endif
6360+ orq %r10,\rip(%rsp)
6361+ .endm
6362+ .macro pax_force_fptr ptr
6363+ orq %r10,\ptr
6364+ .endm
6365+ .macro pax_set_fptr_mask
6366+ movabs $0x8000000000000000,%r10
6367+ .endm
6368+#endif
6369+#else
6370+ .macro pax_force_retaddr rip=0, reload=0
6371+ .endm
6372+ .macro pax_force_fptr ptr
6373+ .endm
6374+ .macro pax_force_retaddr_bts rip=0
6375+ .endm
6376+ .macro pax_set_fptr_mask
6377+ .endm
6378+#endif
6379+
6380 .macro altinstruction_entry orig alt feature orig_len alt_len
6381 .long \orig - .
6382 .long \alt - .
6383diff -urNp linux-3.1.4/arch/x86/include/asm/alternative.h linux-3.1.4/arch/x86/include/asm/alternative.h
6384--- linux-3.1.4/arch/x86/include/asm/alternative.h 2011-11-11 15:19:27.000000000 -0500
6385+++ linux-3.1.4/arch/x86/include/asm/alternative.h 2011-11-16 18:39:07.000000000 -0500
6386@@ -89,7 +89,7 @@ static inline int alternatives_text_rese
6387 ".section .discard,\"aw\",@progbits\n" \
6388 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6389 ".previous\n" \
6390- ".section .altinstr_replacement, \"ax\"\n" \
6391+ ".section .altinstr_replacement, \"a\"\n" \
6392 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6393 ".previous"
6394
6395diff -urNp linux-3.1.4/arch/x86/include/asm/apic.h linux-3.1.4/arch/x86/include/asm/apic.h
6396--- linux-3.1.4/arch/x86/include/asm/apic.h 2011-11-11 15:19:27.000000000 -0500
6397+++ linux-3.1.4/arch/x86/include/asm/apic.h 2011-11-16 18:39:07.000000000 -0500
6398@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6399
6400 #ifdef CONFIG_X86_LOCAL_APIC
6401
6402-extern unsigned int apic_verbosity;
6403+extern int apic_verbosity;
6404 extern int local_apic_timer_c2_ok;
6405
6406 extern int disable_apic;
6407diff -urNp linux-3.1.4/arch/x86/include/asm/apm.h linux-3.1.4/arch/x86/include/asm/apm.h
6408--- linux-3.1.4/arch/x86/include/asm/apm.h 2011-11-11 15:19:27.000000000 -0500
6409+++ linux-3.1.4/arch/x86/include/asm/apm.h 2011-11-16 18:39:07.000000000 -0500
6410@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6411 __asm__ __volatile__(APM_DO_ZERO_SEGS
6412 "pushl %%edi\n\t"
6413 "pushl %%ebp\n\t"
6414- "lcall *%%cs:apm_bios_entry\n\t"
6415+ "lcall *%%ss:apm_bios_entry\n\t"
6416 "setc %%al\n\t"
6417 "popl %%ebp\n\t"
6418 "popl %%edi\n\t"
6419@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6420 __asm__ __volatile__(APM_DO_ZERO_SEGS
6421 "pushl %%edi\n\t"
6422 "pushl %%ebp\n\t"
6423- "lcall *%%cs:apm_bios_entry\n\t"
6424+ "lcall *%%ss:apm_bios_entry\n\t"
6425 "setc %%bl\n\t"
6426 "popl %%ebp\n\t"
6427 "popl %%edi\n\t"
6428diff -urNp linux-3.1.4/arch/x86/include/asm/atomic64_32.h linux-3.1.4/arch/x86/include/asm/atomic64_32.h
6429--- linux-3.1.4/arch/x86/include/asm/atomic64_32.h 2011-11-11 15:19:27.000000000 -0500
6430+++ linux-3.1.4/arch/x86/include/asm/atomic64_32.h 2011-11-16 18:39:07.000000000 -0500
6431@@ -12,6 +12,14 @@ typedef struct {
6432 u64 __aligned(8) counter;
6433 } atomic64_t;
6434
6435+#ifdef CONFIG_PAX_REFCOUNT
6436+typedef struct {
6437+ u64 __aligned(8) counter;
6438+} atomic64_unchecked_t;
6439+#else
6440+typedef atomic64_t atomic64_unchecked_t;
6441+#endif
6442+
6443 #define ATOMIC64_INIT(val) { (val) }
6444
6445 #ifdef CONFIG_X86_CMPXCHG64
6446@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6447 }
6448
6449 /**
6450+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6451+ * @v: pointer to type atomic64_unchecked_t
6452+ * @o: expected value
6453+ * @n: new value
6454+ *
6455+ * Atomically sets @v to @n if it was equal to @o and returns
6456+ * the old value.
6457+ */
6458+
6459+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6460+{
6461+ return cmpxchg64(&v->counter, o, n);
6462+}
6463+
6464+/**
6465 * atomic64_xchg - xchg atomic64 variable
6466 * @v: pointer to type atomic64_t
6467 * @n: value to assign
6468@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6469 }
6470
6471 /**
6472+ * atomic64_set_unchecked - set atomic64 variable
6473+ * @v: pointer to type atomic64_unchecked_t
6474+ * @i: value to assign
6475+ *
6476+ * Atomically sets the value of @v to @i.
6477+ */
6478+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6479+{
6480+ unsigned high = (unsigned)(i >> 32);
6481+ unsigned low = (unsigned)i;
6482+ asm volatile(ATOMIC64_ALTERNATIVE(set)
6483+ : "+b" (low), "+c" (high)
6484+ : "S" (v)
6485+ : "eax", "edx", "memory"
6486+ );
6487+}
6488+
6489+/**
6490 * atomic64_read - read atomic64 variable
6491 * @v: pointer to type atomic64_t
6492 *
6493@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6494 }
6495
6496 /**
6497+ * atomic64_read_unchecked - read atomic64 variable
6498+ * @v: pointer to type atomic64_unchecked_t
6499+ *
6500+ * Atomically reads the value of @v and returns it.
6501+ */
6502+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6503+{
6504+ long long r;
6505+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6506+ : "=A" (r), "+c" (v)
6507+ : : "memory"
6508+ );
6509+ return r;
6510+}
6511+
6512+/**
6513 * atomic64_add_return - add and return
6514 * @i: integer value to add
6515 * @v: pointer to type atomic64_t
6516@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6517 return i;
6518 }
6519
6520+/**
6521+ * atomic64_add_return_unchecked - add and return
6522+ * @i: integer value to add
6523+ * @v: pointer to type atomic64_unchecked_t
6524+ *
6525+ * Atomically adds @i to @v and returns @i + *@v
6526+ */
6527+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6528+{
6529+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6530+ : "+A" (i), "+c" (v)
6531+ : : "memory"
6532+ );
6533+ return i;
6534+}
6535+
6536 /*
6537 * Other variants with different arithmetic operators:
6538 */
6539@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6540 return a;
6541 }
6542
6543+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6544+{
6545+ long long a;
6546+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6547+ : "=A" (a)
6548+ : "S" (v)
6549+ : "memory", "ecx"
6550+ );
6551+ return a;
6552+}
6553+
6554 static inline long long atomic64_dec_return(atomic64_t *v)
6555 {
6556 long long a;
6557@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6558 }
6559
6560 /**
6561+ * atomic64_add_unchecked - add integer to atomic64 variable
6562+ * @i: integer value to add
6563+ * @v: pointer to type atomic64_unchecked_t
6564+ *
6565+ * Atomically adds @i to @v.
6566+ */
6567+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6568+{
6569+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6570+ : "+A" (i), "+c" (v)
6571+ : : "memory"
6572+ );
6573+ return i;
6574+}
6575+
6576+/**
6577 * atomic64_sub - subtract the atomic64 variable
6578 * @i: integer value to subtract
6579 * @v: pointer to type atomic64_t
6580diff -urNp linux-3.1.4/arch/x86/include/asm/atomic64_64.h linux-3.1.4/arch/x86/include/asm/atomic64_64.h
6581--- linux-3.1.4/arch/x86/include/asm/atomic64_64.h 2011-11-11 15:19:27.000000000 -0500
6582+++ linux-3.1.4/arch/x86/include/asm/atomic64_64.h 2011-11-16 18:39:07.000000000 -0500
6583@@ -18,7 +18,19 @@
6584 */
6585 static inline long atomic64_read(const atomic64_t *v)
6586 {
6587- return (*(volatile long *)&(v)->counter);
6588+ return (*(volatile const long *)&(v)->counter);
6589+}
6590+
6591+/**
6592+ * atomic64_read_unchecked - read atomic64 variable
6593+ * @v: pointer of type atomic64_unchecked_t
6594+ *
6595+ * Atomically reads the value of @v.
6596+ * Doesn't imply a read memory barrier.
6597+ */
6598+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6599+{
6600+ return (*(volatile const long *)&(v)->counter);
6601 }
6602
6603 /**
6604@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6605 }
6606
6607 /**
6608+ * atomic64_set_unchecked - set atomic64 variable
6609+ * @v: pointer to type atomic64_unchecked_t
6610+ * @i: required value
6611+ *
6612+ * Atomically sets the value of @v to @i.
6613+ */
6614+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6615+{
6616+ v->counter = i;
6617+}
6618+
6619+/**
6620 * atomic64_add - add integer to atomic64 variable
6621 * @i: integer value to add
6622 * @v: pointer to type atomic64_t
6623@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6624 */
6625 static inline void atomic64_add(long i, atomic64_t *v)
6626 {
6627+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6628+
6629+#ifdef CONFIG_PAX_REFCOUNT
6630+ "jno 0f\n"
6631+ LOCK_PREFIX "subq %1,%0\n"
6632+ "int $4\n0:\n"
6633+ _ASM_EXTABLE(0b, 0b)
6634+#endif
6635+
6636+ : "=m" (v->counter)
6637+ : "er" (i), "m" (v->counter));
6638+}
6639+
6640+/**
6641+ * atomic64_add_unchecked - add integer to atomic64 variable
6642+ * @i: integer value to add
6643+ * @v: pointer to type atomic64_unchecked_t
6644+ *
6645+ * Atomically adds @i to @v.
6646+ */
6647+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6648+{
6649 asm volatile(LOCK_PREFIX "addq %1,%0"
6650 : "=m" (v->counter)
6651 : "er" (i), "m" (v->counter));
6652@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6653 */
6654 static inline void atomic64_sub(long i, atomic64_t *v)
6655 {
6656- asm volatile(LOCK_PREFIX "subq %1,%0"
6657+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6658+
6659+#ifdef CONFIG_PAX_REFCOUNT
6660+ "jno 0f\n"
6661+ LOCK_PREFIX "addq %1,%0\n"
6662+ "int $4\n0:\n"
6663+ _ASM_EXTABLE(0b, 0b)
6664+#endif
6665+
6666+ : "=m" (v->counter)
6667+ : "er" (i), "m" (v->counter));
6668+}
6669+
6670+/**
6671+ * atomic64_sub_unchecked - subtract the atomic64 variable
6672+ * @i: integer value to subtract
6673+ * @v: pointer to type atomic64_unchecked_t
6674+ *
6675+ * Atomically subtracts @i from @v.
6676+ */
6677+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6678+{
6679+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6680 : "=m" (v->counter)
6681 : "er" (i), "m" (v->counter));
6682 }
6683@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6684 {
6685 unsigned char c;
6686
6687- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6688+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6689+
6690+#ifdef CONFIG_PAX_REFCOUNT
6691+ "jno 0f\n"
6692+ LOCK_PREFIX "addq %2,%0\n"
6693+ "int $4\n0:\n"
6694+ _ASM_EXTABLE(0b, 0b)
6695+#endif
6696+
6697+ "sete %1\n"
6698 : "=m" (v->counter), "=qm" (c)
6699 : "er" (i), "m" (v->counter) : "memory");
6700 return c;
6701@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6702 */
6703 static inline void atomic64_inc(atomic64_t *v)
6704 {
6705+ asm volatile(LOCK_PREFIX "incq %0\n"
6706+
6707+#ifdef CONFIG_PAX_REFCOUNT
6708+ "jno 0f\n"
6709+ LOCK_PREFIX "decq %0\n"
6710+ "int $4\n0:\n"
6711+ _ASM_EXTABLE(0b, 0b)
6712+#endif
6713+
6714+ : "=m" (v->counter)
6715+ : "m" (v->counter));
6716+}
6717+
6718+/**
6719+ * atomic64_inc_unchecked - increment atomic64 variable
6720+ * @v: pointer to type atomic64_unchecked_t
6721+ *
6722+ * Atomically increments @v by 1.
6723+ */
6724+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6725+{
6726 asm volatile(LOCK_PREFIX "incq %0"
6727 : "=m" (v->counter)
6728 : "m" (v->counter));
6729@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6730 */
6731 static inline void atomic64_dec(atomic64_t *v)
6732 {
6733- asm volatile(LOCK_PREFIX "decq %0"
6734+ asm volatile(LOCK_PREFIX "decq %0\n"
6735+
6736+#ifdef CONFIG_PAX_REFCOUNT
6737+ "jno 0f\n"
6738+ LOCK_PREFIX "incq %0\n"
6739+ "int $4\n0:\n"
6740+ _ASM_EXTABLE(0b, 0b)
6741+#endif
6742+
6743+ : "=m" (v->counter)
6744+ : "m" (v->counter));
6745+}
6746+
6747+/**
6748+ * atomic64_dec_unchecked - decrement atomic64 variable
6749+ * @v: pointer to type atomic64_unchecked_t
6750+ *
6751+ * Atomically decrements @v by 1.
6752+ */
6753+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6754+{
6755+ asm volatile(LOCK_PREFIX "decq %0\n"
6756 : "=m" (v->counter)
6757 : "m" (v->counter));
6758 }
6759@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6760 {
6761 unsigned char c;
6762
6763- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6764+ asm volatile(LOCK_PREFIX "decq %0\n"
6765+
6766+#ifdef CONFIG_PAX_REFCOUNT
6767+ "jno 0f\n"
6768+ LOCK_PREFIX "incq %0\n"
6769+ "int $4\n0:\n"
6770+ _ASM_EXTABLE(0b, 0b)
6771+#endif
6772+
6773+ "sete %1\n"
6774 : "=m" (v->counter), "=qm" (c)
6775 : "m" (v->counter) : "memory");
6776 return c != 0;
6777@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6778 {
6779 unsigned char c;
6780
6781- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6782+ asm volatile(LOCK_PREFIX "incq %0\n"
6783+
6784+#ifdef CONFIG_PAX_REFCOUNT
6785+ "jno 0f\n"
6786+ LOCK_PREFIX "decq %0\n"
6787+ "int $4\n0:\n"
6788+ _ASM_EXTABLE(0b, 0b)
6789+#endif
6790+
6791+ "sete %1\n"
6792 : "=m" (v->counter), "=qm" (c)
6793 : "m" (v->counter) : "memory");
6794 return c != 0;
6795@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6796 {
6797 unsigned char c;
6798
6799- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6800+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6801+
6802+#ifdef CONFIG_PAX_REFCOUNT
6803+ "jno 0f\n"
6804+ LOCK_PREFIX "subq %2,%0\n"
6805+ "int $4\n0:\n"
6806+ _ASM_EXTABLE(0b, 0b)
6807+#endif
6808+
6809+ "sets %1\n"
6810 : "=m" (v->counter), "=qm" (c)
6811 : "er" (i), "m" (v->counter) : "memory");
6812 return c;
6813@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6814 static inline long atomic64_add_return(long i, atomic64_t *v)
6815 {
6816 long __i = i;
6817- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6818+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6819+
6820+#ifdef CONFIG_PAX_REFCOUNT
6821+ "jno 0f\n"
6822+ "movq %0, %1\n"
6823+ "int $4\n0:\n"
6824+ _ASM_EXTABLE(0b, 0b)
6825+#endif
6826+
6827+ : "+r" (i), "+m" (v->counter)
6828+ : : "memory");
6829+ return i + __i;
6830+}
6831+
6832+/**
6833+ * atomic64_add_return_unchecked - add and return
6834+ * @i: integer value to add
6835+ * @v: pointer to type atomic64_unchecked_t
6836+ *
6837+ * Atomically adds @i to @v and returns @i + @v
6838+ */
6839+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6840+{
6841+ long __i = i;
6842+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6843 : "+r" (i), "+m" (v->counter)
6844 : : "memory");
6845 return i + __i;
6846@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6847 }
6848
6849 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6850+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6851+{
6852+ return atomic64_add_return_unchecked(1, v);
6853+}
6854 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6855
6856 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6857@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6858 return cmpxchg(&v->counter, old, new);
6859 }
6860
6861+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6862+{
6863+ return cmpxchg(&v->counter, old, new);
6864+}
6865+
6866 static inline long atomic64_xchg(atomic64_t *v, long new)
6867 {
6868 return xchg(&v->counter, new);
6869@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6870 */
6871 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6872 {
6873- long c, old;
6874+ long c, old, new;
6875 c = atomic64_read(v);
6876 for (;;) {
6877- if (unlikely(c == (u)))
6878+ if (unlikely(c == u))
6879 break;
6880- old = atomic64_cmpxchg((v), c, c + (a));
6881+
6882+ asm volatile("add %2,%0\n"
6883+
6884+#ifdef CONFIG_PAX_REFCOUNT
6885+ "jno 0f\n"
6886+ "sub %2,%0\n"
6887+ "int $4\n0:\n"
6888+ _ASM_EXTABLE(0b, 0b)
6889+#endif
6890+
6891+ : "=r" (new)
6892+ : "0" (c), "ir" (a));
6893+
6894+ old = atomic64_cmpxchg(v, c, new);
6895 if (likely(old == c))
6896 break;
6897 c = old;
6898 }
6899- return c != (u);
6900+ return c != u;
6901 }
6902
6903 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6904diff -urNp linux-3.1.4/arch/x86/include/asm/atomic.h linux-3.1.4/arch/x86/include/asm/atomic.h
6905--- linux-3.1.4/arch/x86/include/asm/atomic.h 2011-11-11 15:19:27.000000000 -0500
6906+++ linux-3.1.4/arch/x86/include/asm/atomic.h 2011-11-16 18:39:07.000000000 -0500
6907@@ -22,7 +22,18 @@
6908 */
6909 static inline int atomic_read(const atomic_t *v)
6910 {
6911- return (*(volatile int *)&(v)->counter);
6912+ return (*(volatile const int *)&(v)->counter);
6913+}
6914+
6915+/**
6916+ * atomic_read_unchecked - read atomic variable
6917+ * @v: pointer of type atomic_unchecked_t
6918+ *
6919+ * Atomically reads the value of @v.
6920+ */
6921+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6922+{
6923+ return (*(volatile const int *)&(v)->counter);
6924 }
6925
6926 /**
6927@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6928 }
6929
6930 /**
6931+ * atomic_set_unchecked - set atomic variable
6932+ * @v: pointer of type atomic_unchecked_t
6933+ * @i: required value
6934+ *
6935+ * Atomically sets the value of @v to @i.
6936+ */
6937+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6938+{
6939+ v->counter = i;
6940+}
6941+
6942+/**
6943 * atomic_add - add integer to atomic variable
6944 * @i: integer value to add
6945 * @v: pointer of type atomic_t
6946@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6947 */
6948 static inline void atomic_add(int i, atomic_t *v)
6949 {
6950- asm volatile(LOCK_PREFIX "addl %1,%0"
6951+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6952+
6953+#ifdef CONFIG_PAX_REFCOUNT
6954+ "jno 0f\n"
6955+ LOCK_PREFIX "subl %1,%0\n"
6956+ "int $4\n0:\n"
6957+ _ASM_EXTABLE(0b, 0b)
6958+#endif
6959+
6960+ : "+m" (v->counter)
6961+ : "ir" (i));
6962+}
6963+
6964+/**
6965+ * atomic_add_unchecked - add integer to atomic variable
6966+ * @i: integer value to add
6967+ * @v: pointer of type atomic_unchecked_t
6968+ *
6969+ * Atomically adds @i to @v.
6970+ */
6971+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6972+{
6973+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6974 : "+m" (v->counter)
6975 : "ir" (i));
6976 }
6977@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6978 */
6979 static inline void atomic_sub(int i, atomic_t *v)
6980 {
6981- asm volatile(LOCK_PREFIX "subl %1,%0"
6982+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6983+
6984+#ifdef CONFIG_PAX_REFCOUNT
6985+ "jno 0f\n"
6986+ LOCK_PREFIX "addl %1,%0\n"
6987+ "int $4\n0:\n"
6988+ _ASM_EXTABLE(0b, 0b)
6989+#endif
6990+
6991+ : "+m" (v->counter)
6992+ : "ir" (i));
6993+}
6994+
6995+/**
6996+ * atomic_sub_unchecked - subtract integer from atomic variable
6997+ * @i: integer value to subtract
6998+ * @v: pointer of type atomic_unchecked_t
6999+ *
7000+ * Atomically subtracts @i from @v.
7001+ */
7002+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7003+{
7004+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7005 : "+m" (v->counter)
7006 : "ir" (i));
7007 }
7008@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
7009 {
7010 unsigned char c;
7011
7012- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7013+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7014+
7015+#ifdef CONFIG_PAX_REFCOUNT
7016+ "jno 0f\n"
7017+ LOCK_PREFIX "addl %2,%0\n"
7018+ "int $4\n0:\n"
7019+ _ASM_EXTABLE(0b, 0b)
7020+#endif
7021+
7022+ "sete %1\n"
7023 : "+m" (v->counter), "=qm" (c)
7024 : "ir" (i) : "memory");
7025 return c;
7026@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
7027 */
7028 static inline void atomic_inc(atomic_t *v)
7029 {
7030- asm volatile(LOCK_PREFIX "incl %0"
7031+ asm volatile(LOCK_PREFIX "incl %0\n"
7032+
7033+#ifdef CONFIG_PAX_REFCOUNT
7034+ "jno 0f\n"
7035+ LOCK_PREFIX "decl %0\n"
7036+ "int $4\n0:\n"
7037+ _ASM_EXTABLE(0b, 0b)
7038+#endif
7039+
7040+ : "+m" (v->counter));
7041+}
7042+
7043+/**
7044+ * atomic_inc_unchecked - increment atomic variable
7045+ * @v: pointer of type atomic_unchecked_t
7046+ *
7047+ * Atomically increments @v by 1.
7048+ */
7049+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7050+{
7051+ asm volatile(LOCK_PREFIX "incl %0\n"
7052 : "+m" (v->counter));
7053 }
7054
7055@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
7056 */
7057 static inline void atomic_dec(atomic_t *v)
7058 {
7059- asm volatile(LOCK_PREFIX "decl %0"
7060+ asm volatile(LOCK_PREFIX "decl %0\n"
7061+
7062+#ifdef CONFIG_PAX_REFCOUNT
7063+ "jno 0f\n"
7064+ LOCK_PREFIX "incl %0\n"
7065+ "int $4\n0:\n"
7066+ _ASM_EXTABLE(0b, 0b)
7067+#endif
7068+
7069+ : "+m" (v->counter));
7070+}
7071+
7072+/**
7073+ * atomic_dec_unchecked - decrement atomic variable
7074+ * @v: pointer of type atomic_unchecked_t
7075+ *
7076+ * Atomically decrements @v by 1.
7077+ */
7078+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7079+{
7080+ asm volatile(LOCK_PREFIX "decl %0\n"
7081 : "+m" (v->counter));
7082 }
7083
7084@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
7085 {
7086 unsigned char c;
7087
7088- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7089+ asm volatile(LOCK_PREFIX "decl %0\n"
7090+
7091+#ifdef CONFIG_PAX_REFCOUNT
7092+ "jno 0f\n"
7093+ LOCK_PREFIX "incl %0\n"
7094+ "int $4\n0:\n"
7095+ _ASM_EXTABLE(0b, 0b)
7096+#endif
7097+
7098+ "sete %1\n"
7099 : "+m" (v->counter), "=qm" (c)
7100 : : "memory");
7101 return c != 0;
7102@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
7103 {
7104 unsigned char c;
7105
7106- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7107+ asm volatile(LOCK_PREFIX "incl %0\n"
7108+
7109+#ifdef CONFIG_PAX_REFCOUNT
7110+ "jno 0f\n"
7111+ LOCK_PREFIX "decl %0\n"
7112+ "int $4\n0:\n"
7113+ _ASM_EXTABLE(0b, 0b)
7114+#endif
7115+
7116+ "sete %1\n"
7117+ : "+m" (v->counter), "=qm" (c)
7118+ : : "memory");
7119+ return c != 0;
7120+}
7121+
7122+/**
7123+ * atomic_inc_and_test_unchecked - increment and test
7124+ * @v: pointer of type atomic_unchecked_t
7125+ *
7126+ * Atomically increments @v by 1
7127+ * and returns true if the result is zero, or false for all
7128+ * other cases.
7129+ */
7130+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7131+{
7132+ unsigned char c;
7133+
7134+ asm volatile(LOCK_PREFIX "incl %0\n"
7135+ "sete %1\n"
7136 : "+m" (v->counter), "=qm" (c)
7137 : : "memory");
7138 return c != 0;
7139@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
7140 {
7141 unsigned char c;
7142
7143- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7144+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7145+
7146+#ifdef CONFIG_PAX_REFCOUNT
7147+ "jno 0f\n"
7148+ LOCK_PREFIX "subl %2,%0\n"
7149+ "int $4\n0:\n"
7150+ _ASM_EXTABLE(0b, 0b)
7151+#endif
7152+
7153+ "sets %1\n"
7154 : "+m" (v->counter), "=qm" (c)
7155 : "ir" (i) : "memory");
7156 return c;
7157@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
7158 #endif
7159 /* Modern 486+ processor */
7160 __i = i;
7161+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7162+
7163+#ifdef CONFIG_PAX_REFCOUNT
7164+ "jno 0f\n"
7165+ "movl %0, %1\n"
7166+ "int $4\n0:\n"
7167+ _ASM_EXTABLE(0b, 0b)
7168+#endif
7169+
7170+ : "+r" (i), "+m" (v->counter)
7171+ : : "memory");
7172+ return i + __i;
7173+
7174+#ifdef CONFIG_M386
7175+no_xadd: /* Legacy 386 processor */
7176+ local_irq_save(flags);
7177+ __i = atomic_read(v);
7178+ atomic_set(v, i + __i);
7179+ local_irq_restore(flags);
7180+ return i + __i;
7181+#endif
7182+}
7183+
7184+/**
7185+ * atomic_add_return_unchecked - add integer and return
7186+ * @v: pointer of type atomic_unchecked_t
7187+ * @i: integer value to add
7188+ *
7189+ * Atomically adds @i to @v and returns @i + @v
7190+ */
7191+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7192+{
7193+ int __i;
7194+#ifdef CONFIG_M386
7195+ unsigned long flags;
7196+ if (unlikely(boot_cpu_data.x86 <= 3))
7197+ goto no_xadd;
7198+#endif
7199+ /* Modern 486+ processor */
7200+ __i = i;
7201 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7202 : "+r" (i), "+m" (v->counter)
7203 : : "memory");
7204@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
7205 }
7206
7207 #define atomic_inc_return(v) (atomic_add_return(1, v))
7208+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7209+{
7210+ return atomic_add_return_unchecked(1, v);
7211+}
7212 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7213
7214 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7215@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
7216 return cmpxchg(&v->counter, old, new);
7217 }
7218
7219+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7220+{
7221+ return cmpxchg(&v->counter, old, new);
7222+}
7223+
7224 static inline int atomic_xchg(atomic_t *v, int new)
7225 {
7226 return xchg(&v->counter, new);
7227 }
7228
7229+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7230+{
7231+ return xchg(&v->counter, new);
7232+}
7233+
7234 /**
7235 * __atomic_add_unless - add unless the number is already a given value
7236 * @v: pointer of type atomic_t
7237@@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *
7238 */
7239 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7240 {
7241- int c, old;
7242+ int c, old, new;
7243 c = atomic_read(v);
7244 for (;;) {
7245- if (unlikely(c == (u)))
7246+ if (unlikely(c == u))
7247 break;
7248- old = atomic_cmpxchg((v), c, c + (a));
7249+
7250+ asm volatile("addl %2,%0\n"
7251+
7252+#ifdef CONFIG_PAX_REFCOUNT
7253+ "jno 0f\n"
7254+ "subl %2,%0\n"
7255+ "int $4\n0:\n"
7256+ _ASM_EXTABLE(0b, 0b)
7257+#endif
7258+
7259+ : "=r" (new)
7260+ : "0" (c), "ir" (a));
7261+
7262+ old = atomic_cmpxchg(v, c, new);
7263 if (likely(old == c))
7264 break;
7265 c = old;
7266@@ -244,6 +473,48 @@ static inline int __atomic_add_unless(at
7267 return c;
7268 }
7269
7270+/**
7271+ * atomic_inc_not_zero_hint - increment if not null
7272+ * @v: pointer of type atomic_t
7273+ * @hint: probable value of the atomic before the increment
7274+ *
7275+ * This version of atomic_inc_not_zero() gives a hint of probable
7276+ * value of the atomic. This helps processor to not read the memory
7277+ * before doing the atomic read/modify/write cycle, lowering
7278+ * number of bus transactions on some arches.
7279+ *
7280+ * Returns: 0 if increment was not done, 1 otherwise.
7281+ */
7282+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7283+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7284+{
7285+ int val, c = hint, new;
7286+
7287+ /* sanity test, should be removed by compiler if hint is a constant */
7288+ if (!hint)
7289+ return __atomic_add_unless(v, 1, 0);
7290+
7291+ do {
7292+ asm volatile("incl %0\n"
7293+
7294+#ifdef CONFIG_PAX_REFCOUNT
7295+ "jno 0f\n"
7296+ "decl %0\n"
7297+ "int $4\n0:\n"
7298+ _ASM_EXTABLE(0b, 0b)
7299+#endif
7300+
7301+ : "=r" (new)
7302+ : "0" (c));
7303+
7304+ val = atomic_cmpxchg(v, c, new);
7305+ if (val == c)
7306+ return 1;
7307+ c = val;
7308+ } while (c);
7309+
7310+ return 0;
7311+}
7312
7313 /*
7314 * atomic_dec_if_positive - decrement by 1 if old value positive
7315diff -urNp linux-3.1.4/arch/x86/include/asm/bitops.h linux-3.1.4/arch/x86/include/asm/bitops.h
7316--- linux-3.1.4/arch/x86/include/asm/bitops.h 2011-11-11 15:19:27.000000000 -0500
7317+++ linux-3.1.4/arch/x86/include/asm/bitops.h 2011-11-16 18:39:07.000000000 -0500
7318@@ -38,7 +38,7 @@
7319 * a mask operation on a byte.
7320 */
7321 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7322-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7323+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7324 #define CONST_MASK(nr) (1 << ((nr) & 7))
7325
7326 /**
7327diff -urNp linux-3.1.4/arch/x86/include/asm/boot.h linux-3.1.4/arch/x86/include/asm/boot.h
7328--- linux-3.1.4/arch/x86/include/asm/boot.h 2011-11-11 15:19:27.000000000 -0500
7329+++ linux-3.1.4/arch/x86/include/asm/boot.h 2011-11-16 18:39:07.000000000 -0500
7330@@ -11,10 +11,15 @@
7331 #include <asm/pgtable_types.h>
7332
7333 /* Physical address where kernel should be loaded. */
7334-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7335+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7336 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7337 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7338
7339+#ifndef __ASSEMBLY__
7340+extern unsigned char __LOAD_PHYSICAL_ADDR[];
7341+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7342+#endif
7343+
7344 /* Minimum kernel alignment, as a power of two */
7345 #ifdef CONFIG_X86_64
7346 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7347diff -urNp linux-3.1.4/arch/x86/include/asm/cacheflush.h linux-3.1.4/arch/x86/include/asm/cacheflush.h
7348--- linux-3.1.4/arch/x86/include/asm/cacheflush.h 2011-11-11 15:19:27.000000000 -0500
7349+++ linux-3.1.4/arch/x86/include/asm/cacheflush.h 2011-11-16 18:39:07.000000000 -0500
7350@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7351 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7352
7353 if (pg_flags == _PGMT_DEFAULT)
7354- return -1;
7355+ return ~0UL;
7356 else if (pg_flags == _PGMT_WC)
7357 return _PAGE_CACHE_WC;
7358 else if (pg_flags == _PGMT_UC_MINUS)
7359diff -urNp linux-3.1.4/arch/x86/include/asm/cache.h linux-3.1.4/arch/x86/include/asm/cache.h
7360--- linux-3.1.4/arch/x86/include/asm/cache.h 2011-11-11 15:19:27.000000000 -0500
7361+++ linux-3.1.4/arch/x86/include/asm/cache.h 2011-11-16 18:39:07.000000000 -0500
7362@@ -5,12 +5,13 @@
7363
7364 /* L1 cache line size */
7365 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7366-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7367+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7368
7369 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7370+#define __read_only __attribute__((__section__(".data..read_only")))
7371
7372 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7373-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7374+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7375
7376 #ifdef CONFIG_X86_VSMP
7377 #ifdef CONFIG_SMP
7378diff -urNp linux-3.1.4/arch/x86/include/asm/checksum_32.h linux-3.1.4/arch/x86/include/asm/checksum_32.h
7379--- linux-3.1.4/arch/x86/include/asm/checksum_32.h 2011-11-11 15:19:27.000000000 -0500
7380+++ linux-3.1.4/arch/x86/include/asm/checksum_32.h 2011-11-16 18:39:07.000000000 -0500
7381@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7382 int len, __wsum sum,
7383 int *src_err_ptr, int *dst_err_ptr);
7384
7385+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7386+ int len, __wsum sum,
7387+ int *src_err_ptr, int *dst_err_ptr);
7388+
7389+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7390+ int len, __wsum sum,
7391+ int *src_err_ptr, int *dst_err_ptr);
7392+
7393 /*
7394 * Note: when you get a NULL pointer exception here this means someone
7395 * passed in an incorrect kernel address to one of these functions.
7396@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7397 int *err_ptr)
7398 {
7399 might_sleep();
7400- return csum_partial_copy_generic((__force void *)src, dst,
7401+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7402 len, sum, err_ptr, NULL);
7403 }
7404
7405@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7406 {
7407 might_sleep();
7408 if (access_ok(VERIFY_WRITE, dst, len))
7409- return csum_partial_copy_generic(src, (__force void *)dst,
7410+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7411 len, sum, NULL, err_ptr);
7412
7413 if (len)
7414diff -urNp linux-3.1.4/arch/x86/include/asm/cpufeature.h linux-3.1.4/arch/x86/include/asm/cpufeature.h
7415--- linux-3.1.4/arch/x86/include/asm/cpufeature.h 2011-11-11 15:19:27.000000000 -0500
7416+++ linux-3.1.4/arch/x86/include/asm/cpufeature.h 2011-11-16 18:39:07.000000000 -0500
7417@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7418 ".section .discard,\"aw\",@progbits\n"
7419 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7420 ".previous\n"
7421- ".section .altinstr_replacement,\"ax\"\n"
7422+ ".section .altinstr_replacement,\"a\"\n"
7423 "3: movb $1,%0\n"
7424 "4:\n"
7425 ".previous\n"
7426diff -urNp linux-3.1.4/arch/x86/include/asm/desc_defs.h linux-3.1.4/arch/x86/include/asm/desc_defs.h
7427--- linux-3.1.4/arch/x86/include/asm/desc_defs.h 2011-11-11 15:19:27.000000000 -0500
7428+++ linux-3.1.4/arch/x86/include/asm/desc_defs.h 2011-11-16 18:39:07.000000000 -0500
7429@@ -31,6 +31,12 @@ struct desc_struct {
7430 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7431 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7432 };
7433+ struct {
7434+ u16 offset_low;
7435+ u16 seg;
7436+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7437+ unsigned offset_high: 16;
7438+ } gate;
7439 };
7440 } __attribute__((packed));
7441
7442diff -urNp linux-3.1.4/arch/x86/include/asm/desc.h linux-3.1.4/arch/x86/include/asm/desc.h
7443--- linux-3.1.4/arch/x86/include/asm/desc.h 2011-11-11 15:19:27.000000000 -0500
7444+++ linux-3.1.4/arch/x86/include/asm/desc.h 2011-11-16 18:39:07.000000000 -0500
7445@@ -4,6 +4,7 @@
7446 #include <asm/desc_defs.h>
7447 #include <asm/ldt.h>
7448 #include <asm/mmu.h>
7449+#include <asm/pgtable.h>
7450
7451 #include <linux/smp.h>
7452
7453@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7454
7455 desc->type = (info->read_exec_only ^ 1) << 1;
7456 desc->type |= info->contents << 2;
7457+ desc->type |= info->seg_not_present ^ 1;
7458
7459 desc->s = 1;
7460 desc->dpl = 0x3;
7461@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7462 }
7463
7464 extern struct desc_ptr idt_descr;
7465-extern gate_desc idt_table[];
7466-
7467-struct gdt_page {
7468- struct desc_struct gdt[GDT_ENTRIES];
7469-} __attribute__((aligned(PAGE_SIZE)));
7470-
7471-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7472+extern gate_desc idt_table[256];
7473
7474+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7475 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7476 {
7477- return per_cpu(gdt_page, cpu).gdt;
7478+ return cpu_gdt_table[cpu];
7479 }
7480
7481 #ifdef CONFIG_X86_64
7482@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7483 unsigned long base, unsigned dpl, unsigned flags,
7484 unsigned short seg)
7485 {
7486- gate->a = (seg << 16) | (base & 0xffff);
7487- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7488+ gate->gate.offset_low = base;
7489+ gate->gate.seg = seg;
7490+ gate->gate.reserved = 0;
7491+ gate->gate.type = type;
7492+ gate->gate.s = 0;
7493+ gate->gate.dpl = dpl;
7494+ gate->gate.p = 1;
7495+ gate->gate.offset_high = base >> 16;
7496 }
7497
7498 #endif
7499@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7500
7501 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7502 {
7503+ pax_open_kernel();
7504 memcpy(&idt[entry], gate, sizeof(*gate));
7505+ pax_close_kernel();
7506 }
7507
7508 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7509 {
7510+ pax_open_kernel();
7511 memcpy(&ldt[entry], desc, 8);
7512+ pax_close_kernel();
7513 }
7514
7515 static inline void
7516@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7517 default: size = sizeof(*gdt); break;
7518 }
7519
7520+ pax_open_kernel();
7521 memcpy(&gdt[entry], desc, size);
7522+ pax_close_kernel();
7523 }
7524
7525 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7526@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7527
7528 static inline void native_load_tr_desc(void)
7529 {
7530+ pax_open_kernel();
7531 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7532+ pax_close_kernel();
7533 }
7534
7535 static inline void native_load_gdt(const struct desc_ptr *dtr)
7536@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7537 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7538 unsigned int i;
7539
7540+ pax_open_kernel();
7541 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7542 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7543+ pax_close_kernel();
7544 }
7545
7546 #define _LDT_empty(info) \
7547@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7548 desc->limit = (limit >> 16) & 0xf;
7549 }
7550
7551-static inline void _set_gate(int gate, unsigned type, void *addr,
7552+static inline void _set_gate(int gate, unsigned type, const void *addr,
7553 unsigned dpl, unsigned ist, unsigned seg)
7554 {
7555 gate_desc s;
7556@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7557 * Pentium F0 0F bugfix can have resulted in the mapped
7558 * IDT being write-protected.
7559 */
7560-static inline void set_intr_gate(unsigned int n, void *addr)
7561+static inline void set_intr_gate(unsigned int n, const void *addr)
7562 {
7563 BUG_ON((unsigned)n > 0xFF);
7564 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7565@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7566 /*
7567 * This routine sets up an interrupt gate at directory privilege level 3.
7568 */
7569-static inline void set_system_intr_gate(unsigned int n, void *addr)
7570+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7571 {
7572 BUG_ON((unsigned)n > 0xFF);
7573 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7574 }
7575
7576-static inline void set_system_trap_gate(unsigned int n, void *addr)
7577+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7578 {
7579 BUG_ON((unsigned)n > 0xFF);
7580 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7581 }
7582
7583-static inline void set_trap_gate(unsigned int n, void *addr)
7584+static inline void set_trap_gate(unsigned int n, const void *addr)
7585 {
7586 BUG_ON((unsigned)n > 0xFF);
7587 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7588@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7589 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7590 {
7591 BUG_ON((unsigned)n > 0xFF);
7592- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7593+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7594 }
7595
7596-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7597+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7598 {
7599 BUG_ON((unsigned)n > 0xFF);
7600 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7601 }
7602
7603-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7604+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7605 {
7606 BUG_ON((unsigned)n > 0xFF);
7607 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7608 }
7609
7610+#ifdef CONFIG_X86_32
7611+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7612+{
7613+ struct desc_struct d;
7614+
7615+ if (likely(limit))
7616+ limit = (limit - 1UL) >> PAGE_SHIFT;
7617+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7618+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7619+}
7620+#endif
7621+
7622 #endif /* _ASM_X86_DESC_H */
7623diff -urNp linux-3.1.4/arch/x86/include/asm/e820.h linux-3.1.4/arch/x86/include/asm/e820.h
7624--- linux-3.1.4/arch/x86/include/asm/e820.h 2011-11-11 15:19:27.000000000 -0500
7625+++ linux-3.1.4/arch/x86/include/asm/e820.h 2011-11-16 18:39:07.000000000 -0500
7626@@ -69,7 +69,7 @@ struct e820map {
7627 #define ISA_START_ADDRESS 0xa0000
7628 #define ISA_END_ADDRESS 0x100000
7629
7630-#define BIOS_BEGIN 0x000a0000
7631+#define BIOS_BEGIN 0x000c0000
7632 #define BIOS_END 0x00100000
7633
7634 #define BIOS_ROM_BASE 0xffe00000
7635diff -urNp linux-3.1.4/arch/x86/include/asm/elf.h linux-3.1.4/arch/x86/include/asm/elf.h
7636--- linux-3.1.4/arch/x86/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
7637+++ linux-3.1.4/arch/x86/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
7638@@ -237,7 +237,25 @@ extern int force_personality32;
7639 the loader. We need to make sure that it is out of the way of the program
7640 that it will "exec", and that there is sufficient room for the brk. */
7641
7642+#ifdef CONFIG_PAX_SEGMEXEC
7643+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7644+#else
7645 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7646+#endif
7647+
7648+#ifdef CONFIG_PAX_ASLR
7649+#ifdef CONFIG_X86_32
7650+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7651+
7652+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7653+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7654+#else
7655+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7656+
7657+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7658+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7659+#endif
7660+#endif
7661
7662 /* This yields a mask that user programs can use to figure out what
7663 instruction set this CPU supports. This could be done in user space,
7664@@ -290,9 +308,7 @@ do { \
7665
7666 #define ARCH_DLINFO \
7667 do { \
7668- if (vdso_enabled) \
7669- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7670- (unsigned long)current->mm->context.vdso); \
7671+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7672 } while (0)
7673
7674 #define AT_SYSINFO 32
7675@@ -303,7 +319,7 @@ do { \
7676
7677 #endif /* !CONFIG_X86_32 */
7678
7679-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7680+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7681
7682 #define VDSO_ENTRY \
7683 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7684@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7685 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7686 #define compat_arch_setup_additional_pages syscall32_setup_pages
7687
7688-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7689-#define arch_randomize_brk arch_randomize_brk
7690-
7691 #endif /* _ASM_X86_ELF_H */
7692diff -urNp linux-3.1.4/arch/x86/include/asm/emergency-restart.h linux-3.1.4/arch/x86/include/asm/emergency-restart.h
7693--- linux-3.1.4/arch/x86/include/asm/emergency-restart.h 2011-11-11 15:19:27.000000000 -0500
7694+++ linux-3.1.4/arch/x86/include/asm/emergency-restart.h 2011-11-16 18:39:07.000000000 -0500
7695@@ -15,6 +15,6 @@ enum reboot_type {
7696
7697 extern enum reboot_type reboot_type;
7698
7699-extern void machine_emergency_restart(void);
7700+extern void machine_emergency_restart(void) __noreturn;
7701
7702 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7703diff -urNp linux-3.1.4/arch/x86/include/asm/futex.h linux-3.1.4/arch/x86/include/asm/futex.h
7704--- linux-3.1.4/arch/x86/include/asm/futex.h 2011-11-11 15:19:27.000000000 -0500
7705+++ linux-3.1.4/arch/x86/include/asm/futex.h 2011-11-16 18:39:07.000000000 -0500
7706@@ -12,16 +12,18 @@
7707 #include <asm/system.h>
7708
7709 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7710+ typecheck(u32 __user *, uaddr); \
7711 asm volatile("1:\t" insn "\n" \
7712 "2:\t.section .fixup,\"ax\"\n" \
7713 "3:\tmov\t%3, %1\n" \
7714 "\tjmp\t2b\n" \
7715 "\t.previous\n" \
7716 _ASM_EXTABLE(1b, 3b) \
7717- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7718+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7719 : "i" (-EFAULT), "0" (oparg), "1" (0))
7720
7721 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7722+ typecheck(u32 __user *, uaddr); \
7723 asm volatile("1:\tmovl %2, %0\n" \
7724 "\tmovl\t%0, %3\n" \
7725 "\t" insn "\n" \
7726@@ -34,7 +36,7 @@
7727 _ASM_EXTABLE(1b, 4b) \
7728 _ASM_EXTABLE(2b, 4b) \
7729 : "=&a" (oldval), "=&r" (ret), \
7730- "+m" (*uaddr), "=&r" (tem) \
7731+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7732 : "r" (oparg), "i" (-EFAULT), "1" (0))
7733
7734 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7735@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7736
7737 switch (op) {
7738 case FUTEX_OP_SET:
7739- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7740+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7741 break;
7742 case FUTEX_OP_ADD:
7743- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7744+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7745 uaddr, oparg);
7746 break;
7747 case FUTEX_OP_OR:
7748@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7749 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7750 return -EFAULT;
7751
7752- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7753+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7754 "2:\t.section .fixup, \"ax\"\n"
7755 "3:\tmov %3, %0\n"
7756 "\tjmp 2b\n"
7757 "\t.previous\n"
7758 _ASM_EXTABLE(1b, 3b)
7759- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7760+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7761 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7762 : "memory"
7763 );
7764diff -urNp linux-3.1.4/arch/x86/include/asm/hw_irq.h linux-3.1.4/arch/x86/include/asm/hw_irq.h
7765--- linux-3.1.4/arch/x86/include/asm/hw_irq.h 2011-11-11 15:19:27.000000000 -0500
7766+++ linux-3.1.4/arch/x86/include/asm/hw_irq.h 2011-11-16 18:39:07.000000000 -0500
7767@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
7768 extern void enable_IO_APIC(void);
7769
7770 /* Statistics */
7771-extern atomic_t irq_err_count;
7772-extern atomic_t irq_mis_count;
7773+extern atomic_unchecked_t irq_err_count;
7774+extern atomic_unchecked_t irq_mis_count;
7775
7776 /* EISA */
7777 extern void eisa_set_level_irq(unsigned int irq);
7778diff -urNp linux-3.1.4/arch/x86/include/asm/i387.h linux-3.1.4/arch/x86/include/asm/i387.h
7779--- linux-3.1.4/arch/x86/include/asm/i387.h 2011-11-11 15:19:27.000000000 -0500
7780+++ linux-3.1.4/arch/x86/include/asm/i387.h 2011-11-16 18:39:07.000000000 -0500
7781@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7782 {
7783 int err;
7784
7785+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7786+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7787+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7788+#endif
7789+
7790 /* See comment in fxsave() below. */
7791 #ifdef CONFIG_AS_FXSAVEQ
7792 asm volatile("1: fxrstorq %[fx]\n\t"
7793@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7794 {
7795 int err;
7796
7797+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7798+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7799+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7800+#endif
7801+
7802 /*
7803 * Clear the bytes not touched by the fxsave and reserved
7804 * for the SW usage.
7805@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7806 #endif /* CONFIG_X86_64 */
7807
7808 /* We need a safe address that is cheap to find and that is already
7809- in L1 during context switch. The best choices are unfortunately
7810- different for UP and SMP */
7811-#ifdef CONFIG_SMP
7812-#define safe_address (__per_cpu_offset[0])
7813-#else
7814-#define safe_address (kstat_cpu(0).cpustat.user)
7815-#endif
7816+ in L1 during context switch. */
7817+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7818
7819 /*
7820 * These must be called with preempt disabled
7821@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7822 struct thread_info *me = current_thread_info();
7823 preempt_disable();
7824 if (me->status & TS_USEDFPU)
7825- __save_init_fpu(me->task);
7826+ __save_init_fpu(current);
7827 else
7828 clts();
7829 }
7830diff -urNp linux-3.1.4/arch/x86/include/asm/io.h linux-3.1.4/arch/x86/include/asm/io.h
7831--- linux-3.1.4/arch/x86/include/asm/io.h 2011-11-11 15:19:27.000000000 -0500
7832+++ linux-3.1.4/arch/x86/include/asm/io.h 2011-11-16 18:39:07.000000000 -0500
7833@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
7834
7835 #include <linux/vmalloc.h>
7836
7837+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7838+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7839+{
7840+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7841+}
7842+
7843+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7844+{
7845+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7846+}
7847+
7848 /*
7849 * Convert a virtual cached pointer to an uncached pointer
7850 */
7851diff -urNp linux-3.1.4/arch/x86/include/asm/irqflags.h linux-3.1.4/arch/x86/include/asm/irqflags.h
7852--- linux-3.1.4/arch/x86/include/asm/irqflags.h 2011-11-11 15:19:27.000000000 -0500
7853+++ linux-3.1.4/arch/x86/include/asm/irqflags.h 2011-11-16 18:39:07.000000000 -0500
7854@@ -141,6 +141,11 @@ static inline notrace unsigned long arch
7855 sti; \
7856 sysexit
7857
7858+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7859+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7860+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7861+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7862+
7863 #else
7864 #define INTERRUPT_RETURN iret
7865 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7866diff -urNp linux-3.1.4/arch/x86/include/asm/kprobes.h linux-3.1.4/arch/x86/include/asm/kprobes.h
7867--- linux-3.1.4/arch/x86/include/asm/kprobes.h 2011-11-11 15:19:27.000000000 -0500
7868+++ linux-3.1.4/arch/x86/include/asm/kprobes.h 2011-11-16 18:39:07.000000000 -0500
7869@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7870 #define RELATIVEJUMP_SIZE 5
7871 #define RELATIVECALL_OPCODE 0xe8
7872 #define RELATIVE_ADDR_SIZE 4
7873-#define MAX_STACK_SIZE 64
7874-#define MIN_STACK_SIZE(ADDR) \
7875- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7876- THREAD_SIZE - (unsigned long)(ADDR))) \
7877- ? (MAX_STACK_SIZE) \
7878- : (((unsigned long)current_thread_info()) + \
7879- THREAD_SIZE - (unsigned long)(ADDR)))
7880+#define MAX_STACK_SIZE 64UL
7881+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7882
7883 #define flush_insn_slot(p) do { } while (0)
7884
7885diff -urNp linux-3.1.4/arch/x86/include/asm/kvm_host.h linux-3.1.4/arch/x86/include/asm/kvm_host.h
7886--- linux-3.1.4/arch/x86/include/asm/kvm_host.h 2011-11-11 15:19:27.000000000 -0500
7887+++ linux-3.1.4/arch/x86/include/asm/kvm_host.h 2011-11-16 18:39:07.000000000 -0500
7888@@ -456,7 +456,7 @@ struct kvm_arch {
7889 unsigned int n_requested_mmu_pages;
7890 unsigned int n_max_mmu_pages;
7891 unsigned int indirect_shadow_pages;
7892- atomic_t invlpg_counter;
7893+ atomic_unchecked_t invlpg_counter;
7894 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7895 /*
7896 * Hash table of struct kvm_mmu_page.
7897@@ -636,7 +636,7 @@ struct kvm_x86_ops {
7898 enum x86_intercept_stage stage);
7899
7900 const struct trace_print_flags *exit_reasons_str;
7901-};
7902+} __do_const;
7903
7904 struct kvm_arch_async_pf {
7905 u32 token;
7906diff -urNp linux-3.1.4/arch/x86/include/asm/local.h linux-3.1.4/arch/x86/include/asm/local.h
7907--- linux-3.1.4/arch/x86/include/asm/local.h 2011-11-11 15:19:27.000000000 -0500
7908+++ linux-3.1.4/arch/x86/include/asm/local.h 2011-11-16 18:39:07.000000000 -0500
7909@@ -18,26 +18,58 @@ typedef struct {
7910
7911 static inline void local_inc(local_t *l)
7912 {
7913- asm volatile(_ASM_INC "%0"
7914+ asm volatile(_ASM_INC "%0\n"
7915+
7916+#ifdef CONFIG_PAX_REFCOUNT
7917+ "jno 0f\n"
7918+ _ASM_DEC "%0\n"
7919+ "int $4\n0:\n"
7920+ _ASM_EXTABLE(0b, 0b)
7921+#endif
7922+
7923 : "+m" (l->a.counter));
7924 }
7925
7926 static inline void local_dec(local_t *l)
7927 {
7928- asm volatile(_ASM_DEC "%0"
7929+ asm volatile(_ASM_DEC "%0\n"
7930+
7931+#ifdef CONFIG_PAX_REFCOUNT
7932+ "jno 0f\n"
7933+ _ASM_INC "%0\n"
7934+ "int $4\n0:\n"
7935+ _ASM_EXTABLE(0b, 0b)
7936+#endif
7937+
7938 : "+m" (l->a.counter));
7939 }
7940
7941 static inline void local_add(long i, local_t *l)
7942 {
7943- asm volatile(_ASM_ADD "%1,%0"
7944+ asm volatile(_ASM_ADD "%1,%0\n"
7945+
7946+#ifdef CONFIG_PAX_REFCOUNT
7947+ "jno 0f\n"
7948+ _ASM_SUB "%1,%0\n"
7949+ "int $4\n0:\n"
7950+ _ASM_EXTABLE(0b, 0b)
7951+#endif
7952+
7953 : "+m" (l->a.counter)
7954 : "ir" (i));
7955 }
7956
7957 static inline void local_sub(long i, local_t *l)
7958 {
7959- asm volatile(_ASM_SUB "%1,%0"
7960+ asm volatile(_ASM_SUB "%1,%0\n"
7961+
7962+#ifdef CONFIG_PAX_REFCOUNT
7963+ "jno 0f\n"
7964+ _ASM_ADD "%1,%0\n"
7965+ "int $4\n0:\n"
7966+ _ASM_EXTABLE(0b, 0b)
7967+#endif
7968+
7969 : "+m" (l->a.counter)
7970 : "ir" (i));
7971 }
7972@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7973 {
7974 unsigned char c;
7975
7976- asm volatile(_ASM_SUB "%2,%0; sete %1"
7977+ asm volatile(_ASM_SUB "%2,%0\n"
7978+
7979+#ifdef CONFIG_PAX_REFCOUNT
7980+ "jno 0f\n"
7981+ _ASM_ADD "%2,%0\n"
7982+ "int $4\n0:\n"
7983+ _ASM_EXTABLE(0b, 0b)
7984+#endif
7985+
7986+ "sete %1\n"
7987 : "+m" (l->a.counter), "=qm" (c)
7988 : "ir" (i) : "memory");
7989 return c;
7990@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7991 {
7992 unsigned char c;
7993
7994- asm volatile(_ASM_DEC "%0; sete %1"
7995+ asm volatile(_ASM_DEC "%0\n"
7996+
7997+#ifdef CONFIG_PAX_REFCOUNT
7998+ "jno 0f\n"
7999+ _ASM_INC "%0\n"
8000+ "int $4\n0:\n"
8001+ _ASM_EXTABLE(0b, 0b)
8002+#endif
8003+
8004+ "sete %1\n"
8005 : "+m" (l->a.counter), "=qm" (c)
8006 : : "memory");
8007 return c != 0;
8008@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8009 {
8010 unsigned char c;
8011
8012- asm volatile(_ASM_INC "%0; sete %1"
8013+ asm volatile(_ASM_INC "%0\n"
8014+
8015+#ifdef CONFIG_PAX_REFCOUNT
8016+ "jno 0f\n"
8017+ _ASM_DEC "%0\n"
8018+ "int $4\n0:\n"
8019+ _ASM_EXTABLE(0b, 0b)
8020+#endif
8021+
8022+ "sete %1\n"
8023 : "+m" (l->a.counter), "=qm" (c)
8024 : : "memory");
8025 return c != 0;
8026@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8027 {
8028 unsigned char c;
8029
8030- asm volatile(_ASM_ADD "%2,%0; sets %1"
8031+ asm volatile(_ASM_ADD "%2,%0\n"
8032+
8033+#ifdef CONFIG_PAX_REFCOUNT
8034+ "jno 0f\n"
8035+ _ASM_SUB "%2,%0\n"
8036+ "int $4\n0:\n"
8037+ _ASM_EXTABLE(0b, 0b)
8038+#endif
8039+
8040+ "sets %1\n"
8041 : "+m" (l->a.counter), "=qm" (c)
8042 : "ir" (i) : "memory");
8043 return c;
8044@@ -133,7 +201,15 @@ static inline long local_add_return(long
8045 #endif
8046 /* Modern 486+ processor */
8047 __i = i;
8048- asm volatile(_ASM_XADD "%0, %1;"
8049+ asm volatile(_ASM_XADD "%0, %1\n"
8050+
8051+#ifdef CONFIG_PAX_REFCOUNT
8052+ "jno 0f\n"
8053+ _ASM_MOV "%0,%1\n"
8054+ "int $4\n0:\n"
8055+ _ASM_EXTABLE(0b, 0b)
8056+#endif
8057+
8058 : "+r" (i), "+m" (l->a.counter)
8059 : : "memory");
8060 return i + __i;
8061diff -urNp linux-3.1.4/arch/x86/include/asm/mman.h linux-3.1.4/arch/x86/include/asm/mman.h
8062--- linux-3.1.4/arch/x86/include/asm/mman.h 2011-11-11 15:19:27.000000000 -0500
8063+++ linux-3.1.4/arch/x86/include/asm/mman.h 2011-11-16 18:39:07.000000000 -0500
8064@@ -5,4 +5,14 @@
8065
8066 #include <asm-generic/mman.h>
8067
8068+#ifdef __KERNEL__
8069+#ifndef __ASSEMBLY__
8070+#ifdef CONFIG_X86_32
8071+#define arch_mmap_check i386_mmap_check
8072+int i386_mmap_check(unsigned long addr, unsigned long len,
8073+ unsigned long flags);
8074+#endif
8075+#endif
8076+#endif
8077+
8078 #endif /* _ASM_X86_MMAN_H */
8079diff -urNp linux-3.1.4/arch/x86/include/asm/mmu_context.h linux-3.1.4/arch/x86/include/asm/mmu_context.h
8080--- linux-3.1.4/arch/x86/include/asm/mmu_context.h 2011-11-11 15:19:27.000000000 -0500
8081+++ linux-3.1.4/arch/x86/include/asm/mmu_context.h 2011-11-16 18:39:07.000000000 -0500
8082@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
8083
8084 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8085 {
8086+
8087+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8088+ unsigned int i;
8089+ pgd_t *pgd;
8090+
8091+ pax_open_kernel();
8092+ pgd = get_cpu_pgd(smp_processor_id());
8093+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8094+ set_pgd_batched(pgd+i, native_make_pgd(0));
8095+ pax_close_kernel();
8096+#endif
8097+
8098 #ifdef CONFIG_SMP
8099 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8100 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8101@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
8102 struct task_struct *tsk)
8103 {
8104 unsigned cpu = smp_processor_id();
8105+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8106+ int tlbstate = TLBSTATE_OK;
8107+#endif
8108
8109 if (likely(prev != next)) {
8110 #ifdef CONFIG_SMP
8111+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8112+ tlbstate = percpu_read(cpu_tlbstate.state);
8113+#endif
8114 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8115 percpu_write(cpu_tlbstate.active_mm, next);
8116 #endif
8117 cpumask_set_cpu(cpu, mm_cpumask(next));
8118
8119 /* Re-load page tables */
8120+#ifdef CONFIG_PAX_PER_CPU_PGD
8121+ pax_open_kernel();
8122+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8123+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8124+ pax_close_kernel();
8125+ load_cr3(get_cpu_pgd(cpu));
8126+#else
8127 load_cr3(next->pgd);
8128+#endif
8129
8130 /* stop flush ipis for the previous mm */
8131 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8132@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
8133 */
8134 if (unlikely(prev->context.ldt != next->context.ldt))
8135 load_LDT_nolock(&next->context);
8136- }
8137+
8138+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8139+ if (!(__supported_pte_mask & _PAGE_NX)) {
8140+ smp_mb__before_clear_bit();
8141+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8142+ smp_mb__after_clear_bit();
8143+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8144+ }
8145+#endif
8146+
8147+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8148+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8149+ prev->context.user_cs_limit != next->context.user_cs_limit))
8150+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8151 #ifdef CONFIG_SMP
8152+ else if (unlikely(tlbstate != TLBSTATE_OK))
8153+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8154+#endif
8155+#endif
8156+
8157+ }
8158 else {
8159+
8160+#ifdef CONFIG_PAX_PER_CPU_PGD
8161+ pax_open_kernel();
8162+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8163+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8164+ pax_close_kernel();
8165+ load_cr3(get_cpu_pgd(cpu));
8166+#endif
8167+
8168+#ifdef CONFIG_SMP
8169 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8170 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8171
8172@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
8173 * tlb flush IPI delivery. We must reload CR3
8174 * to make sure to use no freed page tables.
8175 */
8176+
8177+#ifndef CONFIG_PAX_PER_CPU_PGD
8178 load_cr3(next->pgd);
8179+#endif
8180+
8181 load_LDT_nolock(&next->context);
8182+
8183+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8184+ if (!(__supported_pte_mask & _PAGE_NX))
8185+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8186+#endif
8187+
8188+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8189+#ifdef CONFIG_PAX_PAGEEXEC
8190+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
8191+#endif
8192+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8193+#endif
8194+
8195 }
8196- }
8197 #endif
8198+ }
8199 }
8200
8201 #define activate_mm(prev, next) \
8202diff -urNp linux-3.1.4/arch/x86/include/asm/mmu.h linux-3.1.4/arch/x86/include/asm/mmu.h
8203--- linux-3.1.4/arch/x86/include/asm/mmu.h 2011-11-11 15:19:27.000000000 -0500
8204+++ linux-3.1.4/arch/x86/include/asm/mmu.h 2011-11-16 18:39:07.000000000 -0500
8205@@ -9,7 +9,7 @@
8206 * we put the segment information here.
8207 */
8208 typedef struct {
8209- void *ldt;
8210+ struct desc_struct *ldt;
8211 int size;
8212
8213 #ifdef CONFIG_X86_64
8214@@ -18,7 +18,19 @@ typedef struct {
8215 #endif
8216
8217 struct mutex lock;
8218- void *vdso;
8219+ unsigned long vdso;
8220+
8221+#ifdef CONFIG_X86_32
8222+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8223+ unsigned long user_cs_base;
8224+ unsigned long user_cs_limit;
8225+
8226+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8227+ cpumask_t cpu_user_cs_mask;
8228+#endif
8229+
8230+#endif
8231+#endif
8232 } mm_context_t;
8233
8234 #ifdef CONFIG_SMP
8235diff -urNp linux-3.1.4/arch/x86/include/asm/module.h linux-3.1.4/arch/x86/include/asm/module.h
8236--- linux-3.1.4/arch/x86/include/asm/module.h 2011-11-11 15:19:27.000000000 -0500
8237+++ linux-3.1.4/arch/x86/include/asm/module.h 2011-12-02 17:38:47.000000000 -0500
8238@@ -5,6 +5,7 @@
8239
8240 #ifdef CONFIG_X86_64
8241 /* X86_64 does not define MODULE_PROC_FAMILY */
8242+#define MODULE_PROC_FAMILY ""
8243 #elif defined CONFIG_M386
8244 #define MODULE_PROC_FAMILY "386 "
8245 #elif defined CONFIG_M486
8246@@ -59,8 +60,20 @@
8247 #error unknown processor family
8248 #endif
8249
8250-#ifdef CONFIG_X86_32
8251-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
8252+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8253+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
8254+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
8255+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
8256+#else
8257+#define MODULE_PAX_KERNEXEC ""
8258 #endif
8259
8260+#ifdef CONFIG_PAX_MEMORY_UDEREF
8261+#define MODULE_PAX_UDEREF "UDEREF "
8262+#else
8263+#define MODULE_PAX_UDEREF ""
8264+#endif
8265+
8266+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
8267+
8268 #endif /* _ASM_X86_MODULE_H */
8269diff -urNp linux-3.1.4/arch/x86/include/asm/page_64_types.h linux-3.1.4/arch/x86/include/asm/page_64_types.h
8270--- linux-3.1.4/arch/x86/include/asm/page_64_types.h 2011-11-11 15:19:27.000000000 -0500
8271+++ linux-3.1.4/arch/x86/include/asm/page_64_types.h 2011-11-16 18:39:07.000000000 -0500
8272@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8273
8274 /* duplicated to the one in bootmem.h */
8275 extern unsigned long max_pfn;
8276-extern unsigned long phys_base;
8277+extern const unsigned long phys_base;
8278
8279 extern unsigned long __phys_addr(unsigned long);
8280 #define __phys_reloc_hide(x) (x)
8281diff -urNp linux-3.1.4/arch/x86/include/asm/paravirt.h linux-3.1.4/arch/x86/include/asm/paravirt.h
8282--- linux-3.1.4/arch/x86/include/asm/paravirt.h 2011-11-11 15:19:27.000000000 -0500
8283+++ linux-3.1.4/arch/x86/include/asm/paravirt.h 2011-11-16 18:39:07.000000000 -0500
8284@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp,
8285 val);
8286 }
8287
8288+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8289+{
8290+ pgdval_t val = native_pgd_val(pgd);
8291+
8292+ if (sizeof(pgdval_t) > sizeof(long))
8293+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
8294+ val, (u64)val >> 32);
8295+ else
8296+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
8297+ val);
8298+}
8299+
8300 static inline void pgd_clear(pgd_t *pgdp)
8301 {
8302 set_pgd(pgdp, __pgd(0));
8303@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned
8304 pv_mmu_ops.set_fixmap(idx, phys, flags);
8305 }
8306
8307+#ifdef CONFIG_PAX_KERNEXEC
8308+static inline unsigned long pax_open_kernel(void)
8309+{
8310+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
8311+}
8312+
8313+static inline unsigned long pax_close_kernel(void)
8314+{
8315+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
8316+}
8317+#else
8318+static inline unsigned long pax_open_kernel(void) { return 0; }
8319+static inline unsigned long pax_close_kernel(void) { return 0; }
8320+#endif
8321+
8322 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
8323
8324 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
8325@@ -964,7 +991,7 @@ extern void default_banner(void);
8326
8327 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
8328 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
8329-#define PARA_INDIRECT(addr) *%cs:addr
8330+#define PARA_INDIRECT(addr) *%ss:addr
8331 #endif
8332
8333 #define INTERRUPT_RETURN \
8334@@ -1041,6 +1068,21 @@ extern void default_banner(void);
8335 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8336 CLBR_NONE, \
8337 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8338+
8339+#define GET_CR0_INTO_RDI \
8340+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8341+ mov %rax,%rdi
8342+
8343+#define SET_RDI_INTO_CR0 \
8344+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8345+
8346+#define GET_CR3_INTO_RDI \
8347+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8348+ mov %rax,%rdi
8349+
8350+#define SET_RDI_INTO_CR3 \
8351+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8352+
8353 #endif /* CONFIG_X86_32 */
8354
8355 #endif /* __ASSEMBLY__ */
8356diff -urNp linux-3.1.4/arch/x86/include/asm/paravirt_types.h linux-3.1.4/arch/x86/include/asm/paravirt_types.h
8357--- linux-3.1.4/arch/x86/include/asm/paravirt_types.h 2011-11-11 15:19:27.000000000 -0500
8358+++ linux-3.1.4/arch/x86/include/asm/paravirt_types.h 2011-11-16 18:39:07.000000000 -0500
8359@@ -84,20 +84,20 @@ struct pv_init_ops {
8360 */
8361 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8362 unsigned long addr, unsigned len);
8363-};
8364+} __no_const;
8365
8366
8367 struct pv_lazy_ops {
8368 /* Set deferred update mode, used for batching operations. */
8369 void (*enter)(void);
8370 void (*leave)(void);
8371-};
8372+} __no_const;
8373
8374 struct pv_time_ops {
8375 unsigned long long (*sched_clock)(void);
8376 unsigned long long (*steal_clock)(int cpu);
8377 unsigned long (*get_tsc_khz)(void);
8378-};
8379+} __no_const;
8380
8381 struct pv_cpu_ops {
8382 /* hooks for various privileged instructions */
8383@@ -193,7 +193,7 @@ struct pv_cpu_ops {
8384
8385 void (*start_context_switch)(struct task_struct *prev);
8386 void (*end_context_switch)(struct task_struct *next);
8387-};
8388+} __no_const;
8389
8390 struct pv_irq_ops {
8391 /*
8392@@ -224,7 +224,7 @@ struct pv_apic_ops {
8393 unsigned long start_eip,
8394 unsigned long start_esp);
8395 #endif
8396-};
8397+} __no_const;
8398
8399 struct pv_mmu_ops {
8400 unsigned long (*read_cr2)(void);
8401@@ -313,6 +313,7 @@ struct pv_mmu_ops {
8402 struct paravirt_callee_save make_pud;
8403
8404 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8405+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8406 #endif /* PAGETABLE_LEVELS == 4 */
8407 #endif /* PAGETABLE_LEVELS >= 3 */
8408
8409@@ -324,6 +325,12 @@ struct pv_mmu_ops {
8410 an mfn. We can tell which is which from the index. */
8411 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8412 phys_addr_t phys, pgprot_t flags);
8413+
8414+#ifdef CONFIG_PAX_KERNEXEC
8415+ unsigned long (*pax_open_kernel)(void);
8416+ unsigned long (*pax_close_kernel)(void);
8417+#endif
8418+
8419 };
8420
8421 struct arch_spinlock;
8422@@ -334,7 +341,7 @@ struct pv_lock_ops {
8423 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8424 int (*spin_trylock)(struct arch_spinlock *lock);
8425 void (*spin_unlock)(struct arch_spinlock *lock);
8426-};
8427+} __no_const;
8428
8429 /* This contains all the paravirt structures: we get a convenient
8430 * number for each function using the offset which we use to indicate
8431diff -urNp linux-3.1.4/arch/x86/include/asm/pgalloc.h linux-3.1.4/arch/x86/include/asm/pgalloc.h
8432--- linux-3.1.4/arch/x86/include/asm/pgalloc.h 2011-11-11 15:19:27.000000000 -0500
8433+++ linux-3.1.4/arch/x86/include/asm/pgalloc.h 2011-11-16 18:39:07.000000000 -0500
8434@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8435 pmd_t *pmd, pte_t *pte)
8436 {
8437 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8438+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8439+}
8440+
8441+static inline void pmd_populate_user(struct mm_struct *mm,
8442+ pmd_t *pmd, pte_t *pte)
8443+{
8444+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8445 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8446 }
8447
8448diff -urNp linux-3.1.4/arch/x86/include/asm/pgtable-2level.h linux-3.1.4/arch/x86/include/asm/pgtable-2level.h
8449--- linux-3.1.4/arch/x86/include/asm/pgtable-2level.h 2011-11-11 15:19:27.000000000 -0500
8450+++ linux-3.1.4/arch/x86/include/asm/pgtable-2level.h 2011-11-16 18:39:07.000000000 -0500
8451@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8452
8453 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8454 {
8455+ pax_open_kernel();
8456 *pmdp = pmd;
8457+ pax_close_kernel();
8458 }
8459
8460 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8461diff -urNp linux-3.1.4/arch/x86/include/asm/pgtable_32.h linux-3.1.4/arch/x86/include/asm/pgtable_32.h
8462--- linux-3.1.4/arch/x86/include/asm/pgtable_32.h 2011-11-11 15:19:27.000000000 -0500
8463+++ linux-3.1.4/arch/x86/include/asm/pgtable_32.h 2011-11-16 18:39:07.000000000 -0500
8464@@ -25,9 +25,6 @@
8465 struct mm_struct;
8466 struct vm_area_struct;
8467
8468-extern pgd_t swapper_pg_dir[1024];
8469-extern pgd_t initial_page_table[1024];
8470-
8471 static inline void pgtable_cache_init(void) { }
8472 static inline void check_pgt_cache(void) { }
8473 void paging_init(void);
8474@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8475 # include <asm/pgtable-2level.h>
8476 #endif
8477
8478+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8479+extern pgd_t initial_page_table[PTRS_PER_PGD];
8480+#ifdef CONFIG_X86_PAE
8481+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8482+#endif
8483+
8484 #if defined(CONFIG_HIGHPTE)
8485 #define pte_offset_map(dir, address) \
8486 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8487@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8488 /* Clear a kernel PTE and flush it from the TLB */
8489 #define kpte_clear_flush(ptep, vaddr) \
8490 do { \
8491+ pax_open_kernel(); \
8492 pte_clear(&init_mm, (vaddr), (ptep)); \
8493+ pax_close_kernel(); \
8494 __flush_tlb_one((vaddr)); \
8495 } while (0)
8496
8497@@ -74,6 +79,9 @@ do { \
8498
8499 #endif /* !__ASSEMBLY__ */
8500
8501+#define HAVE_ARCH_UNMAPPED_AREA
8502+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8503+
8504 /*
8505 * kern_addr_valid() is (1) for FLATMEM and (0) for
8506 * SPARSEMEM and DISCONTIGMEM
8507diff -urNp linux-3.1.4/arch/x86/include/asm/pgtable_32_types.h linux-3.1.4/arch/x86/include/asm/pgtable_32_types.h
8508--- linux-3.1.4/arch/x86/include/asm/pgtable_32_types.h 2011-11-11 15:19:27.000000000 -0500
8509+++ linux-3.1.4/arch/x86/include/asm/pgtable_32_types.h 2011-11-16 18:39:07.000000000 -0500
8510@@ -8,7 +8,7 @@
8511 */
8512 #ifdef CONFIG_X86_PAE
8513 # include <asm/pgtable-3level_types.h>
8514-# define PMD_SIZE (1UL << PMD_SHIFT)
8515+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8516 # define PMD_MASK (~(PMD_SIZE - 1))
8517 #else
8518 # include <asm/pgtable-2level_types.h>
8519@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8520 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8521 #endif
8522
8523+#ifdef CONFIG_PAX_KERNEXEC
8524+#ifndef __ASSEMBLY__
8525+extern unsigned char MODULES_EXEC_VADDR[];
8526+extern unsigned char MODULES_EXEC_END[];
8527+#endif
8528+#include <asm/boot.h>
8529+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8530+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8531+#else
8532+#define ktla_ktva(addr) (addr)
8533+#define ktva_ktla(addr) (addr)
8534+#endif
8535+
8536 #define MODULES_VADDR VMALLOC_START
8537 #define MODULES_END VMALLOC_END
8538 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8539diff -urNp linux-3.1.4/arch/x86/include/asm/pgtable-3level.h linux-3.1.4/arch/x86/include/asm/pgtable-3level.h
8540--- linux-3.1.4/arch/x86/include/asm/pgtable-3level.h 2011-11-11 15:19:27.000000000 -0500
8541+++ linux-3.1.4/arch/x86/include/asm/pgtable-3level.h 2011-11-16 18:39:07.000000000 -0500
8542@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8543
8544 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8545 {
8546+ pax_open_kernel();
8547 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8548+ pax_close_kernel();
8549 }
8550
8551 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8552 {
8553+ pax_open_kernel();
8554 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8555+ pax_close_kernel();
8556 }
8557
8558 /*
8559diff -urNp linux-3.1.4/arch/x86/include/asm/pgtable_64.h linux-3.1.4/arch/x86/include/asm/pgtable_64.h
8560--- linux-3.1.4/arch/x86/include/asm/pgtable_64.h 2011-11-11 15:19:27.000000000 -0500
8561+++ linux-3.1.4/arch/x86/include/asm/pgtable_64.h 2011-11-16 18:39:07.000000000 -0500
8562@@ -16,10 +16,13 @@
8563
8564 extern pud_t level3_kernel_pgt[512];
8565 extern pud_t level3_ident_pgt[512];
8566+extern pud_t level3_vmalloc_pgt[512];
8567+extern pud_t level3_vmemmap_pgt[512];
8568+extern pud_t level2_vmemmap_pgt[512];
8569 extern pmd_t level2_kernel_pgt[512];
8570 extern pmd_t level2_fixmap_pgt[512];
8571-extern pmd_t level2_ident_pgt[512];
8572-extern pgd_t init_level4_pgt[];
8573+extern pmd_t level2_ident_pgt[512*2];
8574+extern pgd_t init_level4_pgt[512];
8575
8576 #define swapper_pg_dir init_level4_pgt
8577
8578@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8579
8580 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8581 {
8582+ pax_open_kernel();
8583 *pmdp = pmd;
8584+ pax_close_kernel();
8585 }
8586
8587 static inline void native_pmd_clear(pmd_t *pmd)
8588@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8589
8590 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8591 {
8592+ pax_open_kernel();
8593+ *pgdp = pgd;
8594+ pax_close_kernel();
8595+}
8596+
8597+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8598+{
8599 *pgdp = pgd;
8600 }
8601
8602diff -urNp linux-3.1.4/arch/x86/include/asm/pgtable_64_types.h linux-3.1.4/arch/x86/include/asm/pgtable_64_types.h
8603--- linux-3.1.4/arch/x86/include/asm/pgtable_64_types.h 2011-11-11 15:19:27.000000000 -0500
8604+++ linux-3.1.4/arch/x86/include/asm/pgtable_64_types.h 2011-11-16 18:39:07.000000000 -0500
8605@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8606 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8607 #define MODULES_END _AC(0xffffffffff000000, UL)
8608 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8609+#define MODULES_EXEC_VADDR MODULES_VADDR
8610+#define MODULES_EXEC_END MODULES_END
8611+
8612+#define ktla_ktva(addr) (addr)
8613+#define ktva_ktla(addr) (addr)
8614
8615 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8616diff -urNp linux-3.1.4/arch/x86/include/asm/pgtable.h linux-3.1.4/arch/x86/include/asm/pgtable.h
8617--- linux-3.1.4/arch/x86/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
8618+++ linux-3.1.4/arch/x86/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
8619@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8620
8621 #ifndef __PAGETABLE_PUD_FOLDED
8622 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8623+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8624 #define pgd_clear(pgd) native_pgd_clear(pgd)
8625 #endif
8626
8627@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8628
8629 #define arch_end_context_switch(prev) do {} while(0)
8630
8631+#define pax_open_kernel() native_pax_open_kernel()
8632+#define pax_close_kernel() native_pax_close_kernel()
8633 #endif /* CONFIG_PARAVIRT */
8634
8635+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8636+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8637+
8638+#ifdef CONFIG_PAX_KERNEXEC
8639+static inline unsigned long native_pax_open_kernel(void)
8640+{
8641+ unsigned long cr0;
8642+
8643+ preempt_disable();
8644+ barrier();
8645+ cr0 = read_cr0() ^ X86_CR0_WP;
8646+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8647+ write_cr0(cr0);
8648+ return cr0 ^ X86_CR0_WP;
8649+}
8650+
8651+static inline unsigned long native_pax_close_kernel(void)
8652+{
8653+ unsigned long cr0;
8654+
8655+ cr0 = read_cr0() ^ X86_CR0_WP;
8656+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8657+ write_cr0(cr0);
8658+ barrier();
8659+ preempt_enable_no_resched();
8660+ return cr0 ^ X86_CR0_WP;
8661+}
8662+#else
8663+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8664+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8665+#endif
8666+
8667 /*
8668 * The following only work if pte_present() is true.
8669 * Undefined behaviour if not..
8670 */
8671+static inline int pte_user(pte_t pte)
8672+{
8673+ return pte_val(pte) & _PAGE_USER;
8674+}
8675+
8676 static inline int pte_dirty(pte_t pte)
8677 {
8678 return pte_flags(pte) & _PAGE_DIRTY;
8679@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8680 return pte_clear_flags(pte, _PAGE_RW);
8681 }
8682
8683+static inline pte_t pte_mkread(pte_t pte)
8684+{
8685+ return __pte(pte_val(pte) | _PAGE_USER);
8686+}
8687+
8688 static inline pte_t pte_mkexec(pte_t pte)
8689 {
8690- return pte_clear_flags(pte, _PAGE_NX);
8691+#ifdef CONFIG_X86_PAE
8692+ if (__supported_pte_mask & _PAGE_NX)
8693+ return pte_clear_flags(pte, _PAGE_NX);
8694+ else
8695+#endif
8696+ return pte_set_flags(pte, _PAGE_USER);
8697+}
8698+
8699+static inline pte_t pte_exprotect(pte_t pte)
8700+{
8701+#ifdef CONFIG_X86_PAE
8702+ if (__supported_pte_mask & _PAGE_NX)
8703+ return pte_set_flags(pte, _PAGE_NX);
8704+ else
8705+#endif
8706+ return pte_clear_flags(pte, _PAGE_USER);
8707 }
8708
8709 static inline pte_t pte_mkdirty(pte_t pte)
8710@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8711 #endif
8712
8713 #ifndef __ASSEMBLY__
8714+
8715+#ifdef CONFIG_PAX_PER_CPU_PGD
8716+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8717+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8718+{
8719+ return cpu_pgd[cpu];
8720+}
8721+#endif
8722+
8723 #include <linux/mm_types.h>
8724
8725 static inline int pte_none(pte_t pte)
8726@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8727
8728 static inline int pgd_bad(pgd_t pgd)
8729 {
8730- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8731+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8732 }
8733
8734 static inline int pgd_none(pgd_t pgd)
8735@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8736 * pgd_offset() returns a (pgd_t *)
8737 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8738 */
8739-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8740+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8741+
8742+#ifdef CONFIG_PAX_PER_CPU_PGD
8743+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8744+#endif
8745+
8746 /*
8747 * a shortcut which implies the use of the kernel's pgd, instead
8748 * of a process's
8749@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8750 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8751 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8752
8753+#ifdef CONFIG_X86_32
8754+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8755+#else
8756+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8757+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8758+
8759+#ifdef CONFIG_PAX_MEMORY_UDEREF
8760+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8761+#else
8762+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8763+#endif
8764+
8765+#endif
8766+
8767 #ifndef __ASSEMBLY__
8768
8769 extern int direct_gbpages;
8770@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8771 * dst and src can be on the same page, but the range must not overlap,
8772 * and must not cross a page boundary.
8773 */
8774-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8775+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8776 {
8777- memcpy(dst, src, count * sizeof(pgd_t));
8778+ pax_open_kernel();
8779+ while (count--)
8780+ *dst++ = *src++;
8781+ pax_close_kernel();
8782 }
8783
8784+#ifdef CONFIG_PAX_PER_CPU_PGD
8785+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8786+#endif
8787+
8788+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8789+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8790+#else
8791+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8792+#endif
8793
8794 #include <asm-generic/pgtable.h>
8795 #endif /* __ASSEMBLY__ */
8796diff -urNp linux-3.1.4/arch/x86/include/asm/pgtable_types.h linux-3.1.4/arch/x86/include/asm/pgtable_types.h
8797--- linux-3.1.4/arch/x86/include/asm/pgtable_types.h 2011-11-11 15:19:27.000000000 -0500
8798+++ linux-3.1.4/arch/x86/include/asm/pgtable_types.h 2011-11-16 18:39:07.000000000 -0500
8799@@ -16,13 +16,12 @@
8800 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8801 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8802 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8803-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8804+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8805 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8806 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8807 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8808-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8809-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8810-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8811+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8812+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8813 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8814
8815 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8816@@ -40,7 +39,6 @@
8817 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8818 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8819 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8820-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8821 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8822 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8823 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8824@@ -57,8 +55,10 @@
8825
8826 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8827 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8828-#else
8829+#elif defined(CONFIG_KMEMCHECK)
8830 #define _PAGE_NX (_AT(pteval_t, 0))
8831+#else
8832+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8833 #endif
8834
8835 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8836@@ -96,6 +96,9 @@
8837 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8838 _PAGE_ACCESSED)
8839
8840+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8841+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8842+
8843 #define __PAGE_KERNEL_EXEC \
8844 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8845 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8846@@ -106,7 +109,7 @@
8847 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8848 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8849 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8850-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8851+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8852 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
8853 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
8854 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8855@@ -168,8 +171,8 @@
8856 * bits are combined, this will alow user to access the high address mapped
8857 * VDSO in the presence of CONFIG_COMPAT_VDSO
8858 */
8859-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8860-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8861+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8862+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8863 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8864 #endif
8865
8866@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8867 {
8868 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8869 }
8870+#endif
8871
8872+#if PAGETABLE_LEVELS == 3
8873+#include <asm-generic/pgtable-nopud.h>
8874+#endif
8875+
8876+#if PAGETABLE_LEVELS == 2
8877+#include <asm-generic/pgtable-nopmd.h>
8878+#endif
8879+
8880+#ifndef __ASSEMBLY__
8881 #if PAGETABLE_LEVELS > 3
8882 typedef struct { pudval_t pud; } pud_t;
8883
8884@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pu
8885 return pud.pud;
8886 }
8887 #else
8888-#include <asm-generic/pgtable-nopud.h>
8889-
8890 static inline pudval_t native_pud_val(pud_t pud)
8891 {
8892 return native_pgd_val(pud.pgd);
8893@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pm
8894 return pmd.pmd;
8895 }
8896 #else
8897-#include <asm-generic/pgtable-nopmd.h>
8898-
8899 static inline pmdval_t native_pmd_val(pmd_t pmd)
8900 {
8901 return native_pgd_val(pmd.pud.pgd);
8902@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
8903
8904 extern pteval_t __supported_pte_mask;
8905 extern void set_nx(void);
8906-extern int nx_enabled;
8907
8908 #define pgprot_writecombine pgprot_writecombine
8909 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8910diff -urNp linux-3.1.4/arch/x86/include/asm/processor.h linux-3.1.4/arch/x86/include/asm/processor.h
8911--- linux-3.1.4/arch/x86/include/asm/processor.h 2011-11-11 15:19:27.000000000 -0500
8912+++ linux-3.1.4/arch/x86/include/asm/processor.h 2011-11-16 18:39:07.000000000 -0500
8913@@ -266,7 +266,7 @@ struct tss_struct {
8914
8915 } ____cacheline_aligned;
8916
8917-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8918+extern struct tss_struct init_tss[NR_CPUS];
8919
8920 /*
8921 * Save the original ist values for checking stack pointers during debugging
8922@@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(co
8923 */
8924 #define TASK_SIZE PAGE_OFFSET
8925 #define TASK_SIZE_MAX TASK_SIZE
8926+
8927+#ifdef CONFIG_PAX_SEGMEXEC
8928+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8929+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8930+#else
8931 #define STACK_TOP TASK_SIZE
8932-#define STACK_TOP_MAX STACK_TOP
8933+#endif
8934+
8935+#define STACK_TOP_MAX TASK_SIZE
8936
8937 #define INIT_THREAD { \
8938- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8939+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8940 .vm86_info = NULL, \
8941 .sysenter_cs = __KERNEL_CS, \
8942 .io_bitmap_ptr = NULL, \
8943@@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(co
8944 */
8945 #define INIT_TSS { \
8946 .x86_tss = { \
8947- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8948+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8949 .ss0 = __KERNEL_DS, \
8950 .ss1 = __KERNEL_CS, \
8951 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8952@@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(co
8953 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8954
8955 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8956-#define KSTK_TOP(info) \
8957-({ \
8958- unsigned long *__ptr = (unsigned long *)(info); \
8959- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8960-})
8961+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8962
8963 /*
8964 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8965@@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(str
8966 #define task_pt_regs(task) \
8967 ({ \
8968 struct pt_regs *__regs__; \
8969- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8970+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8971 __regs__ - 1; \
8972 })
8973
8974@@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(str
8975 /*
8976 * User space process size. 47bits minus one guard page.
8977 */
8978-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8979+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8980
8981 /* This decides where the kernel will search for a free chunk of vm
8982 * space during mmap's.
8983 */
8984 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8985- 0xc0000000 : 0xFFFFe000)
8986+ 0xc0000000 : 0xFFFFf000)
8987
8988 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8989 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8990@@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(str
8991 #define STACK_TOP_MAX TASK_SIZE_MAX
8992
8993 #define INIT_THREAD { \
8994- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8995+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8996 }
8997
8998 #define INIT_TSS { \
8999- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9000+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9001 }
9002
9003 /*
9004@@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs
9005 */
9006 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9007
9008+#ifdef CONFIG_PAX_SEGMEXEC
9009+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9010+#endif
9011+
9012 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9013
9014 /* Get/set a process' ability to use the timestamp counter instruction */
9015diff -urNp linux-3.1.4/arch/x86/include/asm/ptrace.h linux-3.1.4/arch/x86/include/asm/ptrace.h
9016--- linux-3.1.4/arch/x86/include/asm/ptrace.h 2011-11-11 15:19:27.000000000 -0500
9017+++ linux-3.1.4/arch/x86/include/asm/ptrace.h 2011-11-16 18:39:07.000000000 -0500
9018@@ -156,28 +156,29 @@ static inline unsigned long regs_return_
9019 }
9020
9021 /*
9022- * user_mode_vm(regs) determines whether a register set came from user mode.
9023+ * user_mode(regs) determines whether a register set came from user mode.
9024 * This is true if V8086 mode was enabled OR if the register set was from
9025 * protected mode with RPL-3 CS value. This tricky test checks that with
9026 * one comparison. Many places in the kernel can bypass this full check
9027- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9028+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9029+ * be used.
9030 */
9031-static inline int user_mode(struct pt_regs *regs)
9032+static inline int user_mode_novm(struct pt_regs *regs)
9033 {
9034 #ifdef CONFIG_X86_32
9035 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9036 #else
9037- return !!(regs->cs & 3);
9038+ return !!(regs->cs & SEGMENT_RPL_MASK);
9039 #endif
9040 }
9041
9042-static inline int user_mode_vm(struct pt_regs *regs)
9043+static inline int user_mode(struct pt_regs *regs)
9044 {
9045 #ifdef CONFIG_X86_32
9046 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9047 USER_RPL;
9048 #else
9049- return user_mode(regs);
9050+ return user_mode_novm(regs);
9051 #endif
9052 }
9053
9054@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_r
9055 #ifdef CONFIG_X86_64
9056 static inline bool user_64bit_mode(struct pt_regs *regs)
9057 {
9058+ unsigned long cs = regs->cs & 0xffff;
9059 #ifndef CONFIG_PARAVIRT
9060 /*
9061 * On non-paravirt systems, this is the only long mode CPL 3
9062 * selector. We do not allow long mode selectors in the LDT.
9063 */
9064- return regs->cs == __USER_CS;
9065+ return cs == __USER_CS;
9066 #else
9067 /* Headers are too twisted for this to go in paravirt.h. */
9068- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9069+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9070 #endif
9071 }
9072 #endif
9073diff -urNp linux-3.1.4/arch/x86/include/asm/reboot.h linux-3.1.4/arch/x86/include/asm/reboot.h
9074--- linux-3.1.4/arch/x86/include/asm/reboot.h 2011-11-11 15:19:27.000000000 -0500
9075+++ linux-3.1.4/arch/x86/include/asm/reboot.h 2011-11-16 18:39:07.000000000 -0500
9076@@ -6,19 +6,19 @@
9077 struct pt_regs;
9078
9079 struct machine_ops {
9080- void (*restart)(char *cmd);
9081- void (*halt)(void);
9082- void (*power_off)(void);
9083+ void (* __noreturn restart)(char *cmd);
9084+ void (* __noreturn halt)(void);
9085+ void (* __noreturn power_off)(void);
9086 void (*shutdown)(void);
9087 void (*crash_shutdown)(struct pt_regs *);
9088- void (*emergency_restart)(void);
9089-};
9090+ void (* __noreturn emergency_restart)(void);
9091+} __no_const;
9092
9093 extern struct machine_ops machine_ops;
9094
9095 void native_machine_crash_shutdown(struct pt_regs *regs);
9096 void native_machine_shutdown(void);
9097-void machine_real_restart(unsigned int type);
9098+void machine_real_restart(unsigned int type) __noreturn;
9099 /* These must match dispatch_table in reboot_32.S */
9100 #define MRR_BIOS 0
9101 #define MRR_APM 1
9102diff -urNp linux-3.1.4/arch/x86/include/asm/rwsem.h linux-3.1.4/arch/x86/include/asm/rwsem.h
9103--- linux-3.1.4/arch/x86/include/asm/rwsem.h 2011-11-11 15:19:27.000000000 -0500
9104+++ linux-3.1.4/arch/x86/include/asm/rwsem.h 2011-11-16 18:39:07.000000000 -0500
9105@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
9106 {
9107 asm volatile("# beginning down_read\n\t"
9108 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9109+
9110+#ifdef CONFIG_PAX_REFCOUNT
9111+ "jno 0f\n"
9112+ LOCK_PREFIX _ASM_DEC "(%1)\n"
9113+ "int $4\n0:\n"
9114+ _ASM_EXTABLE(0b, 0b)
9115+#endif
9116+
9117 /* adds 0x00000001 */
9118 " jns 1f\n"
9119 " call call_rwsem_down_read_failed\n"
9120@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
9121 "1:\n\t"
9122 " mov %1,%2\n\t"
9123 " add %3,%2\n\t"
9124+
9125+#ifdef CONFIG_PAX_REFCOUNT
9126+ "jno 0f\n"
9127+ "sub %3,%2\n"
9128+ "int $4\n0:\n"
9129+ _ASM_EXTABLE(0b, 0b)
9130+#endif
9131+
9132 " jle 2f\n\t"
9133 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9134 " jnz 1b\n\t"
9135@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
9136 long tmp;
9137 asm volatile("# beginning down_write\n\t"
9138 LOCK_PREFIX " xadd %1,(%2)\n\t"
9139+
9140+#ifdef CONFIG_PAX_REFCOUNT
9141+ "jno 0f\n"
9142+ "mov %1,(%2)\n"
9143+ "int $4\n0:\n"
9144+ _ASM_EXTABLE(0b, 0b)
9145+#endif
9146+
9147 /* adds 0xffff0001, returns the old value */
9148 " test %1,%1\n\t"
9149 /* was the count 0 before? */
9150@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
9151 long tmp;
9152 asm volatile("# beginning __up_read\n\t"
9153 LOCK_PREFIX " xadd %1,(%2)\n\t"
9154+
9155+#ifdef CONFIG_PAX_REFCOUNT
9156+ "jno 0f\n"
9157+ "mov %1,(%2)\n"
9158+ "int $4\n0:\n"
9159+ _ASM_EXTABLE(0b, 0b)
9160+#endif
9161+
9162 /* subtracts 1, returns the old value */
9163 " jns 1f\n\t"
9164 " call call_rwsem_wake\n" /* expects old value in %edx */
9165@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
9166 long tmp;
9167 asm volatile("# beginning __up_write\n\t"
9168 LOCK_PREFIX " xadd %1,(%2)\n\t"
9169+
9170+#ifdef CONFIG_PAX_REFCOUNT
9171+ "jno 0f\n"
9172+ "mov %1,(%2)\n"
9173+ "int $4\n0:\n"
9174+ _ASM_EXTABLE(0b, 0b)
9175+#endif
9176+
9177 /* subtracts 0xffff0001, returns the old value */
9178 " jns 1f\n\t"
9179 " call call_rwsem_wake\n" /* expects old value in %edx */
9180@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
9181 {
9182 asm volatile("# beginning __downgrade_write\n\t"
9183 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9184+
9185+#ifdef CONFIG_PAX_REFCOUNT
9186+ "jno 0f\n"
9187+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9188+ "int $4\n0:\n"
9189+ _ASM_EXTABLE(0b, 0b)
9190+#endif
9191+
9192 /*
9193 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9194 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9195@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
9196 */
9197 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
9198 {
9199- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9200+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9201+
9202+#ifdef CONFIG_PAX_REFCOUNT
9203+ "jno 0f\n"
9204+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
9205+ "int $4\n0:\n"
9206+ _ASM_EXTABLE(0b, 0b)
9207+#endif
9208+
9209 : "+m" (sem->count)
9210 : "er" (delta));
9211 }
9212@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
9213 {
9214 long tmp = delta;
9215
9216- asm volatile(LOCK_PREFIX "xadd %0,%1"
9217+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9218+
9219+#ifdef CONFIG_PAX_REFCOUNT
9220+ "jno 0f\n"
9221+ "mov %0,%1\n"
9222+ "int $4\n0:\n"
9223+ _ASM_EXTABLE(0b, 0b)
9224+#endif
9225+
9226 : "+r" (tmp), "+m" (sem->count)
9227 : : "memory");
9228
9229diff -urNp linux-3.1.4/arch/x86/include/asm/segment.h linux-3.1.4/arch/x86/include/asm/segment.h
9230--- linux-3.1.4/arch/x86/include/asm/segment.h 2011-11-11 15:19:27.000000000 -0500
9231+++ linux-3.1.4/arch/x86/include/asm/segment.h 2011-11-16 18:39:07.000000000 -0500
9232@@ -64,10 +64,15 @@
9233 * 26 - ESPFIX small SS
9234 * 27 - per-cpu [ offset to per-cpu data area ]
9235 * 28 - stack_canary-20 [ for stack protector ]
9236- * 29 - unused
9237- * 30 - unused
9238+ * 29 - PCI BIOS CS
9239+ * 30 - PCI BIOS DS
9240 * 31 - TSS for double fault handler
9241 */
9242+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
9243+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
9244+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
9245+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
9246+
9247 #define GDT_ENTRY_TLS_MIN 6
9248 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
9249
9250@@ -79,6 +84,8 @@
9251
9252 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
9253
9254+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9255+
9256 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
9257
9258 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
9259@@ -104,6 +111,12 @@
9260 #define __KERNEL_STACK_CANARY 0
9261 #endif
9262
9263+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
9264+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9265+
9266+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
9267+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9268+
9269 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9270
9271 /*
9272@@ -141,7 +154,7 @@
9273 */
9274
9275 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9276-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9277+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
9278
9279
9280 #else
9281@@ -165,6 +178,8 @@
9282 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
9283 #define __USER32_DS __USER_DS
9284
9285+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
9286+
9287 #define GDT_ENTRY_TSS 8 /* needs two entries */
9288 #define GDT_ENTRY_LDT 10 /* needs two entries */
9289 #define GDT_ENTRY_TLS_MIN 12
9290@@ -185,6 +200,7 @@
9291 #endif
9292
9293 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
9294+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
9295 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
9296 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
9297 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
9298diff -urNp linux-3.1.4/arch/x86/include/asm/smp.h linux-3.1.4/arch/x86/include/asm/smp.h
9299--- linux-3.1.4/arch/x86/include/asm/smp.h 2011-11-11 15:19:27.000000000 -0500
9300+++ linux-3.1.4/arch/x86/include/asm/smp.h 2011-11-16 18:39:07.000000000 -0500
9301@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
9302 /* cpus sharing the last level cache: */
9303 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
9304 DECLARE_PER_CPU(u16, cpu_llc_id);
9305-DECLARE_PER_CPU(int, cpu_number);
9306+DECLARE_PER_CPU(unsigned int, cpu_number);
9307
9308 static inline struct cpumask *cpu_sibling_mask(int cpu)
9309 {
9310@@ -77,7 +77,7 @@ struct smp_ops {
9311
9312 void (*send_call_func_ipi)(const struct cpumask *mask);
9313 void (*send_call_func_single_ipi)(int cpu);
9314-};
9315+} __no_const;
9316
9317 /* Globals due to paravirt */
9318 extern void set_cpu_sibling_map(int cpu);
9319@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
9320 extern int safe_smp_processor_id(void);
9321
9322 #elif defined(CONFIG_X86_64_SMP)
9323-#define raw_smp_processor_id() (percpu_read(cpu_number))
9324-
9325-#define stack_smp_processor_id() \
9326-({ \
9327- struct thread_info *ti; \
9328- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
9329- ti->cpu; \
9330-})
9331+#define raw_smp_processor_id() (percpu_read(cpu_number))
9332+#define stack_smp_processor_id() raw_smp_processor_id()
9333 #define safe_smp_processor_id() smp_processor_id()
9334
9335 #endif
9336diff -urNp linux-3.1.4/arch/x86/include/asm/spinlock.h linux-3.1.4/arch/x86/include/asm/spinlock.h
9337--- linux-3.1.4/arch/x86/include/asm/spinlock.h 2011-11-11 15:19:27.000000000 -0500
9338+++ linux-3.1.4/arch/x86/include/asm/spinlock.h 2011-11-16 18:39:07.000000000 -0500
9339@@ -248,6 +248,14 @@ static inline int arch_write_can_lock(ar
9340 static inline void arch_read_lock(arch_rwlock_t *rw)
9341 {
9342 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
9343+
9344+#ifdef CONFIG_PAX_REFCOUNT
9345+ "jno 0f\n"
9346+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
9347+ "int $4\n0:\n"
9348+ _ASM_EXTABLE(0b, 0b)
9349+#endif
9350+
9351 "jns 1f\n"
9352 "call __read_lock_failed\n\t"
9353 "1:\n"
9354@@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_r
9355 static inline void arch_write_lock(arch_rwlock_t *rw)
9356 {
9357 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
9358+
9359+#ifdef CONFIG_PAX_REFCOUNT
9360+ "jno 0f\n"
9361+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
9362+ "int $4\n0:\n"
9363+ _ASM_EXTABLE(0b, 0b)
9364+#endif
9365+
9366 "jz 1f\n"
9367 "call __write_lock_failed\n\t"
9368 "1:\n"
9369@@ -286,13 +302,29 @@ static inline int arch_write_trylock(arc
9370
9371 static inline void arch_read_unlock(arch_rwlock_t *rw)
9372 {
9373- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
9374+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
9375+
9376+#ifdef CONFIG_PAX_REFCOUNT
9377+ "jno 0f\n"
9378+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
9379+ "int $4\n0:\n"
9380+ _ASM_EXTABLE(0b, 0b)
9381+#endif
9382+
9383 :"+m" (rw->lock) : : "memory");
9384 }
9385
9386 static inline void arch_write_unlock(arch_rwlock_t *rw)
9387 {
9388- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
9389+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
9390+
9391+#ifdef CONFIG_PAX_REFCOUNT
9392+ "jno 0f\n"
9393+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
9394+ "int $4\n0:\n"
9395+ _ASM_EXTABLE(0b, 0b)
9396+#endif
9397+
9398 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
9399 }
9400
9401diff -urNp linux-3.1.4/arch/x86/include/asm/stackprotector.h linux-3.1.4/arch/x86/include/asm/stackprotector.h
9402--- linux-3.1.4/arch/x86/include/asm/stackprotector.h 2011-11-11 15:19:27.000000000 -0500
9403+++ linux-3.1.4/arch/x86/include/asm/stackprotector.h 2011-11-16 18:39:07.000000000 -0500
9404@@ -48,7 +48,7 @@
9405 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9406 */
9407 #define GDT_STACK_CANARY_INIT \
9408- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9409+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9410
9411 /*
9412 * Initialize the stackprotector canary value.
9413@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9414
9415 static inline void load_stack_canary_segment(void)
9416 {
9417-#ifdef CONFIG_X86_32
9418+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9419 asm volatile ("mov %0, %%gs" : : "r" (0));
9420 #endif
9421 }
9422diff -urNp linux-3.1.4/arch/x86/include/asm/stacktrace.h linux-3.1.4/arch/x86/include/asm/stacktrace.h
9423--- linux-3.1.4/arch/x86/include/asm/stacktrace.h 2011-11-11 15:19:27.000000000 -0500
9424+++ linux-3.1.4/arch/x86/include/asm/stacktrace.h 2011-11-16 18:39:07.000000000 -0500
9425@@ -11,28 +11,20 @@
9426
9427 extern int kstack_depth_to_print;
9428
9429-struct thread_info;
9430+struct task_struct;
9431 struct stacktrace_ops;
9432
9433-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9434- unsigned long *stack,
9435- unsigned long bp,
9436- const struct stacktrace_ops *ops,
9437- void *data,
9438- unsigned long *end,
9439- int *graph);
9440-
9441-extern unsigned long
9442-print_context_stack(struct thread_info *tinfo,
9443- unsigned long *stack, unsigned long bp,
9444- const struct stacktrace_ops *ops, void *data,
9445- unsigned long *end, int *graph);
9446-
9447-extern unsigned long
9448-print_context_stack_bp(struct thread_info *tinfo,
9449- unsigned long *stack, unsigned long bp,
9450- const struct stacktrace_ops *ops, void *data,
9451- unsigned long *end, int *graph);
9452+typedef unsigned long walk_stack_t(struct task_struct *task,
9453+ void *stack_start,
9454+ unsigned long *stack,
9455+ unsigned long bp,
9456+ const struct stacktrace_ops *ops,
9457+ void *data,
9458+ unsigned long *end,
9459+ int *graph);
9460+
9461+extern walk_stack_t print_context_stack;
9462+extern walk_stack_t print_context_stack_bp;
9463
9464 /* Generic stack tracer with callbacks */
9465
9466@@ -40,7 +32,7 @@ struct stacktrace_ops {
9467 void (*address)(void *data, unsigned long address, int reliable);
9468 /* On negative return stop dumping */
9469 int (*stack)(void *data, char *name);
9470- walk_stack_t walk_stack;
9471+ walk_stack_t *walk_stack;
9472 };
9473
9474 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9475diff -urNp linux-3.1.4/arch/x86/include/asm/sys_ia32.h linux-3.1.4/arch/x86/include/asm/sys_ia32.h
9476--- linux-3.1.4/arch/x86/include/asm/sys_ia32.h 2011-11-11 15:19:27.000000000 -0500
9477+++ linux-3.1.4/arch/x86/include/asm/sys_ia32.h 2011-11-16 18:39:07.000000000 -0500
9478@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9479 compat_sigset_t __user *, unsigned int);
9480 asmlinkage long sys32_alarm(unsigned int);
9481
9482-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9483+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9484 asmlinkage long sys32_sysfs(int, u32, u32);
9485
9486 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9487diff -urNp linux-3.1.4/arch/x86/include/asm/system.h linux-3.1.4/arch/x86/include/asm/system.h
9488--- linux-3.1.4/arch/x86/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
9489+++ linux-3.1.4/arch/x86/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
9490@@ -129,7 +129,7 @@ do { \
9491 "call __switch_to\n\t" \
9492 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9493 __switch_canary \
9494- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9495+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9496 "movq %%rax,%%rdi\n\t" \
9497 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9498 "jnz ret_from_fork\n\t" \
9499@@ -140,7 +140,7 @@ do { \
9500 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9501 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9502 [_tif_fork] "i" (_TIF_FORK), \
9503- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9504+ [thread_info] "m" (current_tinfo), \
9505 [current_task] "m" (current_task) \
9506 __switch_canary_iparam \
9507 : "memory", "cc" __EXTRA_CLOBBER)
9508@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9509 {
9510 unsigned long __limit;
9511 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9512- return __limit + 1;
9513+ return __limit;
9514 }
9515
9516 static inline void native_clts(void)
9517@@ -397,12 +397,12 @@ void enable_hlt(void);
9518
9519 void cpu_idle_wait(void);
9520
9521-extern unsigned long arch_align_stack(unsigned long sp);
9522+#define arch_align_stack(x) ((x) & ~0xfUL)
9523 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9524
9525 void default_idle(void);
9526
9527-void stop_this_cpu(void *dummy);
9528+void stop_this_cpu(void *dummy) __noreturn;
9529
9530 /*
9531 * Force strict CPU ordering.
9532diff -urNp linux-3.1.4/arch/x86/include/asm/thread_info.h linux-3.1.4/arch/x86/include/asm/thread_info.h
9533--- linux-3.1.4/arch/x86/include/asm/thread_info.h 2011-11-11 15:19:27.000000000 -0500
9534+++ linux-3.1.4/arch/x86/include/asm/thread_info.h 2011-11-16 18:39:07.000000000 -0500
9535@@ -10,6 +10,7 @@
9536 #include <linux/compiler.h>
9537 #include <asm/page.h>
9538 #include <asm/types.h>
9539+#include <asm/percpu.h>
9540
9541 /*
9542 * low level task data that entry.S needs immediate access to
9543@@ -24,7 +25,6 @@ struct exec_domain;
9544 #include <linux/atomic.h>
9545
9546 struct thread_info {
9547- struct task_struct *task; /* main task structure */
9548 struct exec_domain *exec_domain; /* execution domain */
9549 __u32 flags; /* low level flags */
9550 __u32 status; /* thread synchronous flags */
9551@@ -34,18 +34,12 @@ struct thread_info {
9552 mm_segment_t addr_limit;
9553 struct restart_block restart_block;
9554 void __user *sysenter_return;
9555-#ifdef CONFIG_X86_32
9556- unsigned long previous_esp; /* ESP of the previous stack in
9557- case of nested (IRQ) stacks
9558- */
9559- __u8 supervisor_stack[0];
9560-#endif
9561+ unsigned long lowest_stack;
9562 int uaccess_err;
9563 };
9564
9565-#define INIT_THREAD_INFO(tsk) \
9566+#define INIT_THREAD_INFO \
9567 { \
9568- .task = &tsk, \
9569 .exec_domain = &default_exec_domain, \
9570 .flags = 0, \
9571 .cpu = 0, \
9572@@ -56,7 +50,7 @@ struct thread_info {
9573 }, \
9574 }
9575
9576-#define init_thread_info (init_thread_union.thread_info)
9577+#define init_thread_info (init_thread_union.stack)
9578 #define init_stack (init_thread_union.stack)
9579
9580 #else /* !__ASSEMBLY__ */
9581@@ -170,6 +164,23 @@ struct thread_info {
9582 ret; \
9583 })
9584
9585+#ifdef __ASSEMBLY__
9586+/* how to get the thread information struct from ASM */
9587+#define GET_THREAD_INFO(reg) \
9588+ mov PER_CPU_VAR(current_tinfo), reg
9589+
9590+/* use this one if reg already contains %esp */
9591+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9592+#else
9593+/* how to get the thread information struct from C */
9594+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9595+
9596+static __always_inline struct thread_info *current_thread_info(void)
9597+{
9598+ return percpu_read_stable(current_tinfo);
9599+}
9600+#endif
9601+
9602 #ifdef CONFIG_X86_32
9603
9604 #define STACK_WARN (THREAD_SIZE/8)
9605@@ -180,35 +191,13 @@ struct thread_info {
9606 */
9607 #ifndef __ASSEMBLY__
9608
9609-
9610 /* how to get the current stack pointer from C */
9611 register unsigned long current_stack_pointer asm("esp") __used;
9612
9613-/* how to get the thread information struct from C */
9614-static inline struct thread_info *current_thread_info(void)
9615-{
9616- return (struct thread_info *)
9617- (current_stack_pointer & ~(THREAD_SIZE - 1));
9618-}
9619-
9620-#else /* !__ASSEMBLY__ */
9621-
9622-/* how to get the thread information struct from ASM */
9623-#define GET_THREAD_INFO(reg) \
9624- movl $-THREAD_SIZE, reg; \
9625- andl %esp, reg
9626-
9627-/* use this one if reg already contains %esp */
9628-#define GET_THREAD_INFO_WITH_ESP(reg) \
9629- andl $-THREAD_SIZE, reg
9630-
9631 #endif
9632
9633 #else /* X86_32 */
9634
9635-#include <asm/percpu.h>
9636-#define KERNEL_STACK_OFFSET (5*8)
9637-
9638 /*
9639 * macros/functions for gaining access to the thread information structure
9640 * preempt_count needs to be 1 initially, until the scheduler is functional.
9641@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9642 #ifndef __ASSEMBLY__
9643 DECLARE_PER_CPU(unsigned long, kernel_stack);
9644
9645-static inline struct thread_info *current_thread_info(void)
9646-{
9647- struct thread_info *ti;
9648- ti = (void *)(percpu_read_stable(kernel_stack) +
9649- KERNEL_STACK_OFFSET - THREAD_SIZE);
9650- return ti;
9651-}
9652-
9653-#else /* !__ASSEMBLY__ */
9654-
9655-/* how to get the thread information struct from ASM */
9656-#define GET_THREAD_INFO(reg) \
9657- movq PER_CPU_VAR(kernel_stack),reg ; \
9658- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9659-
9660+/* how to get the current stack pointer from C */
9661+register unsigned long current_stack_pointer asm("rsp") __used;
9662 #endif
9663
9664 #endif /* !X86_32 */
9665@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9666 extern void free_thread_info(struct thread_info *ti);
9667 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9668 #define arch_task_cache_init arch_task_cache_init
9669+
9670+#define __HAVE_THREAD_FUNCTIONS
9671+#define task_thread_info(task) (&(task)->tinfo)
9672+#define task_stack_page(task) ((task)->stack)
9673+#define setup_thread_stack(p, org) do {} while (0)
9674+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9675+
9676+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9677+extern struct task_struct *alloc_task_struct_node(int node);
9678+extern void free_task_struct(struct task_struct *);
9679+
9680 #endif
9681 #endif /* _ASM_X86_THREAD_INFO_H */
9682diff -urNp linux-3.1.4/arch/x86/include/asm/uaccess_32.h linux-3.1.4/arch/x86/include/asm/uaccess_32.h
9683--- linux-3.1.4/arch/x86/include/asm/uaccess_32.h 2011-11-11 15:19:27.000000000 -0500
9684+++ linux-3.1.4/arch/x86/include/asm/uaccess_32.h 2011-11-16 18:40:08.000000000 -0500
9685@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9686 static __always_inline unsigned long __must_check
9687 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9688 {
9689+ pax_track_stack();
9690+
9691+ if ((long)n < 0)
9692+ return n;
9693+
9694 if (__builtin_constant_p(n)) {
9695 unsigned long ret;
9696
9697@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9698 return ret;
9699 }
9700 }
9701+ if (!__builtin_constant_p(n))
9702+ check_object_size(from, n, true);
9703 return __copy_to_user_ll(to, from, n);
9704 }
9705
9706@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9707 __copy_to_user(void __user *to, const void *from, unsigned long n)
9708 {
9709 might_fault();
9710+
9711 return __copy_to_user_inatomic(to, from, n);
9712 }
9713
9714 static __always_inline unsigned long
9715 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9716 {
9717+ if ((long)n < 0)
9718+ return n;
9719+
9720 /* Avoid zeroing the tail if the copy fails..
9721 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9722 * but as the zeroing behaviour is only significant when n is not
9723@@ -137,6 +148,12 @@ static __always_inline unsigned long
9724 __copy_from_user(void *to, const void __user *from, unsigned long n)
9725 {
9726 might_fault();
9727+
9728+ pax_track_stack();
9729+
9730+ if ((long)n < 0)
9731+ return n;
9732+
9733 if (__builtin_constant_p(n)) {
9734 unsigned long ret;
9735
9736@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9737 return ret;
9738 }
9739 }
9740+ if (!__builtin_constant_p(n))
9741+ check_object_size(to, n, false);
9742 return __copy_from_user_ll(to, from, n);
9743 }
9744
9745@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9746 const void __user *from, unsigned long n)
9747 {
9748 might_fault();
9749+
9750+ if ((long)n < 0)
9751+ return n;
9752+
9753 if (__builtin_constant_p(n)) {
9754 unsigned long ret;
9755
9756@@ -181,15 +204,19 @@ static __always_inline unsigned long
9757 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9758 unsigned long n)
9759 {
9760- return __copy_from_user_ll_nocache_nozero(to, from, n);
9761-}
9762+ if ((long)n < 0)
9763+ return n;
9764
9765-unsigned long __must_check copy_to_user(void __user *to,
9766- const void *from, unsigned long n);
9767-unsigned long __must_check _copy_from_user(void *to,
9768- const void __user *from,
9769- unsigned long n);
9770+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9771+}
9772
9773+extern void copy_to_user_overflow(void)
9774+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9775+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9776+#else
9777+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9778+#endif
9779+;
9780
9781 extern void copy_from_user_overflow(void)
9782 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9783@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9784 #endif
9785 ;
9786
9787-static inline unsigned long __must_check copy_from_user(void *to,
9788- const void __user *from,
9789- unsigned long n)
9790+/**
9791+ * copy_to_user: - Copy a block of data into user space.
9792+ * @to: Destination address, in user space.
9793+ * @from: Source address, in kernel space.
9794+ * @n: Number of bytes to copy.
9795+ *
9796+ * Context: User context only. This function may sleep.
9797+ *
9798+ * Copy data from kernel space to user space.
9799+ *
9800+ * Returns number of bytes that could not be copied.
9801+ * On success, this will be zero.
9802+ */
9803+static inline unsigned long __must_check
9804+copy_to_user(void __user *to, const void *from, unsigned long n)
9805+{
9806+ int sz = __compiletime_object_size(from);
9807+
9808+ if (unlikely(sz != -1 && sz < n))
9809+ copy_to_user_overflow();
9810+ else if (access_ok(VERIFY_WRITE, to, n))
9811+ n = __copy_to_user(to, from, n);
9812+ return n;
9813+}
9814+
9815+/**
9816+ * copy_from_user: - Copy a block of data from user space.
9817+ * @to: Destination address, in kernel space.
9818+ * @from: Source address, in user space.
9819+ * @n: Number of bytes to copy.
9820+ *
9821+ * Context: User context only. This function may sleep.
9822+ *
9823+ * Copy data from user space to kernel space.
9824+ *
9825+ * Returns number of bytes that could not be copied.
9826+ * On success, this will be zero.
9827+ *
9828+ * If some data could not be copied, this function will pad the copied
9829+ * data to the requested size using zero bytes.
9830+ */
9831+static inline unsigned long __must_check
9832+copy_from_user(void *to, const void __user *from, unsigned long n)
9833 {
9834 int sz = __compiletime_object_size(to);
9835
9836- if (likely(sz == -1 || sz >= n))
9837- n = _copy_from_user(to, from, n);
9838- else
9839+ if (unlikely(sz != -1 && sz < n))
9840 copy_from_user_overflow();
9841-
9842+ else if (access_ok(VERIFY_READ, from, n))
9843+ n = __copy_from_user(to, from, n);
9844+ else if ((long)n > 0) {
9845+ if (!__builtin_constant_p(n))
9846+ check_object_size(to, n, false);
9847+ memset(to, 0, n);
9848+ }
9849 return n;
9850 }
9851
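
The repeated "(long)n < 0" guards added above reject byte counts that are really negative values wrapped to a huge unsigned number, before any copying is attempted. A user-space sketch of the same idea (bounded_copy is a name made up for this example; not part of the patch):

#include <stdio.h>
#include <string.h>

static unsigned long bounded_copy(void *dst, const void *src, unsigned long n)
{
        if ((long)n < 0)        /* e.g. n == (unsigned long)-4 after an underflow */
                return n;       /* report every byte as "not copied" */
        memcpy(dst, src, n);
        return 0;               /* 0 bytes left uncopied */
}

int main(void)
{
        char dst[16], src[16] = "hello";

        printf("ok:  %lu left\n", bounded_copy(dst, src, 6));
        printf("bad: %lu left\n", bounded_copy(dst, src, (unsigned long)-4));
        return 0;
}
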
9852diff -urNp linux-3.1.4/arch/x86/include/asm/uaccess_64.h linux-3.1.4/arch/x86/include/asm/uaccess_64.h
9853--- linux-3.1.4/arch/x86/include/asm/uaccess_64.h 2011-11-11 15:19:27.000000000 -0500
9854+++ linux-3.1.4/arch/x86/include/asm/uaccess_64.h 2011-11-16 18:40:08.000000000 -0500
9855@@ -10,6 +10,9 @@
9856 #include <asm/alternative.h>
9857 #include <asm/cpufeature.h>
9858 #include <asm/page.h>
9859+#include <asm/pgtable.h>
9860+
9861+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9862
9863 /*
9864 * Copy To/From Userspace
9865@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9866 return ret;
9867 }
9868
9869-__must_check unsigned long
9870-_copy_to_user(void __user *to, const void *from, unsigned len);
9871-__must_check unsigned long
9872-_copy_from_user(void *to, const void __user *from, unsigned len);
9873+static __always_inline __must_check unsigned long
9874+__copy_to_user(void __user *to, const void *from, unsigned len);
9875+static __always_inline __must_check unsigned long
9876+__copy_from_user(void *to, const void __user *from, unsigned len);
9877 __must_check unsigned long
9878 copy_in_user(void __user *to, const void __user *from, unsigned len);
9879
9880 static inline unsigned long __must_check copy_from_user(void *to,
9881 const void __user *from,
9882- unsigned long n)
9883+ unsigned n)
9884 {
9885- int sz = __compiletime_object_size(to);
9886-
9887 might_fault();
9888- if (likely(sz == -1 || sz >= n))
9889- n = _copy_from_user(to, from, n);
9890-#ifdef CONFIG_DEBUG_VM
9891- else
9892- WARN(1, "Buffer overflow detected!\n");
9893-#endif
9894+
9895+ if (access_ok(VERIFY_READ, from, n))
9896+ n = __copy_from_user(to, from, n);
9897+ else if ((int)n > 0) {
9898+ if (!__builtin_constant_p(n))
9899+ check_object_size(to, n, false);
9900+ memset(to, 0, n);
9901+ }
9902 return n;
9903 }
9904
9905@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9906 {
9907 might_fault();
9908
9909- return _copy_to_user(dst, src, size);
9910+ if (access_ok(VERIFY_WRITE, dst, size))
9911+ size = __copy_to_user(dst, src, size);
9912+ return size;
9913 }
9914
9915 static __always_inline __must_check
9916-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9917+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9918 {
9919- int ret = 0;
9920+ int sz = __compiletime_object_size(dst);
9921+ unsigned ret = 0;
9922
9923 might_fault();
9924- if (!__builtin_constant_p(size))
9925- return copy_user_generic(dst, (__force void *)src, size);
9926+
9927+ pax_track_stack();
9928+
9929+ if ((int)size < 0)
9930+ return size;
9931+
9932+#ifdef CONFIG_PAX_MEMORY_UDEREF
9933+ if (!__access_ok(VERIFY_READ, src, size))
9934+ return size;
9935+#endif
9936+
9937+ if (unlikely(sz != -1 && sz < size)) {
9938+#ifdef CONFIG_DEBUG_VM
9939+ WARN(1, "Buffer overflow detected!\n");
9940+#endif
9941+ return size;
9942+ }
9943+
9944+ if (!__builtin_constant_p(size)) {
9945+ check_object_size(dst, size, false);
9946+
9947+#ifdef CONFIG_PAX_MEMORY_UDEREF
9948+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9949+ src += PAX_USER_SHADOW_BASE;
9950+#endif
9951+
9952+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9953+ }
9954 switch (size) {
9955- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9956+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9957 ret, "b", "b", "=q", 1);
9958 return ret;
9959- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9960+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9961 ret, "w", "w", "=r", 2);
9962 return ret;
9963- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9964+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9965 ret, "l", "k", "=r", 4);
9966 return ret;
9967- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9968+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9969 ret, "q", "", "=r", 8);
9970 return ret;
9971 case 10:
9972- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9973+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9974 ret, "q", "", "=r", 10);
9975 if (unlikely(ret))
9976 return ret;
9977 __get_user_asm(*(u16 *)(8 + (char *)dst),
9978- (u16 __user *)(8 + (char __user *)src),
9979+ (const u16 __user *)(8 + (const char __user *)src),
9980 ret, "w", "w", "=r", 2);
9981 return ret;
9982 case 16:
9983- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9984+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9985 ret, "q", "", "=r", 16);
9986 if (unlikely(ret))
9987 return ret;
9988 __get_user_asm(*(u64 *)(8 + (char *)dst),
9989- (u64 __user *)(8 + (char __user *)src),
9990+ (const u64 __user *)(8 + (const char __user *)src),
9991 ret, "q", "", "=r", 8);
9992 return ret;
9993 default:
9994- return copy_user_generic(dst, (__force void *)src, size);
9995+
9996+#ifdef CONFIG_PAX_MEMORY_UDEREF
9997+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9998+ src += PAX_USER_SHADOW_BASE;
9999+#endif
10000+
10001+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
10002 }
10003 }
10004
10005 static __always_inline __must_check
10006-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10007+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10008 {
10009- int ret = 0;
10010+ int sz = __compiletime_object_size(src);
10011+ unsigned ret = 0;
10012
10013 might_fault();
10014- if (!__builtin_constant_p(size))
10015- return copy_user_generic((__force void *)dst, src, size);
10016+
10017+ pax_track_stack();
10018+
10019+ if ((int)size < 0)
10020+ return size;
10021+
10022+#ifdef CONFIG_PAX_MEMORY_UDEREF
10023+ if (!__access_ok(VERIFY_WRITE, dst, size))
10024+ return size;
10025+#endif
10026+
10027+ if (unlikely(sz != -1 && sz < size)) {
10028+#ifdef CONFIG_DEBUG_VM
10029+ WARN(1, "Buffer overflow detected!\n");
10030+#endif
10031+ return size;
10032+ }
10033+
10034+ if (!__builtin_constant_p(size)) {
10035+ check_object_size(src, size, true);
10036+
10037+#ifdef CONFIG_PAX_MEMORY_UDEREF
10038+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10039+ dst += PAX_USER_SHADOW_BASE;
10040+#endif
10041+
10042+ return copy_user_generic((__force_kernel void *)dst, src, size);
10043+ }
10044 switch (size) {
10045- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10046+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10047 ret, "b", "b", "iq", 1);
10048 return ret;
10049- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10050+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10051 ret, "w", "w", "ir", 2);
10052 return ret;
10053- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10054+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10055 ret, "l", "k", "ir", 4);
10056 return ret;
10057- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10058+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10059 ret, "q", "", "er", 8);
10060 return ret;
10061 case 10:
10062- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10063+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10064 ret, "q", "", "er", 10);
10065 if (unlikely(ret))
10066 return ret;
10067 asm("":::"memory");
10068- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10069+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10070 ret, "w", "w", "ir", 2);
10071 return ret;
10072 case 16:
10073- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10074+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10075 ret, "q", "", "er", 16);
10076 if (unlikely(ret))
10077 return ret;
10078 asm("":::"memory");
10079- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10080+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10081 ret, "q", "", "er", 8);
10082 return ret;
10083 default:
10084- return copy_user_generic((__force void *)dst, src, size);
10085+
10086+#ifdef CONFIG_PAX_MEMORY_UDEREF
10087+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10088+ dst += PAX_USER_SHADOW_BASE;
10089+#endif
10090+
10091+ return copy_user_generic((__force_kernel void *)dst, src, size);
10092 }
10093 }
10094
10095 static __always_inline __must_check
10096-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10097+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10098 {
10099- int ret = 0;
10100+ unsigned ret = 0;
10101
10102 might_fault();
10103- if (!__builtin_constant_p(size))
10104- return copy_user_generic((__force void *)dst,
10105- (__force void *)src, size);
10106+
10107+ if ((int)size < 0)
10108+ return size;
10109+
10110+#ifdef CONFIG_PAX_MEMORY_UDEREF
10111+ if (!__access_ok(VERIFY_READ, src, size))
10112+ return size;
10113+ if (!__access_ok(VERIFY_WRITE, dst, size))
10114+ return size;
10115+#endif
10116+
10117+ if (!__builtin_constant_p(size)) {
10118+
10119+#ifdef CONFIG_PAX_MEMORY_UDEREF
10120+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10121+ src += PAX_USER_SHADOW_BASE;
10122+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10123+ dst += PAX_USER_SHADOW_BASE;
10124+#endif
10125+
10126+ return copy_user_generic((__force_kernel void *)dst,
10127+ (__force_kernel const void *)src, size);
10128+ }
10129 switch (size) {
10130 case 1: {
10131 u8 tmp;
10132- __get_user_asm(tmp, (u8 __user *)src,
10133+ __get_user_asm(tmp, (const u8 __user *)src,
10134 ret, "b", "b", "=q", 1);
10135 if (likely(!ret))
10136 __put_user_asm(tmp, (u8 __user *)dst,
10137@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
10138 }
10139 case 2: {
10140 u16 tmp;
10141- __get_user_asm(tmp, (u16 __user *)src,
10142+ __get_user_asm(tmp, (const u16 __user *)src,
10143 ret, "w", "w", "=r", 2);
10144 if (likely(!ret))
10145 __put_user_asm(tmp, (u16 __user *)dst,
10146@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
10147
10148 case 4: {
10149 u32 tmp;
10150- __get_user_asm(tmp, (u32 __user *)src,
10151+ __get_user_asm(tmp, (const u32 __user *)src,
10152 ret, "l", "k", "=r", 4);
10153 if (likely(!ret))
10154 __put_user_asm(tmp, (u32 __user *)dst,
10155@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
10156 }
10157 case 8: {
10158 u64 tmp;
10159- __get_user_asm(tmp, (u64 __user *)src,
10160+ __get_user_asm(tmp, (const u64 __user *)src,
10161 ret, "q", "", "=r", 8);
10162 if (likely(!ret))
10163 __put_user_asm(tmp, (u64 __user *)dst,
10164@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
10165 return ret;
10166 }
10167 default:
10168- return copy_user_generic((__force void *)dst,
10169- (__force void *)src, size);
10170+
10171+#ifdef CONFIG_PAX_MEMORY_UDEREF
10172+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10173+ src += PAX_USER_SHADOW_BASE;
10174+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10175+ dst += PAX_USER_SHADOW_BASE;
10176+#endif
10177+
10178+ return copy_user_generic((__force_kernel void *)dst,
10179+ (__force_kernel const void *)src, size);
10180 }
10181 }
10182
10183@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
10184 static __must_check __always_inline int
10185 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10186 {
10187- return copy_user_generic(dst, (__force const void *)src, size);
10188+ pax_track_stack();
10189+
10190+ if ((int)size < 0)
10191+ return size;
10192+
10193+#ifdef CONFIG_PAX_MEMORY_UDEREF
10194+ if (!__access_ok(VERIFY_READ, src, size))
10195+ return size;
10196+
10197+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10198+ src += PAX_USER_SHADOW_BASE;
10199+#endif
10200+
10201+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
10202 }
10203
10204-static __must_check __always_inline int
10205+static __must_check __always_inline unsigned long
10206 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10207 {
10208- return copy_user_generic((__force void *)dst, src, size);
10209+ if ((int)size < 0)
10210+ return size;
10211+
10212+#ifdef CONFIG_PAX_MEMORY_UDEREF
10213+ if (!__access_ok(VERIFY_WRITE, dst, size))
10214+ return size;
10215+
10216+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10217+ dst += PAX_USER_SHADOW_BASE;
10218+#endif
10219+
10220+ return copy_user_generic((__force_kernel void *)dst, src, size);
10221 }
10222
10223-extern long __copy_user_nocache(void *dst, const void __user *src,
10224+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10225 unsigned size, int zerorest);
10226
10227-static inline int
10228-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10229+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10230 {
10231 might_sleep();
10232+
10233+ if ((int)size < 0)
10234+ return size;
10235+
10236+#ifdef CONFIG_PAX_MEMORY_UDEREF
10237+ if (!__access_ok(VERIFY_READ, src, size))
10238+ return size;
10239+#endif
10240+
10241 return __copy_user_nocache(dst, src, size, 1);
10242 }
10243
10244-static inline int
10245-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10246+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10247 unsigned size)
10248 {
10249+ if ((int)size < 0)
10250+ return size;
10251+
10252+#ifdef CONFIG_PAX_MEMORY_UDEREF
10253+ if (!__access_ok(VERIFY_READ, src, size))
10254+ return size;
10255+#endif
10256+
10257 return __copy_user_nocache(dst, src, size, 0);
10258 }
10259
10260-unsigned long
10261-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10262+extern unsigned long
10263+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
10264
10265 #endif /* _ASM_X86_UACCESS_64_H */
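
Several hunks above compare the requested length against __compiletime_object_size() of the buffer and warn or refuse when the copy is provably too large. In plain GNU C the underlying primitive is __builtin_object_size(); a small sketch of the same test (assumes GCC or Clang built with optimization so object sizes are tracked; would_overflow is a made-up helper, not the kernel's):

#include <stdio.h>

/* Evaluated at the call site, where the compiler can still see the object.
 * Without optimization the builtin may simply report "unknown" ((size_t)-1). */
#define would_overflow(dst, n)                                          \
        ({                                                              \
                size_t __sz = __builtin_object_size(dst, 0);            \
                __sz != (size_t)-1 && __sz < (size_t)(n);               \
        })

int main(void)
{
        char buf[8];

        printf("copy of  4 bytes overflows buf? %d\n", would_overflow(buf, 4));
        printf("copy of 32 bytes overflows buf? %d\n", would_overflow(buf, 32));
        return 0;
}
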
10266diff -urNp linux-3.1.4/arch/x86/include/asm/uaccess.h linux-3.1.4/arch/x86/include/asm/uaccess.h
10267--- linux-3.1.4/arch/x86/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
10268+++ linux-3.1.4/arch/x86/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
10269@@ -7,12 +7,15 @@
10270 #include <linux/compiler.h>
10271 #include <linux/thread_info.h>
10272 #include <linux/string.h>
10273+#include <linux/sched.h>
10274 #include <asm/asm.h>
10275 #include <asm/page.h>
10276
10277 #define VERIFY_READ 0
10278 #define VERIFY_WRITE 1
10279
10280+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10281+
10282 /*
10283 * The fs value determines whether argument validity checking should be
10284 * performed or not. If get_fs() == USER_DS, checking is performed, with
10285@@ -28,7 +31,12 @@
10286
10287 #define get_ds() (KERNEL_DS)
10288 #define get_fs() (current_thread_info()->addr_limit)
10289+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10290+void __set_fs(mm_segment_t x);
10291+void set_fs(mm_segment_t x);
10292+#else
10293 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10294+#endif
10295
10296 #define segment_eq(a, b) ((a).seg == (b).seg)
10297
10298@@ -76,7 +84,33 @@
10299 * checks that the pointer is in the user space range - after calling
10300 * this function, memory access functions may still return -EFAULT.
10301 */
10302-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10303+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10304+#define access_ok(type, addr, size) \
10305+({ \
10306+ long __size = size; \
10307+ unsigned long __addr = (unsigned long)addr; \
10308+ unsigned long __addr_ao = __addr & PAGE_MASK; \
10309+ unsigned long __end_ao = __addr + __size - 1; \
10310+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10311+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10312+ while(__addr_ao <= __end_ao) { \
10313+ char __c_ao; \
10314+ __addr_ao += PAGE_SIZE; \
10315+ if (__size > PAGE_SIZE) \
10316+ cond_resched(); \
10317+ if (__get_user(__c_ao, (char __user *)__addr)) \
10318+ break; \
10319+ if (type != VERIFY_WRITE) { \
10320+ __addr = __addr_ao; \
10321+ continue; \
10322+ } \
10323+ if (__put_user(__c_ao, (char __user *)__addr)) \
10324+ break; \
10325+ __addr = __addr_ao; \
10326+ } \
10327+ } \
10328+ __ret_ao; \
10329+})
10330
10331 /*
10332 * The exception table consists of pairs of addresses: the first is the
10333@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10334 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10335 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10336
10337-
10338+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10339+#define __copyuser_seg "gs;"
10340+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10341+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10342+#else
10343+#define __copyuser_seg
10344+#define __COPYUSER_SET_ES
10345+#define __COPYUSER_RESTORE_ES
10346+#endif
10347
10348 #ifdef CONFIG_X86_32
10349 #define __put_user_asm_u64(x, addr, err, errret) \
10350- asm volatile("1: movl %%eax,0(%2)\n" \
10351- "2: movl %%edx,4(%2)\n" \
10352+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10353+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10354 "3:\n" \
10355 ".section .fixup,\"ax\"\n" \
10356 "4: movl %3,%0\n" \
10357@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10358 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10359
10360 #define __put_user_asm_ex_u64(x, addr) \
10361- asm volatile("1: movl %%eax,0(%1)\n" \
10362- "2: movl %%edx,4(%1)\n" \
10363+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10364+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10365 "3:\n" \
10366 _ASM_EXTABLE(1b, 2b - 1b) \
10367 _ASM_EXTABLE(2b, 3b - 2b) \
10368@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10369 __typeof__(*(ptr)) __pu_val; \
10370 __chk_user_ptr(ptr); \
10371 might_fault(); \
10372- __pu_val = x; \
10373+ __pu_val = (x); \
10374 switch (sizeof(*(ptr))) { \
10375 case 1: \
10376 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10377@@ -373,7 +415,7 @@ do { \
10378 } while (0)
10379
10380 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10381- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10382+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10383 "2:\n" \
10384 ".section .fixup,\"ax\"\n" \
10385 "3: mov %3,%0\n" \
10386@@ -381,7 +423,7 @@ do { \
10387 " jmp 2b\n" \
10388 ".previous\n" \
10389 _ASM_EXTABLE(1b, 3b) \
10390- : "=r" (err), ltype(x) \
10391+ : "=r" (err), ltype (x) \
10392 : "m" (__m(addr)), "i" (errret), "0" (err))
10393
10394 #define __get_user_size_ex(x, ptr, size) \
10395@@ -406,7 +448,7 @@ do { \
10396 } while (0)
10397
10398 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10399- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10400+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10401 "2:\n" \
10402 _ASM_EXTABLE(1b, 2b - 1b) \
10403 : ltype(x) : "m" (__m(addr)))
10404@@ -423,13 +465,24 @@ do { \
10405 int __gu_err; \
10406 unsigned long __gu_val; \
10407 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10408- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10409+ (x) = (__typeof__(*(ptr)))__gu_val; \
10410 __gu_err; \
10411 })
10412
10413 /* FIXME: this hack is definitely wrong -AK */
10414 struct __large_struct { unsigned long buf[100]; };
10415-#define __m(x) (*(struct __large_struct __user *)(x))
10416+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10417+#define ____m(x) \
10418+({ \
10419+ unsigned long ____x = (unsigned long)(x); \
10420+ if (____x < PAX_USER_SHADOW_BASE) \
10421+ ____x += PAX_USER_SHADOW_BASE; \
10422+ (void __user *)____x; \
10423+})
10424+#else
10425+#define ____m(x) (x)
10426+#endif
10427+#define __m(x) (*(struct __large_struct __user *)____m(x))
10428
10429 /*
10430 * Tell gcc we read from memory instead of writing: this is because
10431@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10432 * aliasing issues.
10433 */
10434 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10435- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10436+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10437 "2:\n" \
10438 ".section .fixup,\"ax\"\n" \
10439 "3: mov %3,%0\n" \
10440@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10441 ".previous\n" \
10442 _ASM_EXTABLE(1b, 3b) \
10443 : "=r"(err) \
10444- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10445+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10446
10447 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10448- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10449+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10450 "2:\n" \
10451 _ASM_EXTABLE(1b, 2b - 1b) \
10452 : : ltype(x), "m" (__m(addr)))
10453@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10454 * On error, the variable @x is set to zero.
10455 */
10456
10457+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10458+#define __get_user(x, ptr) get_user((x), (ptr))
10459+#else
10460 #define __get_user(x, ptr) \
10461 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10462+#endif
10463
10464 /**
10465 * __put_user: - Write a simple value into user space, with less checking.
10466@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10467 * Returns zero on success, or -EFAULT on error.
10468 */
10469
10470+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10471+#define __put_user(x, ptr) put_user((x), (ptr))
10472+#else
10473 #define __put_user(x, ptr) \
10474 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10475+#endif
10476
10477 #define __get_user_unaligned __get_user
10478 #define __put_user_unaligned __put_user
10479@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10480 #define get_user_ex(x, ptr) do { \
10481 unsigned long __gue_val; \
10482 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10483- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10484+ (x) = (__typeof__(*(ptr)))__gue_val; \
10485 } while (0)
10486
10487 #ifdef CONFIG_X86_WP_WORKS_OK
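
The reworked access_ok() above still starts from a plain range test (__range_not_ok): the window [addr, addr+size) must not wrap and must stay below the task's address limit. A user-space model of that test, with USER_LIMIT as an assumed constant rather than the kernel's real limit (assumes an LP64 build; not part of the patch):

#include <stdio.h>
#include <stdint.h>

#define USER_LIMIT 0x00007ffffffff000UL   /* assumption: x86-64-style split */

static int range_ok(uintptr_t addr, size_t size)
{
        if (size > USER_LIMIT)
                return 0;
        if (addr > USER_LIMIT - size)   /* also catches addr + size wrapping */
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", range_ok(0x1000, 64));            /* 1: fine       */
        printf("%d\n", range_ok(USER_LIMIT - 16, 64));   /* 0: past limit */
        printf("%d\n", range_ok((uintptr_t)-64, 128));   /* 0: wraps      */
        return 0;
}
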
10488diff -urNp linux-3.1.4/arch/x86/include/asm/vdso.h linux-3.1.4/arch/x86/include/asm/vdso.h
10489--- linux-3.1.4/arch/x86/include/asm/vdso.h 2011-11-11 15:19:27.000000000 -0500
10490+++ linux-3.1.4/arch/x86/include/asm/vdso.h 2011-11-16 18:39:07.000000000 -0500
10491@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10492 #define VDSO32_SYMBOL(base, name) \
10493 ({ \
10494 extern const char VDSO32_##name[]; \
10495- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10496+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10497 })
10498 #endif
10499
10500diff -urNp linux-3.1.4/arch/x86/include/asm/x86_init.h linux-3.1.4/arch/x86/include/asm/x86_init.h
10501--- linux-3.1.4/arch/x86/include/asm/x86_init.h 2011-11-11 15:19:27.000000000 -0500
10502+++ linux-3.1.4/arch/x86/include/asm/x86_init.h 2011-11-16 18:39:07.000000000 -0500
10503@@ -28,7 +28,7 @@ struct x86_init_mpparse {
10504 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10505 void (*find_smp_config)(void);
10506 void (*get_smp_config)(unsigned int early);
10507-};
10508+} __no_const;
10509
10510 /**
10511 * struct x86_init_resources - platform specific resource related ops
10512@@ -42,7 +42,7 @@ struct x86_init_resources {
10513 void (*probe_roms)(void);
10514 void (*reserve_resources)(void);
10515 char *(*memory_setup)(void);
10516-};
10517+} __no_const;
10518
10519 /**
10520 * struct x86_init_irqs - platform specific interrupt setup
10521@@ -55,7 +55,7 @@ struct x86_init_irqs {
10522 void (*pre_vector_init)(void);
10523 void (*intr_init)(void);
10524 void (*trap_init)(void);
10525-};
10526+} __no_const;
10527
10528 /**
10529 * struct x86_init_oem - oem platform specific customizing functions
10530@@ -65,7 +65,7 @@ struct x86_init_irqs {
10531 struct x86_init_oem {
10532 void (*arch_setup)(void);
10533 void (*banner)(void);
10534-};
10535+} __no_const;
10536
10537 /**
10538 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10539@@ -76,7 +76,7 @@ struct x86_init_oem {
10540 */
10541 struct x86_init_mapping {
10542 void (*pagetable_reserve)(u64 start, u64 end);
10543-};
10544+} __no_const;
10545
10546 /**
10547 * struct x86_init_paging - platform specific paging functions
10548@@ -86,7 +86,7 @@ struct x86_init_mapping {
10549 struct x86_init_paging {
10550 void (*pagetable_setup_start)(pgd_t *base);
10551 void (*pagetable_setup_done)(pgd_t *base);
10552-};
10553+} __no_const;
10554
10555 /**
10556 * struct x86_init_timers - platform specific timer setup
10557@@ -101,7 +101,7 @@ struct x86_init_timers {
10558 void (*tsc_pre_init)(void);
10559 void (*timer_init)(void);
10560 void (*wallclock_init)(void);
10561-};
10562+} __no_const;
10563
10564 /**
10565 * struct x86_init_iommu - platform specific iommu setup
10566@@ -109,7 +109,7 @@ struct x86_init_timers {
10567 */
10568 struct x86_init_iommu {
10569 int (*iommu_init)(void);
10570-};
10571+} __no_const;
10572
10573 /**
10574 * struct x86_init_pci - platform specific pci init functions
10575@@ -123,7 +123,7 @@ struct x86_init_pci {
10576 int (*init)(void);
10577 void (*init_irq)(void);
10578 void (*fixup_irqs)(void);
10579-};
10580+} __no_const;
10581
10582 /**
10583 * struct x86_init_ops - functions for platform specific setup
10584@@ -139,7 +139,7 @@ struct x86_init_ops {
10585 struct x86_init_timers timers;
10586 struct x86_init_iommu iommu;
10587 struct x86_init_pci pci;
10588-};
10589+} __no_const;
10590
10591 /**
10592 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10593@@ -147,7 +147,7 @@ struct x86_init_ops {
10594 */
10595 struct x86_cpuinit_ops {
10596 void (*setup_percpu_clockev)(void);
10597-};
10598+} __no_const;
10599
10600 /**
10601 * struct x86_platform_ops - platform specific runtime functions
10602@@ -166,7 +166,7 @@ struct x86_platform_ops {
10603 bool (*is_untracked_pat_range)(u64 start, u64 end);
10604 void (*nmi_init)(void);
10605 int (*i8042_detect)(void);
10606-};
10607+} __no_const;
10608
10609 struct pci_dev;
10610
10611@@ -174,7 +174,7 @@ struct x86_msi_ops {
10612 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10613 void (*teardown_msi_irq)(unsigned int irq);
10614 void (*teardown_msi_irqs)(struct pci_dev *dev);
10615-};
10616+} __no_const;
10617
10618 extern struct x86_init_ops x86_init;
10619 extern struct x86_cpuinit_ops x86_cpuinit;
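
The __no_const annotations above mark ops structures that must stay writable so the constify GCC plugin leaves them alone; everything else is turned into a read-only function-pointer table. The underlying idea, in plain C, is simply declaring an ops table const so its pointers cannot be retargeted at run time (sketch only, not part of the patch):

#include <stdio.h>

struct timer_ops {
        void (*init)(void);
        void (*tick)(void);
};

static void my_init(void) { puts("init"); }
static void my_tick(void) { puts("tick"); }

/* const: the table lands in read-only data and cannot be overwritten. */
static const struct timer_ops ops = {
        .init = my_init,
        .tick = my_tick,
};

int main(void)
{
        ops.init();
        ops.tick();
        return 0;
}

In the patch the same effect is applied tree-wide by the plugin, with __no_const as the explicit opt-out for tables that really are written at run time.
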
10620diff -urNp linux-3.1.4/arch/x86/include/asm/xsave.h linux-3.1.4/arch/x86/include/asm/xsave.h
10621--- linux-3.1.4/arch/x86/include/asm/xsave.h 2011-11-11 15:19:27.000000000 -0500
10622+++ linux-3.1.4/arch/x86/include/asm/xsave.h 2011-11-16 18:39:07.000000000 -0500
10623@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10624 {
10625 int err;
10626
10627+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10628+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10629+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10630+#endif
10631+
10632 /*
10633 * Clear the xsave header first, so that reserved fields are
10634 * initialized to zero.
10635@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10636 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10637 {
10638 int err;
10639- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10640+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10641 u32 lmask = mask;
10642 u32 hmask = mask >> 32;
10643
10644+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10645+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10646+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10647+#endif
10648+
10649 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10650 "2:\n"
10651 ".section .fixup,\"ax\"\n"
10652diff -urNp linux-3.1.4/arch/x86/Kconfig linux-3.1.4/arch/x86/Kconfig
10653--- linux-3.1.4/arch/x86/Kconfig 2011-11-11 15:19:27.000000000 -0500
10654+++ linux-3.1.4/arch/x86/Kconfig 2011-11-16 18:40:08.000000000 -0500
10655@@ -236,7 +236,7 @@ config X86_HT
10656
10657 config X86_32_LAZY_GS
10658 def_bool y
10659- depends on X86_32 && !CC_STACKPROTECTOR
10660+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10661
10662 config ARCH_HWEIGHT_CFLAGS
10663 string
10664@@ -1019,7 +1019,7 @@ choice
10665
10666 config NOHIGHMEM
10667 bool "off"
10668- depends on !X86_NUMAQ
10669+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10670 ---help---
10671 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10672 However, the address space of 32-bit x86 processors is only 4
10673@@ -1056,7 +1056,7 @@ config NOHIGHMEM
10674
10675 config HIGHMEM4G
10676 bool "4GB"
10677- depends on !X86_NUMAQ
10678+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10679 ---help---
10680 Select this if you have a 32-bit processor and between 1 and 4
10681 gigabytes of physical RAM.
10682@@ -1110,7 +1110,7 @@ config PAGE_OFFSET
10683 hex
10684 default 0xB0000000 if VMSPLIT_3G_OPT
10685 default 0x80000000 if VMSPLIT_2G
10686- default 0x78000000 if VMSPLIT_2G_OPT
10687+ default 0x70000000 if VMSPLIT_2G_OPT
10688 default 0x40000000 if VMSPLIT_1G
10689 default 0xC0000000
10690 depends on X86_32
10691@@ -1484,6 +1484,7 @@ config SECCOMP
10692
10693 config CC_STACKPROTECTOR
10694 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10695+ depends on X86_64 || !PAX_MEMORY_UDEREF
10696 ---help---
10697 This option turns on the -fstack-protector GCC feature. This
10698 feature puts, at the beginning of functions, a canary value on
10699@@ -1541,6 +1542,7 @@ config KEXEC_JUMP
10700 config PHYSICAL_START
10701 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10702 default "0x1000000"
10703+ range 0x400000 0x40000000
10704 ---help---
10705 This gives the physical address where the kernel is loaded.
10706
10707@@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS
10708 config PHYSICAL_ALIGN
10709 hex "Alignment value to which kernel should be aligned" if X86_32
10710 default "0x1000000"
10711+ range 0x400000 0x1000000 if PAX_KERNEXEC
10712 range 0x2000 0x1000000
10713 ---help---
10714 This value puts the alignment restrictions on physical address
10715@@ -1635,9 +1638,10 @@ config HOTPLUG_CPU
10716 Say N if you want to disable CPU hotplug.
10717
10718 config COMPAT_VDSO
10719- def_bool y
10720+ def_bool n
10721 prompt "Compat VDSO support"
10722 depends on X86_32 || IA32_EMULATION
10723+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10724 ---help---
10725 Map the 32-bit VDSO to the predictable old-style address too.
10726
10727diff -urNp linux-3.1.4/arch/x86/Kconfig.cpu linux-3.1.4/arch/x86/Kconfig.cpu
10728--- linux-3.1.4/arch/x86/Kconfig.cpu 2011-11-11 15:19:27.000000000 -0500
10729+++ linux-3.1.4/arch/x86/Kconfig.cpu 2011-11-16 18:39:07.000000000 -0500
10730@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
10731
10732 config X86_F00F_BUG
10733 def_bool y
10734- depends on M586MMX || M586TSC || M586 || M486 || M386
10735+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10736
10737 config X86_INVD_BUG
10738 def_bool y
10739@@ -365,7 +365,7 @@ config X86_POPAD_OK
10740
10741 config X86_ALIGNMENT_16
10742 def_bool y
10743- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10744+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10745
10746 config X86_INTEL_USERCOPY
10747 def_bool y
10748@@ -411,7 +411,7 @@ config X86_CMPXCHG64
10749 # generates cmov.
10750 config X86_CMOV
10751 def_bool y
10752- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10753+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10754
10755 config X86_MINIMUM_CPU_FAMILY
10756 int
10757diff -urNp linux-3.1.4/arch/x86/Kconfig.debug linux-3.1.4/arch/x86/Kconfig.debug
10758--- linux-3.1.4/arch/x86/Kconfig.debug 2011-11-11 15:19:27.000000000 -0500
10759+++ linux-3.1.4/arch/x86/Kconfig.debug 2011-11-16 18:39:07.000000000 -0500
10760@@ -81,7 +81,7 @@ config X86_PTDUMP
10761 config DEBUG_RODATA
10762 bool "Write protect kernel read-only data structures"
10763 default y
10764- depends on DEBUG_KERNEL
10765+ depends on DEBUG_KERNEL && BROKEN
10766 ---help---
10767 Mark the kernel read-only data as write-protected in the pagetables,
10768 in order to catch accidental (and incorrect) writes to such const
10769@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10770
10771 config DEBUG_SET_MODULE_RONX
10772 bool "Set loadable kernel module data as NX and text as RO"
10773- depends on MODULES
10774+ depends on MODULES && BROKEN
10775 ---help---
10776 This option helps catch unintended modifications to loadable
10777 kernel module's text and read-only data. It also prevents execution
10778diff -urNp linux-3.1.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.1.4/arch/x86/kernel/acpi/realmode/Makefile
10779--- linux-3.1.4/arch/x86/kernel/acpi/realmode/Makefile 2011-11-11 15:19:27.000000000 -0500
10780+++ linux-3.1.4/arch/x86/kernel/acpi/realmode/Makefile 2011-11-16 18:39:07.000000000 -0500
10781@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10782 $(call cc-option, -fno-stack-protector) \
10783 $(call cc-option, -mpreferred-stack-boundary=2)
10784 KBUILD_CFLAGS += $(call cc-option, -m32)
10785+ifdef CONSTIFY_PLUGIN
10786+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10787+endif
10788 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10789 GCOV_PROFILE := n
10790
10791diff -urNp linux-3.1.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.1.4/arch/x86/kernel/acpi/realmode/wakeup.S
10792--- linux-3.1.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-11 15:19:27.000000000 -0500
10793+++ linux-3.1.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-16 18:40:08.000000000 -0500
10794@@ -108,6 +108,9 @@ wakeup_code:
10795 /* Do any other stuff... */
10796
10797 #ifndef CONFIG_64BIT
10798+	/* Recheck NX bit overrides (64bit path does this in the trampoline) */
10799+ call verify_cpu
10800+
10801 /* This could also be done in C code... */
10802 movl pmode_cr3, %eax
10803 movl %eax, %cr3
10804@@ -131,6 +134,7 @@ wakeup_code:
10805 movl pmode_cr0, %eax
10806 movl %eax, %cr0
10807 jmp pmode_return
10808+# include "../../verify_cpu.S"
10809 #else
10810 pushw $0
10811 pushw trampoline_segment
10812diff -urNp linux-3.1.4/arch/x86/kernel/acpi/sleep.c linux-3.1.4/arch/x86/kernel/acpi/sleep.c
10813--- linux-3.1.4/arch/x86/kernel/acpi/sleep.c 2011-11-11 15:19:27.000000000 -0500
10814+++ linux-3.1.4/arch/x86/kernel/acpi/sleep.c 2011-11-16 18:39:07.000000000 -0500
10815@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10816 header->trampoline_segment = trampoline_address() >> 4;
10817 #ifdef CONFIG_SMP
10818 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10819+
10820+ pax_open_kernel();
10821 early_gdt_descr.address =
10822 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10823+ pax_close_kernel();
10824+
10825 initial_gs = per_cpu_offset(smp_processor_id());
10826 #endif
10827 initial_code = (unsigned long)wakeup_long64;
10828diff -urNp linux-3.1.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.1.4/arch/x86/kernel/acpi/wakeup_32.S
10829--- linux-3.1.4/arch/x86/kernel/acpi/wakeup_32.S 2011-11-11 15:19:27.000000000 -0500
10830+++ linux-3.1.4/arch/x86/kernel/acpi/wakeup_32.S 2011-11-16 18:39:07.000000000 -0500
10831@@ -30,13 +30,11 @@ wakeup_pmode_return:
10832 # and restore the stack ... but you need gdt for this to work
10833 movl saved_context_esp, %esp
10834
10835- movl %cs:saved_magic, %eax
10836- cmpl $0x12345678, %eax
10837+ cmpl $0x12345678, saved_magic
10838 jne bogus_magic
10839
10840 # jump to place where we left off
10841- movl saved_eip, %eax
10842- jmp *%eax
10843+ jmp *(saved_eip)
10844
10845 bogus_magic:
10846 jmp bogus_magic
10847diff -urNp linux-3.1.4/arch/x86/kernel/alternative.c linux-3.1.4/arch/x86/kernel/alternative.c
10848--- linux-3.1.4/arch/x86/kernel/alternative.c 2011-11-11 15:19:27.000000000 -0500
10849+++ linux-3.1.4/arch/x86/kernel/alternative.c 2011-11-16 18:39:07.000000000 -0500
10850@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives
10851 */
10852 for (a = start; a < end; a++) {
10853 instr = (u8 *)&a->instr_offset + a->instr_offset;
10854+
10855+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10856+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10857+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
10858+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10859+#endif
10860+
10861 replacement = (u8 *)&a->repl_offset + a->repl_offset;
10862 BUG_ON(a->replacementlen > a->instrlen);
10863 BUG_ON(a->instrlen > sizeof(insnbuf));
10864@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const
10865 for (poff = start; poff < end; poff++) {
10866 u8 *ptr = (u8 *)poff + *poff;
10867
10868+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10869+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10870+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
10871+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10872+#endif
10873+
10874 if (!*poff || ptr < text || ptr >= text_end)
10875 continue;
10876 /* turn DS segment override prefix into lock prefix */
10877- if (*ptr == 0x3e)
10878+ if (*ktla_ktva(ptr) == 0x3e)
10879 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10880 };
10881 mutex_unlock(&text_mutex);
10882@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(cons
10883 for (poff = start; poff < end; poff++) {
10884 u8 *ptr = (u8 *)poff + *poff;
10885
10886+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10887+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10888+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
10889+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10890+#endif
10891+
10892 if (!*poff || ptr < text || ptr >= text_end)
10893 continue;
10894 /* turn lock prefix into DS segment override prefix */
10895- if (*ptr == 0xf0)
10896+ if (*ktla_ktva(ptr) == 0xf0)
10897 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10898 };
10899 mutex_unlock(&text_mutex);
10900@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(str
10901
10902 BUG_ON(p->len > MAX_PATCH_LEN);
10903 /* prep the buffer with the original instructions */
10904- memcpy(insnbuf, p->instr, p->len);
10905+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10906 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10907 (unsigned long)p->instr, p->len);
10908
10909@@ -568,7 +587,7 @@ void __init alternative_instructions(voi
10910 if (smp_alt_once)
10911 free_init_pages("SMP alternatives",
10912 (unsigned long)__smp_locks,
10913- (unsigned long)__smp_locks_end);
10914+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10915
10916 restart_nmi();
10917 }
10918@@ -585,13 +604,17 @@ void __init alternative_instructions(voi
10919 * instructions. And on the local CPU you need to be protected again NMI or MCE
10920 * handlers seeing an inconsistent instruction while you patch.
10921 */
10922-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10923+void *__kprobes text_poke_early(void *addr, const void *opcode,
10924 size_t len)
10925 {
10926 unsigned long flags;
10927 local_irq_save(flags);
10928- memcpy(addr, opcode, len);
10929+
10930+ pax_open_kernel();
10931+ memcpy(ktla_ktva(addr), opcode, len);
10932 sync_core();
10933+ pax_close_kernel();
10934+
10935 local_irq_restore(flags);
10936 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10937 that causes hangs on some VIA CPUs. */
10938@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(v
10939 */
10940 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10941 {
10942- unsigned long flags;
10943- char *vaddr;
10944+ unsigned char *vaddr = ktla_ktva(addr);
10945 struct page *pages[2];
10946- int i;
10947+ size_t i;
10948
10949 if (!core_kernel_text((unsigned long)addr)) {
10950- pages[0] = vmalloc_to_page(addr);
10951- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10952+ pages[0] = vmalloc_to_page(vaddr);
10953+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10954 } else {
10955- pages[0] = virt_to_page(addr);
10956+ pages[0] = virt_to_page(vaddr);
10957 WARN_ON(!PageReserved(pages[0]));
10958- pages[1] = virt_to_page(addr + PAGE_SIZE);
10959+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10960 }
10961 BUG_ON(!pages[0]);
10962- local_irq_save(flags);
10963- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10964- if (pages[1])
10965- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10966- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10967- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10968- clear_fixmap(FIX_TEXT_POKE0);
10969- if (pages[1])
10970- clear_fixmap(FIX_TEXT_POKE1);
10971- local_flush_tlb();
10972- sync_core();
10973- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10974- that causes hangs on some VIA CPUs. */
10975+ text_poke_early(addr, opcode, len);
10976 for (i = 0; i < len; i++)
10977- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10978- local_irq_restore(flags);
10979+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10980 return addr;
10981 }
10982
10983diff -urNp linux-3.1.4/arch/x86/kernel/apic/apic.c linux-3.1.4/arch/x86/kernel/apic/apic.c
10984--- linux-3.1.4/arch/x86/kernel/apic/apic.c 2011-11-11 15:19:27.000000000 -0500
10985+++ linux-3.1.4/arch/x86/kernel/apic/apic.c 2011-11-16 18:40:08.000000000 -0500
10986@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
10987 /*
10988 * Debug level, exported for io_apic.c
10989 */
10990-unsigned int apic_verbosity;
10991+int apic_verbosity;
10992
10993 int pic_mode;
10994
10995@@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs
10996 apic_write(APIC_ESR, 0);
10997 v1 = apic_read(APIC_ESR);
10998 ack_APIC_irq();
10999- atomic_inc(&irq_err_count);
11000+ atomic_inc_unchecked(&irq_err_count);
11001
11002 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11003 smp_processor_id(), v0 , v1);
11004@@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(vo
11005 u16 *bios_cpu_apicid;
11006 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11007
11008+ pax_track_stack();
11009+
11010 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11011 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11012
11013diff -urNp linux-3.1.4/arch/x86/kernel/apic/io_apic.c linux-3.1.4/arch/x86/kernel/apic/io_apic.c
11014--- linux-3.1.4/arch/x86/kernel/apic/io_apic.c 2011-11-11 15:19:27.000000000 -0500
11015+++ linux-3.1.4/arch/x86/kernel/apic/io_apic.c 2011-11-16 18:39:07.000000000 -0500
11016@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11017 }
11018 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11019
11020-void lock_vector_lock(void)
11021+void lock_vector_lock(void) __acquires(vector_lock)
11022 {
11023 /* Used to the online set of cpus does not change
11024 * during assign_irq_vector.
11025@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
11026 raw_spin_lock(&vector_lock);
11027 }
11028
11029-void unlock_vector_lock(void)
11030+void unlock_vector_lock(void) __releases(vector_lock)
11031 {
11032 raw_spin_unlock(&vector_lock);
11033 }
11034@@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_dat
11035 ack_APIC_irq();
11036 }
11037
11038-atomic_t irq_mis_count;
11039+atomic_unchecked_t irq_mis_count;
11040
11041 /*
11042 * IO-APIC versions below 0x20 don't support EOI register.
11043@@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_da
11044 * at the cpu.
11045 */
11046 if (!(v & (1 << (i & 0x1f)))) {
11047- atomic_inc(&irq_mis_count);
11048+ atomic_inc_unchecked(&irq_mis_count);
11049
11050 eoi_ioapic_irq(irq, cfg);
11051 }
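
atomic_unchecked_t in the hunks above marks counters such as irq_err_count and irq_mis_count whose wraparound is harmless (they are pure statistics), so PaX's overflow detection can skip them. A plain C11 sketch of such a statistics-only counter (not part of the patch):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong irq_err_count;   /* statistics only; wrap is harmless */

static void note_irq_error(void)
{
        atomic_fetch_add_explicit(&irq_err_count, 1, memory_order_relaxed);
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                note_irq_error();
        printf("errors: %lu\n", atomic_load(&irq_err_count));
        return 0;
}
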
11052diff -urNp linux-3.1.4/arch/x86/kernel/apm_32.c linux-3.1.4/arch/x86/kernel/apm_32.c
11053--- linux-3.1.4/arch/x86/kernel/apm_32.c 2011-11-11 15:19:27.000000000 -0500
11054+++ linux-3.1.4/arch/x86/kernel/apm_32.c 2011-11-16 18:39:07.000000000 -0500
11055@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
11056 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11057 * even though they are called in protected mode.
11058 */
11059-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11060+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11061 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11062
11063 static const char driver_version[] = "1.16ac"; /* no spaces */
11064@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
11065 BUG_ON(cpu != 0);
11066 gdt = get_cpu_gdt_table(cpu);
11067 save_desc_40 = gdt[0x40 / 8];
11068+
11069+ pax_open_kernel();
11070 gdt[0x40 / 8] = bad_bios_desc;
11071+ pax_close_kernel();
11072
11073 apm_irq_save(flags);
11074 APM_DO_SAVE_SEGS;
11075@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
11076 &call->esi);
11077 APM_DO_RESTORE_SEGS;
11078 apm_irq_restore(flags);
11079+
11080+ pax_open_kernel();
11081 gdt[0x40 / 8] = save_desc_40;
11082+ pax_close_kernel();
11083+
11084 put_cpu();
11085
11086 return call->eax & 0xff;
11087@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
11088 BUG_ON(cpu != 0);
11089 gdt = get_cpu_gdt_table(cpu);
11090 save_desc_40 = gdt[0x40 / 8];
11091+
11092+ pax_open_kernel();
11093 gdt[0x40 / 8] = bad_bios_desc;
11094+ pax_close_kernel();
11095
11096 apm_irq_save(flags);
11097 APM_DO_SAVE_SEGS;
11098@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
11099 &call->eax);
11100 APM_DO_RESTORE_SEGS;
11101 apm_irq_restore(flags);
11102+
11103+ pax_open_kernel();
11104 gdt[0x40 / 8] = save_desc_40;
11105+ pax_close_kernel();
11106+
11107 put_cpu();
11108 return error;
11109 }
11110@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
11111 * code to that CPU.
11112 */
11113 gdt = get_cpu_gdt_table(0);
11114+
11115+ pax_open_kernel();
11116 set_desc_base(&gdt[APM_CS >> 3],
11117 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11118 set_desc_base(&gdt[APM_CS_16 >> 3],
11119 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11120 set_desc_base(&gdt[APM_DS >> 3],
11121 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11122+ pax_close_kernel();
11123
11124 proc_create("apm", 0, NULL, &apm_file_ops);
11125
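
The pax_open_kernel()/pax_close_kernel() pairs above briefly lift write protection around legitimate GDT updates and restore it immediately afterwards. A rough user-space analogue of that open/write/close pattern, using mprotect() on an otherwise read-only page (sketch only; not the kernel mechanism):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        strcpy(p, "initial");
        mprotect(p, pagesz, PROT_READ);              /* normally read-only   */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* "open"               */
        strcpy(p, "updated");                        /* the legitimate write */
        mprotect(p, pagesz, PROT_READ);              /* "close"              */

        puts(p);
        munmap(p, pagesz);
        return 0;
}
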
11126diff -urNp linux-3.1.4/arch/x86/kernel/asm-offsets_64.c linux-3.1.4/arch/x86/kernel/asm-offsets_64.c
11127--- linux-3.1.4/arch/x86/kernel/asm-offsets_64.c 2011-11-11 15:19:27.000000000 -0500
11128+++ linux-3.1.4/arch/x86/kernel/asm-offsets_64.c 2011-11-16 18:39:07.000000000 -0500
11129@@ -69,6 +69,7 @@ int main(void)
11130 BLANK();
11131 #undef ENTRY
11132
11133+ DEFINE(TSS_size, sizeof(struct tss_struct));
11134 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11135 BLANK();
11136
11137diff -urNp linux-3.1.4/arch/x86/kernel/asm-offsets.c linux-3.1.4/arch/x86/kernel/asm-offsets.c
11138--- linux-3.1.4/arch/x86/kernel/asm-offsets.c 2011-11-11 15:19:27.000000000 -0500
11139+++ linux-3.1.4/arch/x86/kernel/asm-offsets.c 2011-11-16 18:39:07.000000000 -0500
11140@@ -33,6 +33,8 @@ void common(void) {
11141 OFFSET(TI_status, thread_info, status);
11142 OFFSET(TI_addr_limit, thread_info, addr_limit);
11143 OFFSET(TI_preempt_count, thread_info, preempt_count);
11144+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11145+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11146
11147 BLANK();
11148 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11149@@ -53,8 +55,26 @@ void common(void) {
11150 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11151 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11152 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11153+
11154+#ifdef CONFIG_PAX_KERNEXEC
11155+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11156+#endif
11157+
11158+#ifdef CONFIG_PAX_MEMORY_UDEREF
11159+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11160+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11161+#ifdef CONFIG_X86_64
11162+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11163+#endif
11164 #endif
11165
11166+#endif
11167+
11168+ BLANK();
11169+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11170+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11171+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11172+
11173 #ifdef CONFIG_XEN
11174 BLANK();
11175 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11176diff -urNp linux-3.1.4/arch/x86/kernel/cpu/amd.c linux-3.1.4/arch/x86/kernel/cpu/amd.c
11177--- linux-3.1.4/arch/x86/kernel/cpu/amd.c 2011-11-11 15:19:27.000000000 -0500
11178+++ linux-3.1.4/arch/x86/kernel/cpu/amd.c 2011-11-16 18:39:07.000000000 -0500
11179@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
11180 unsigned int size)
11181 {
11182 /* AMD errata T13 (order #21922) */
11183- if ((c->x86 == 6)) {
11184+ if (c->x86 == 6) {
11185 /* Duron Rev A0 */
11186 if (c->x86_model == 3 && c->x86_mask == 0)
11187 size = 64;
11188diff -urNp linux-3.1.4/arch/x86/kernel/cpu/common.c linux-3.1.4/arch/x86/kernel/cpu/common.c
11189--- linux-3.1.4/arch/x86/kernel/cpu/common.c 2011-11-11 15:19:27.000000000 -0500
11190+++ linux-3.1.4/arch/x86/kernel/cpu/common.c 2011-11-16 18:39:07.000000000 -0500
11191@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11192
11193 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11194
11195-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11196-#ifdef CONFIG_X86_64
11197- /*
11198- * We need valid kernel segments for data and code in long mode too
11199- * IRET will check the segment types kkeil 2000/10/28
11200- * Also sysret mandates a special GDT layout
11201- *
11202- * TLS descriptors are currently at a different place compared to i386.
11203- * Hopefully nobody expects them at a fixed place (Wine?)
11204- */
11205- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11206- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11207- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11208- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11209- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11210- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11211-#else
11212- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11213- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11214- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11215- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11216- /*
11217- * Segments used for calling PnP BIOS have byte granularity.
11218- * They code segments and data segments have fixed 64k limits,
11219- * the transfer segment sizes are set at run time.
11220- */
11221- /* 32-bit code */
11222- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11223- /* 16-bit code */
11224- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11225- /* 16-bit data */
11226- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11227- /* 16-bit data */
11228- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11229- /* 16-bit data */
11230- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11231- /*
11232- * The APM segments have byte granularity and their bases
11233- * are set at run time. All have 64k limits.
11234- */
11235- /* 32-bit code */
11236- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11237- /* 16-bit code */
11238- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11239- /* data */
11240- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11241-
11242- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11243- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11244- GDT_STACK_CANARY_INIT
11245-#endif
11246-} };
11247-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11248-
11249 static int __init x86_xsave_setup(char *s)
11250 {
11251 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11252@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
11253 {
11254 struct desc_ptr gdt_descr;
11255
11256- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11257+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11258 gdt_descr.size = GDT_SIZE - 1;
11259 load_gdt(&gdt_descr);
11260 /* Reload the per-cpu base */
11261@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
11262 /* Filter out anything that depends on CPUID levels we don't have */
11263 filter_cpuid_features(c, true);
11264
11265+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11266+ setup_clear_cpu_cap(X86_FEATURE_SEP);
11267+#endif
11268+
11269 /* If the model name is still unset, do table lookup. */
11270 if (!c->x86_model_id[0]) {
11271 const char *p;
11272@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
11273 }
11274 __setup("clearcpuid=", setup_disablecpuid);
11275
11276+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11277+EXPORT_PER_CPU_SYMBOL(current_tinfo);
11278+
11279 #ifdef CONFIG_X86_64
11280 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11281
11282@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11283 EXPORT_PER_CPU_SYMBOL(current_task);
11284
11285 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11286- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11287+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11288 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11289
11290 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11291@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
11292 {
11293 memset(regs, 0, sizeof(struct pt_regs));
11294 regs->fs = __KERNEL_PERCPU;
11295- regs->gs = __KERNEL_STACK_CANARY;
11296+ savesegment(gs, regs->gs);
11297
11298 return regs;
11299 }
11300@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
11301 int i;
11302
11303 cpu = stack_smp_processor_id();
11304- t = &per_cpu(init_tss, cpu);
11305+ t = init_tss + cpu;
11306 oist = &per_cpu(orig_ist, cpu);
11307
11308 #ifdef CONFIG_NUMA
11309@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
11310 switch_to_new_gdt(cpu);
11311 loadsegment(fs, 0);
11312
11313- load_idt((const struct desc_ptr *)&idt_descr);
11314+ load_idt(&idt_descr);
11315
11316 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11317 syscall_init();
11318@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
11319 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11320 barrier();
11321
11322- x86_configure_nx();
11323 if (cpu != 0)
11324 enable_x2apic();
11325
11326@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
11327 {
11328 int cpu = smp_processor_id();
11329 struct task_struct *curr = current;
11330- struct tss_struct *t = &per_cpu(init_tss, cpu);
11331+ struct tss_struct *t = init_tss + cpu;
11332 struct thread_struct *thread = &curr->thread;
11333
11334 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11335diff -urNp linux-3.1.4/arch/x86/kernel/cpu/intel.c linux-3.1.4/arch/x86/kernel/cpu/intel.c
11336--- linux-3.1.4/arch/x86/kernel/cpu/intel.c 2011-11-11 15:19:27.000000000 -0500
11337+++ linux-3.1.4/arch/x86/kernel/cpu/intel.c 2011-11-16 18:39:07.000000000 -0500
11338@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
11339 * Update the IDT descriptor and reload the IDT so that
11340 * it uses the read-only mapped virtual address.
11341 */
11342- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11343+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11344 load_idt(&idt_descr);
11345 }
11346 #endif
11347diff -urNp linux-3.1.4/arch/x86/kernel/cpu/Makefile linux-3.1.4/arch/x86/kernel/cpu/Makefile
11348--- linux-3.1.4/arch/x86/kernel/cpu/Makefile 2011-11-11 15:19:27.000000000 -0500
11349+++ linux-3.1.4/arch/x86/kernel/cpu/Makefile 2011-11-16 18:39:07.000000000 -0500
11350@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11351 CFLAGS_REMOVE_perf_event.o = -pg
11352 endif
11353
11354-# Make sure load_percpu_segment has no stackprotector
11355-nostackp := $(call cc-option, -fno-stack-protector)
11356-CFLAGS_common.o := $(nostackp)
11357-
11358 obj-y := intel_cacheinfo.o scattered.o topology.o
11359 obj-y += proc.o capflags.o powerflags.o common.o
11360 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11361diff -urNp linux-3.1.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.1.4/arch/x86/kernel/cpu/mcheck/mce.c
11362--- linux-3.1.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-11 15:19:27.000000000 -0500
11363+++ linux-3.1.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-12-02 17:38:47.000000000 -0500
11364@@ -42,6 +42,7 @@
11365 #include <asm/processor.h>
11366 #include <asm/mce.h>
11367 #include <asm/msr.h>
11368+#include <asm/local.h>
11369
11370 #include "mce-internal.h"
11371
11372@@ -205,7 +206,7 @@ static void print_mce(struct mce *m)
11373 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11374 m->cs, m->ip);
11375
11376- if (m->cs == __KERNEL_CS)
11377+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11378 print_symbol("{%s}", m->ip);
11379 pr_cont("\n");
11380 }
11381@@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
11382
11383 #define PANIC_TIMEOUT 5 /* 5 seconds */
11384
11385-static atomic_t mce_paniced;
11386+static atomic_unchecked_t mce_paniced;
11387
11388 static int fake_panic;
11389-static atomic_t mce_fake_paniced;
11390+static atomic_unchecked_t mce_fake_paniced;
11391
11392 /* Panic in progress. Enable interrupts and wait for final IPI */
11393 static void wait_for_panic(void)
11394@@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct
11395 /*
11396 * Make sure only one CPU runs in machine check panic
11397 */
11398- if (atomic_inc_return(&mce_paniced) > 1)
11399+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11400 wait_for_panic();
11401 barrier();
11402
11403@@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct
11404 console_verbose();
11405 } else {
11406 /* Don't log too much for fake panic */
11407- if (atomic_inc_return(&mce_fake_paniced) > 1)
11408+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11409 return;
11410 }
11411 /* First print corrected ones that are still unlogged */
11412@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
11413 * might have been modified by someone else.
11414 */
11415 rmb();
11416- if (atomic_read(&mce_paniced))
11417+ if (atomic_read_unchecked(&mce_paniced))
11418 wait_for_panic();
11419 if (!monarch_timeout)
11420 goto out;
11421@@ -1392,7 +1393,7 @@ static void unexpected_machine_check(str
11422 }
11423
11424 /* Call the installed machine check handler for this CPU setup. */
11425-void (*machine_check_vector)(struct pt_regs *, long error_code) =
11426+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
11427 unexpected_machine_check;
11428
11429 /*
11430@@ -1415,7 +1416,9 @@ void __cpuinit mcheck_cpu_init(struct cp
11431 return;
11432 }
11433
11434+ pax_open_kernel();
11435 machine_check_vector = do_machine_check;
11436+ pax_close_kernel();
11437
11438 __mcheck_cpu_init_generic();
11439 __mcheck_cpu_init_vendor(c);
11440@@ -1429,7 +1432,7 @@ void __cpuinit mcheck_cpu_init(struct cp
11441 */
11442
11443 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
11444-static int mce_chrdev_open_count; /* #times opened */
11445+static local_t mce_chrdev_open_count; /* #times opened */
11446 static int mce_chrdev_open_exclu; /* already open exclusive? */
11447
11448 static int mce_chrdev_open(struct inode *inode, struct file *file)
11449@@ -1437,7 +1440,7 @@ static int mce_chrdev_open(struct inode
11450 spin_lock(&mce_chrdev_state_lock);
11451
11452 if (mce_chrdev_open_exclu ||
11453- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
11454+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
11455 spin_unlock(&mce_chrdev_state_lock);
11456
11457 return -EBUSY;
11458@@ -1445,7 +1448,7 @@ static int mce_chrdev_open(struct inode
11459
11460 if (file->f_flags & O_EXCL)
11461 mce_chrdev_open_exclu = 1;
11462- mce_chrdev_open_count++;
11463+ local_inc(&mce_chrdev_open_count);
11464
11465 spin_unlock(&mce_chrdev_state_lock);
11466
11467@@ -1456,7 +1459,7 @@ static int mce_chrdev_release(struct ino
11468 {
11469 spin_lock(&mce_chrdev_state_lock);
11470
11471- mce_chrdev_open_count--;
11472+ local_dec(&mce_chrdev_open_count);
11473 mce_chrdev_open_exclu = 0;
11474
11475 spin_unlock(&mce_chrdev_state_lock);
11476@@ -2147,7 +2150,7 @@ struct dentry *mce_get_debugfs_dir(void)
11477 static void mce_reset(void)
11478 {
11479 cpu_missing = 0;
11480- atomic_set(&mce_fake_paniced, 0);
11481+ atomic_set_unchecked(&mce_fake_paniced, 0);
11482 atomic_set(&mce_executing, 0);
11483 atomic_set(&mce_callin, 0);
11484 atomic_set(&global_nwo, 0);
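
The mce.c hunks above move the panic bookkeeping to the _unchecked atomic variants and the character-device open count to a local_t; the control flow is unchanged, and mce_paniced still only decides whether the caller is the first CPU to reach mce_panic(). A minimal userspace sketch of that first-caller-wins pattern, written with C11 atomics rather than the kernel's atomic_inc_return() (the names and messages are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int paniced;

static void wait_for_panic(void)
{
        /* the kernel spins here waiting for the final IPI; nothing to do in the sketch */
}

static void mce_panic_like(const char *msg)
{
        if (atomic_fetch_add(&paniced, 1) > 0) {        /* not the first caller */
                wait_for_panic();
                return;
        }
        printf("panic: %s\n", msg);
}

int main(void)
{
        mce_panic_like("first");        /* prints the message */
        mce_panic_like("second");       /* takes the wait_for_panic() path */
        return 0;
}

The open-count conversion follows the same idea: the counter is only ever tested against zero under mce_chrdev_state_lock, so the exact counter type does not change the logic.
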
11485diff -urNp linux-3.1.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.1.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
11486--- linux-3.1.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-11 15:19:27.000000000 -0500
11487+++ linux-3.1.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-16 18:39:07.000000000 -0500
11488@@ -215,7 +215,9 @@ static int inject_init(void)
11489 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11490 return -ENOMEM;
11491 printk(KERN_INFO "Machine check injector initialized\n");
11492- mce_chrdev_ops.write = mce_write;
11493+ pax_open_kernel();
11494+ *(void **)&mce_chrdev_ops.write = mce_write;
11495+ pax_close_kernel();
11496 register_die_notifier(&mce_raise_nb);
11497 return 0;
11498 }
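
The mce-inject.c hunk installs the ->write method through a void ** cast with the kernel's write protection lifted around the single store (pax_open_kernel()/pax_close_kernel()), which is the pattern the patch uses once an ops structure has been made read-only. A rough userspace analogue of that open-write-close sequence, using mprotect() on a page that holds the ops object; the struct, helper and addresses are all made up for the illustration:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct chrdev_ops {
        ssize_t (*write)(const char *buf, size_t len);
};

static ssize_t new_write(const char *buf, size_t len)
{
        (void)buf;
        return (ssize_t)len;
}

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        struct chrdev_ops *ops;

        /* one page standing in for a normally read-only ops structure */
        ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
                return 1;
        memset(ops, 0, sizeof(*ops));
        mprotect(ops, pagesz, PROT_READ);                       /* keep it read-only */

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE);          /* "pax_open_kernel()" */
        *(void **)&ops->write = (void *)new_write;              /* same cast style as the hunk */
        mprotect(ops, pagesz, PROT_READ);                       /* "pax_close_kernel()" */

        printf("write() returned %zd\n", ops->write("abc", 3));
        munmap(ops, pagesz);
        return 0;
}

Only the single assignment sits between the open and close calls, keeping the window in which the structure is writable as short as possible.
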
11499diff -urNp linux-3.1.4/arch/x86/kernel/cpu/mcheck/p5.c linux-3.1.4/arch/x86/kernel/cpu/mcheck/p5.c
11500--- linux-3.1.4/arch/x86/kernel/cpu/mcheck/p5.c 2011-11-11 15:19:27.000000000 -0500
11501+++ linux-3.1.4/arch/x86/kernel/cpu/mcheck/p5.c 2011-12-02 17:38:47.000000000 -0500
11502@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo
11503 if (!cpu_has(c, X86_FEATURE_MCE))
11504 return;
11505
11506+ pax_open_kernel();
11507 machine_check_vector = pentium_machine_check;
11508+ pax_close_kernel();
11509 /* Make sure the vector pointer is visible before we enable MCEs: */
11510 wmb();
11511
11512diff -urNp linux-3.1.4/arch/x86/kernel/cpu/mcheck/winchip.c linux-3.1.4/arch/x86/kernel/cpu/mcheck/winchip.c
11513--- linux-3.1.4/arch/x86/kernel/cpu/mcheck/winchip.c 2011-11-11 15:19:27.000000000 -0500
11514+++ linux-3.1.4/arch/x86/kernel/cpu/mcheck/winchip.c 2011-12-02 17:38:47.000000000 -0500
11515@@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_
11516 {
11517 u32 lo, hi;
11518
11519+ pax_open_kernel();
11520 machine_check_vector = winchip_machine_check;
11521+ pax_close_kernel();
11522 /* Make sure the vector pointer is visible before we enable MCEs: */
11523 wmb();
11524
11525diff -urNp linux-3.1.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.1.4/arch/x86/kernel/cpu/mtrr/main.c
11526--- linux-3.1.4/arch/x86/kernel/cpu/mtrr/main.c 2011-11-11 15:19:27.000000000 -0500
11527+++ linux-3.1.4/arch/x86/kernel/cpu/mtrr/main.c 2011-11-16 18:39:07.000000000 -0500
11528@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11529 u64 size_or_mask, size_and_mask;
11530 static bool mtrr_aps_delayed_init;
11531
11532-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11533+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11534
11535 const struct mtrr_ops *mtrr_if;
11536
11537diff -urNp linux-3.1.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.1.4/arch/x86/kernel/cpu/mtrr/mtrr.h
11538--- linux-3.1.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-11 15:19:27.000000000 -0500
11539+++ linux-3.1.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-16 18:39:07.000000000 -0500
11540@@ -25,7 +25,7 @@ struct mtrr_ops {
11541 int (*validate_add_page)(unsigned long base, unsigned long size,
11542 unsigned int type);
11543 int (*have_wrcomb)(void);
11544-};
11545+} __do_const;
11546
11547 extern int generic_get_free_region(unsigned long base, unsigned long size,
11548 int replace_reg);
11549diff -urNp linux-3.1.4/arch/x86/kernel/cpu/perf_event.c linux-3.1.4/arch/x86/kernel/cpu/perf_event.c
11550--- linux-3.1.4/arch/x86/kernel/cpu/perf_event.c 2011-11-11 15:19:27.000000000 -0500
11551+++ linux-3.1.4/arch/x86/kernel/cpu/perf_event.c 2011-11-16 18:40:08.000000000 -0500
11552@@ -795,6 +795,8 @@ static int x86_schedule_events(struct cp
11553 int i, j, w, wmax, num = 0;
11554 struct hw_perf_event *hwc;
11555
11556+ pax_track_stack();
11557+
11558 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11559
11560 for (i = 0; i < n; i++) {
11561@@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchai
11562 break;
11563
11564 perf_callchain_store(entry, frame.return_address);
11565- fp = frame.next_frame;
11566+ fp = (const void __force_user *)frame.next_frame;
11567 }
11568 }
11569
11570diff -urNp linux-3.1.4/arch/x86/kernel/crash.c linux-3.1.4/arch/x86/kernel/crash.c
11571--- linux-3.1.4/arch/x86/kernel/crash.c 2011-11-11 15:19:27.000000000 -0500
11572+++ linux-3.1.4/arch/x86/kernel/crash.c 2011-11-16 18:39:07.000000000 -0500
11573@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11574 regs = args->regs;
11575
11576 #ifdef CONFIG_X86_32
11577- if (!user_mode_vm(regs)) {
11578+ if (!user_mode(regs)) {
11579 crash_fixup_ss_esp(&fixed_regs, regs);
11580 regs = &fixed_regs;
11581 }
11582diff -urNp linux-3.1.4/arch/x86/kernel/doublefault_32.c linux-3.1.4/arch/x86/kernel/doublefault_32.c
11583--- linux-3.1.4/arch/x86/kernel/doublefault_32.c 2011-11-11 15:19:27.000000000 -0500
11584+++ linux-3.1.4/arch/x86/kernel/doublefault_32.c 2011-11-16 18:39:07.000000000 -0500
11585@@ -11,7 +11,7 @@
11586
11587 #define DOUBLEFAULT_STACKSIZE (1024)
11588 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11589-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11590+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11591
11592 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11593
11594@@ -21,7 +21,7 @@ static void doublefault_fn(void)
11595 unsigned long gdt, tss;
11596
11597 store_gdt(&gdt_desc);
11598- gdt = gdt_desc.address;
11599+ gdt = (unsigned long)gdt_desc.address;
11600
11601 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11602
11603@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11604 /* 0x2 bit is always set */
11605 .flags = X86_EFLAGS_SF | 0x2,
11606 .sp = STACK_START,
11607- .es = __USER_DS,
11608+ .es = __KERNEL_DS,
11609 .cs = __KERNEL_CS,
11610 .ss = __KERNEL_DS,
11611- .ds = __USER_DS,
11612+ .ds = __KERNEL_DS,
11613 .fs = __KERNEL_PERCPU,
11614
11615 .__cr3 = __pa_nodebug(swapper_pg_dir),
11616diff -urNp linux-3.1.4/arch/x86/kernel/dumpstack_32.c linux-3.1.4/arch/x86/kernel/dumpstack_32.c
11617--- linux-3.1.4/arch/x86/kernel/dumpstack_32.c 2011-11-11 15:19:27.000000000 -0500
11618+++ linux-3.1.4/arch/x86/kernel/dumpstack_32.c 2011-11-16 18:39:07.000000000 -0500
11619@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11620 bp = stack_frame(task, regs);
11621
11622 for (;;) {
11623- struct thread_info *context;
11624+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11625
11626- context = (struct thread_info *)
11627- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11628- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11629+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11630
11631- stack = (unsigned long *)context->previous_esp;
11632- if (!stack)
11633+ if (stack_start == task_stack_page(task))
11634 break;
11635+ stack = *(unsigned long **)stack_start;
11636 if (ops->stack(data, "IRQ") < 0)
11637 break;
11638 touch_nmi_watchdog();
11639@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11640 * When in-kernel, we also print out the stack and code at the
11641 * time of the fault..
11642 */
11643- if (!user_mode_vm(regs)) {
11644+ if (!user_mode(regs)) {
11645 unsigned int code_prologue = code_bytes * 43 / 64;
11646 unsigned int code_len = code_bytes;
11647 unsigned char c;
11648 u8 *ip;
11649+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11650
11651 printk(KERN_EMERG "Stack:\n");
11652 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11653
11654 printk(KERN_EMERG "Code: ");
11655
11656- ip = (u8 *)regs->ip - code_prologue;
11657+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11658 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11659 /* try starting at IP */
11660- ip = (u8 *)regs->ip;
11661+ ip = (u8 *)regs->ip + cs_base;
11662 code_len = code_len - code_prologue + 1;
11663 }
11664 for (i = 0; i < code_len; i++, ip++) {
11665@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11666 printk(" Bad EIP value.");
11667 break;
11668 }
11669- if (ip == (u8 *)regs->ip)
11670+ if (ip == (u8 *)regs->ip + cs_base)
11671 printk("<%02x> ", c);
11672 else
11673 printk("%02x ", c);
11674@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11675 {
11676 unsigned short ud2;
11677
11678+ ip = ktla_ktva(ip);
11679 if (ip < PAGE_OFFSET)
11680 return 0;
11681 if (probe_kernel_address((unsigned short *)ip, ud2))
11682@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
11683
11684 return ud2 == 0x0b0f;
11685 }
11686+
11687+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11688+void pax_check_alloca(unsigned long size)
11689+{
11690+ unsigned long sp = (unsigned long)&sp, stack_left;
11691+
11692+ /* all kernel stacks are of the same size */
11693+ stack_left = sp & (THREAD_SIZE - 1);
11694+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11695+}
11696+EXPORT_SYMBOL(pax_check_alloca);
11697+#endif
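
The pax_check_alloca() helper added above derives the free space on the current kernel stack from the low bits of the stack pointer, which works because the stack is THREAD_SIZE-aligned and grows downwards, and it keeps a 256-byte guard at the bottom. A standalone sketch of that arithmetic; THREAD_SIZE and the addresses are assumed values for the illustration, not taken from the hunk:

#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL      /* assuming the usual 8 KiB 32-bit kernel stack */

static void check_alloca(unsigned long sp, unsigned long size)
{
        unsigned long stack_left = sp & (THREAD_SIZE - 1);

        /* keep a 256-byte guard, mirroring the BUG_ON() in the hunk */
        assert(stack_left >= 256 && size < stack_left - 256);
}

int main(void)
{
        unsigned long stack_base = 0xc1000000UL;        /* THREAD_SIZE-aligned, made up */
        unsigned long sp = stack_base + 0x600;          /* 0x600 bytes still free below sp */

        printf("stack_left = %#lx\n", sp & (THREAD_SIZE - 1));  /* prints 0x600 */
        check_alloca(sp, 512);   /* a 512-byte allocation fits under the guard */
        return 0;
}
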
11698diff -urNp linux-3.1.4/arch/x86/kernel/dumpstack_64.c linux-3.1.4/arch/x86/kernel/dumpstack_64.c
11699--- linux-3.1.4/arch/x86/kernel/dumpstack_64.c 2011-11-11 15:19:27.000000000 -0500
11700+++ linux-3.1.4/arch/x86/kernel/dumpstack_64.c 2011-11-16 18:39:07.000000000 -0500
11701@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task
11702 unsigned long *irq_stack_end =
11703 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11704 unsigned used = 0;
11705- struct thread_info *tinfo;
11706 int graph = 0;
11707 unsigned long dummy;
11708+ void *stack_start;
11709
11710 if (!task)
11711 task = current;
11712@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task
11713 * current stack address. If the stacks consist of nested
11714 * exceptions
11715 */
11716- tinfo = task_thread_info(task);
11717 for (;;) {
11718 char *id;
11719 unsigned long *estack_end;
11720+
11721 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11722 &used, &id);
11723
11724@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task
11725 if (ops->stack(data, id) < 0)
11726 break;
11727
11728- bp = ops->walk_stack(tinfo, stack, bp, ops,
11729+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11730 data, estack_end, &graph);
11731 ops->stack(data, "<EOE>");
11732 /*
11733@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task
11734 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11735 if (ops->stack(data, "IRQ") < 0)
11736 break;
11737- bp = ops->walk_stack(tinfo, stack, bp,
11738+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11739 ops, data, irq_stack_end, &graph);
11740 /*
11741 * We link to the next stack (which would be
11742@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task
11743 /*
11744 * This handles the process stack:
11745 */
11746- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11747+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11748+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11749 put_cpu();
11750 }
11751 EXPORT_SYMBOL(dump_trace);
11752@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
11753
11754 return ud2 == 0x0b0f;
11755 }
11756+
11757+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11758+void pax_check_alloca(unsigned long size)
11759+{
11760+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
11761+ unsigned cpu, used;
11762+ char *id;
11763+
11764+ /* check the process stack first */
11765+ stack_start = (unsigned long)task_stack_page(current);
11766+ stack_end = stack_start + THREAD_SIZE;
11767+ if (likely(stack_start <= sp && sp < stack_end)) {
11768+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
11769+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11770+ return;
11771+ }
11772+
11773+ cpu = get_cpu();
11774+
11775+ /* check the irq stacks */
11776+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
11777+ stack_start = stack_end - IRQ_STACK_SIZE;
11778+ if (stack_start <= sp && sp < stack_end) {
11779+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
11780+ put_cpu();
11781+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11782+ return;
11783+ }
11784+
11785+ /* check the exception stacks */
11786+ used = 0;
11787+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
11788+ stack_start = stack_end - EXCEPTION_STKSZ;
11789+ if (stack_end && stack_start <= sp && sp < stack_end) {
11790+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
11791+ put_cpu();
11792+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11793+ return;
11794+ }
11795+
11796+ put_cpu();
11797+
11798+ /* unknown stack */
11799+ BUG();
11800+}
11801+EXPORT_SYMBOL(pax_check_alloca);
11802+#endif
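
The 64-bit variant above has to classify the stack pointer first, since the caller may be running on the process stack, the per-cpu IRQ stack or one of the exception stacks, and only then applies the same power-of-two mask to get the remaining room. A compact sketch of that classification; every name, range and address below is made up for the example:

#include <stdio.h>

struct stack_range {
        const char *name;
        unsigned long start;
        unsigned long size;     /* power of two, so the mask trick works */
};

static const struct stack_range stacks[] = {
        { "process",   0xd0000000UL,  8192 },
        { "irq",       0xd8000000UL, 16384 },
        { "exception", 0xdc000000UL,  4096 },
};

int main(void)
{
        unsigned long sp = 0xd8002a00UL;        /* somewhere on the "irq" stack */
        unsigned int i;

        for (i = 0; i < sizeof(stacks) / sizeof(stacks[0]); i++) {
                const struct stack_range *r = &stacks[i];

                if (sp >= r->start && sp < r->start + r->size) {
                        unsigned long stack_left = sp & (r->size - 1);

                        printf("%s stack, %lu bytes left below sp\n",
                               r->name, stack_left);
                        return 0;
                }
        }
        printf("unknown stack\n");      /* the kernel BUG()s in this case */
        return 0;
}
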
11803diff -urNp linux-3.1.4/arch/x86/kernel/dumpstack.c linux-3.1.4/arch/x86/kernel/dumpstack.c
11804--- linux-3.1.4/arch/x86/kernel/dumpstack.c 2011-11-11 15:19:27.000000000 -0500
11805+++ linux-3.1.4/arch/x86/kernel/dumpstack.c 2011-11-16 18:40:08.000000000 -0500
11806@@ -2,6 +2,9 @@
11807 * Copyright (C) 1991, 1992 Linus Torvalds
11808 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11809 */
11810+#ifdef CONFIG_GRKERNSEC_HIDESYM
11811+#define __INCLUDED_BY_HIDESYM 1
11812+#endif
11813 #include <linux/kallsyms.h>
11814 #include <linux/kprobes.h>
11815 #include <linux/uaccess.h>
11816@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11817 static void
11818 print_ftrace_graph_addr(unsigned long addr, void *data,
11819 const struct stacktrace_ops *ops,
11820- struct thread_info *tinfo, int *graph)
11821+ struct task_struct *task, int *graph)
11822 {
11823- struct task_struct *task = tinfo->task;
11824 unsigned long ret_addr;
11825 int index = task->curr_ret_stack;
11826
11827@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11828 static inline void
11829 print_ftrace_graph_addr(unsigned long addr, void *data,
11830 const struct stacktrace_ops *ops,
11831- struct thread_info *tinfo, int *graph)
11832+ struct task_struct *task, int *graph)
11833 { }
11834 #endif
11835
11836@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11837 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11838 */
11839
11840-static inline int valid_stack_ptr(struct thread_info *tinfo,
11841- void *p, unsigned int size, void *end)
11842+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11843 {
11844- void *t = tinfo;
11845 if (end) {
11846 if (p < end && p >= (end-THREAD_SIZE))
11847 return 1;
11848@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11849 }
11850
11851 unsigned long
11852-print_context_stack(struct thread_info *tinfo,
11853+print_context_stack(struct task_struct *task, void *stack_start,
11854 unsigned long *stack, unsigned long bp,
11855 const struct stacktrace_ops *ops, void *data,
11856 unsigned long *end, int *graph)
11857 {
11858 struct stack_frame *frame = (struct stack_frame *)bp;
11859
11860- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11861+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11862 unsigned long addr;
11863
11864 addr = *stack;
11865@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11866 } else {
11867 ops->address(data, addr, 0);
11868 }
11869- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11870+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11871 }
11872 stack++;
11873 }
11874@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11875 EXPORT_SYMBOL_GPL(print_context_stack);
11876
11877 unsigned long
11878-print_context_stack_bp(struct thread_info *tinfo,
11879+print_context_stack_bp(struct task_struct *task, void *stack_start,
11880 unsigned long *stack, unsigned long bp,
11881 const struct stacktrace_ops *ops, void *data,
11882 unsigned long *end, int *graph)
11883@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11884 struct stack_frame *frame = (struct stack_frame *)bp;
11885 unsigned long *ret_addr = &frame->return_address;
11886
11887- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11888+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11889 unsigned long addr = *ret_addr;
11890
11891 if (!__kernel_text_address(addr))
11892@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11893 ops->address(data, addr, 1);
11894 frame = frame->next_frame;
11895 ret_addr = &frame->return_address;
11896- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11897+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11898 }
11899
11900 return (unsigned long)frame;
11901@@ -186,7 +186,7 @@ void dump_stack(void)
11902
11903 bp = stack_frame(current, NULL);
11904 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11905- current->pid, current->comm, print_tainted(),
11906+ task_pid_nr(current), current->comm, print_tainted(),
11907 init_utsname()->release,
11908 (int)strcspn(init_utsname()->version, " "),
11909 init_utsname()->version);
11910@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11911 }
11912 EXPORT_SYMBOL_GPL(oops_begin);
11913
11914+extern void gr_handle_kernel_exploit(void);
11915+
11916 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11917 {
11918 if (regs && kexec_should_crash(current))
11919@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11920 panic("Fatal exception in interrupt");
11921 if (panic_on_oops)
11922 panic("Fatal exception");
11923- do_exit(signr);
11924+
11925+ gr_handle_kernel_exploit();
11926+
11927+ do_group_exit(signr);
11928 }
11929
11930 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11931@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11932
11933 show_registers(regs);
11934 #ifdef CONFIG_X86_32
11935- if (user_mode_vm(regs)) {
11936+ if (user_mode(regs)) {
11937 sp = regs->sp;
11938 ss = regs->ss & 0xffff;
11939 } else {
11940@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11941 unsigned long flags = oops_begin();
11942 int sig = SIGSEGV;
11943
11944- if (!user_mode_vm(regs))
11945+ if (!user_mode(regs))
11946 report_bug(regs->ip, regs);
11947
11948 if (__die(str, regs, err))
11949diff -urNp linux-3.1.4/arch/x86/kernel/early_printk.c linux-3.1.4/arch/x86/kernel/early_printk.c
11950--- linux-3.1.4/arch/x86/kernel/early_printk.c 2011-11-11 15:19:27.000000000 -0500
11951+++ linux-3.1.4/arch/x86/kernel/early_printk.c 2011-11-16 18:40:08.000000000 -0500
11952@@ -7,6 +7,7 @@
11953 #include <linux/pci_regs.h>
11954 #include <linux/pci_ids.h>
11955 #include <linux/errno.h>
11956+#include <linux/sched.h>
11957 #include <asm/io.h>
11958 #include <asm/processor.h>
11959 #include <asm/fcntl.h>
11960@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11961 int n;
11962 va_list ap;
11963
11964+ pax_track_stack();
11965+
11966 va_start(ap, fmt);
11967 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11968 early_console->write(early_console, buf, n);
11969diff -urNp linux-3.1.4/arch/x86/kernel/entry_32.S linux-3.1.4/arch/x86/kernel/entry_32.S
11970--- linux-3.1.4/arch/x86/kernel/entry_32.S 2011-11-11 15:19:27.000000000 -0500
11971+++ linux-3.1.4/arch/x86/kernel/entry_32.S 2011-11-16 18:40:08.000000000 -0500
11972@@ -186,13 +186,146 @@
11973 /*CFI_REL_OFFSET gs, PT_GS*/
11974 .endm
11975 .macro SET_KERNEL_GS reg
11976+
11977+#ifdef CONFIG_CC_STACKPROTECTOR
11978 movl $(__KERNEL_STACK_CANARY), \reg
11979+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11980+ movl $(__USER_DS), \reg
11981+#else
11982+ xorl \reg, \reg
11983+#endif
11984+
11985 movl \reg, %gs
11986 .endm
11987
11988 #endif /* CONFIG_X86_32_LAZY_GS */
11989
11990-.macro SAVE_ALL
11991+.macro pax_enter_kernel
11992+#ifdef CONFIG_PAX_KERNEXEC
11993+ call pax_enter_kernel
11994+#endif
11995+.endm
11996+
11997+.macro pax_exit_kernel
11998+#ifdef CONFIG_PAX_KERNEXEC
11999+ call pax_exit_kernel
12000+#endif
12001+.endm
12002+
12003+#ifdef CONFIG_PAX_KERNEXEC
12004+ENTRY(pax_enter_kernel)
12005+#ifdef CONFIG_PARAVIRT
12006+ pushl %eax
12007+ pushl %ecx
12008+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12009+ mov %eax, %esi
12010+#else
12011+ mov %cr0, %esi
12012+#endif
12013+ bts $16, %esi
12014+ jnc 1f
12015+ mov %cs, %esi
12016+ cmp $__KERNEL_CS, %esi
12017+ jz 3f
12018+ ljmp $__KERNEL_CS, $3f
12019+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12020+2:
12021+#ifdef CONFIG_PARAVIRT
12022+ mov %esi, %eax
12023+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12024+#else
12025+ mov %esi, %cr0
12026+#endif
12027+3:
12028+#ifdef CONFIG_PARAVIRT
12029+ popl %ecx
12030+ popl %eax
12031+#endif
12032+ ret
12033+ENDPROC(pax_enter_kernel)
12034+
12035+ENTRY(pax_exit_kernel)
12036+#ifdef CONFIG_PARAVIRT
12037+ pushl %eax
12038+ pushl %ecx
12039+#endif
12040+ mov %cs, %esi
12041+ cmp $__KERNEXEC_KERNEL_CS, %esi
12042+ jnz 2f
12043+#ifdef CONFIG_PARAVIRT
12044+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12045+ mov %eax, %esi
12046+#else
12047+ mov %cr0, %esi
12048+#endif
12049+ btr $16, %esi
12050+ ljmp $__KERNEL_CS, $1f
12051+1:
12052+#ifdef CONFIG_PARAVIRT
12053+ mov %esi, %eax
12054+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12055+#else
12056+ mov %esi, %cr0
12057+#endif
12058+2:
12059+#ifdef CONFIG_PARAVIRT
12060+ popl %ecx
12061+ popl %eax
12062+#endif
12063+ ret
12064+ENDPROC(pax_exit_kernel)
12065+#endif
12066+
12067+.macro pax_erase_kstack
12068+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12069+ call pax_erase_kstack
12070+#endif
12071+.endm
12072+
12073+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12074+/*
12075+ * ebp: thread_info
12076+ * ecx, edx: can be clobbered
12077+ */
12078+ENTRY(pax_erase_kstack)
12079+ pushl %edi
12080+ pushl %eax
12081+
12082+ mov TI_lowest_stack(%ebp), %edi
12083+ mov $-0xBEEF, %eax
12084+ std
12085+
12086+1: mov %edi, %ecx
12087+ and $THREAD_SIZE_asm - 1, %ecx
12088+ shr $2, %ecx
12089+ repne scasl
12090+ jecxz 2f
12091+
12092+ cmp $2*16, %ecx
12093+ jc 2f
12094+
12095+ mov $2*16, %ecx
12096+ repe scasl
12097+ jecxz 2f
12098+ jne 1b
12099+
12100+2: cld
12101+ mov %esp, %ecx
12102+ sub %edi, %ecx
12103+ shr $2, %ecx
12104+ rep stosl
12105+
12106+ mov TI_task_thread_sp0(%ebp), %edi
12107+ sub $128, %edi
12108+ mov %edi, TI_lowest_stack(%ebp)
12109+
12110+ popl %eax
12111+ popl %edi
12112+ ret
12113+ENDPROC(pax_erase_kstack)
12114+#endif
12115+
12116+.macro __SAVE_ALL _DS
12117 cld
12118 PUSH_GS
12119 pushl_cfi %fs
12120@@ -215,7 +348,7 @@
12121 CFI_REL_OFFSET ecx, 0
12122 pushl_cfi %ebx
12123 CFI_REL_OFFSET ebx, 0
12124- movl $(__USER_DS), %edx
12125+ movl $\_DS, %edx
12126 movl %edx, %ds
12127 movl %edx, %es
12128 movl $(__KERNEL_PERCPU), %edx
12129@@ -223,6 +356,15 @@
12130 SET_KERNEL_GS %edx
12131 .endm
12132
12133+.macro SAVE_ALL
12134+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12135+ __SAVE_ALL __KERNEL_DS
12136+ pax_enter_kernel
12137+#else
12138+ __SAVE_ALL __USER_DS
12139+#endif
12140+.endm
12141+
12142 .macro RESTORE_INT_REGS
12143 popl_cfi %ebx
12144 CFI_RESTORE ebx
12145@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12146 popfl_cfi
12147 jmp syscall_exit
12148 CFI_ENDPROC
12149-END(ret_from_fork)
12150+ENDPROC(ret_from_fork)
12151
12152 /*
12153 * Interrupt exit functions should be protected against kprobes
12154@@ -333,7 +475,15 @@ check_userspace:
12155 movb PT_CS(%esp), %al
12156 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12157 cmpl $USER_RPL, %eax
12158+
12159+#ifdef CONFIG_PAX_KERNEXEC
12160+ jae resume_userspace
12161+
12162+ PAX_EXIT_KERNEL
12163+ jmp resume_kernel
12164+#else
12165 jb resume_kernel # not returning to v8086 or userspace
12166+#endif
12167
12168 ENTRY(resume_userspace)
12169 LOCKDEP_SYS_EXIT
12170@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12171 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12172 # int/exception return?
12173 jne work_pending
12174- jmp restore_all
12175-END(ret_from_exception)
12176+ jmp restore_all_pax
12177+ENDPROC(ret_from_exception)
12178
12179 #ifdef CONFIG_PREEMPT
12180 ENTRY(resume_kernel)
12181@@ -361,7 +511,7 @@ need_resched:
12182 jz restore_all
12183 call preempt_schedule_irq
12184 jmp need_resched
12185-END(resume_kernel)
12186+ENDPROC(resume_kernel)
12187 #endif
12188 CFI_ENDPROC
12189 /*
12190@@ -395,23 +545,34 @@ sysenter_past_esp:
12191 /*CFI_REL_OFFSET cs, 0*/
12192 /*
12193 * Push current_thread_info()->sysenter_return to the stack.
12194- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12195- * pushed above; +8 corresponds to copy_thread's esp0 setting.
12196 */
12197- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12198+ pushl_cfi $0
12199 CFI_REL_OFFSET eip, 0
12200
12201 pushl_cfi %eax
12202 SAVE_ALL
12203+ GET_THREAD_INFO(%ebp)
12204+ movl TI_sysenter_return(%ebp),%ebp
12205+ movl %ebp,PT_EIP(%esp)
12206 ENABLE_INTERRUPTS(CLBR_NONE)
12207
12208 /*
12209 * Load the potential sixth argument from user stack.
12210 * Careful about security.
12211 */
12212+ movl PT_OLDESP(%esp),%ebp
12213+
12214+#ifdef CONFIG_PAX_MEMORY_UDEREF
12215+ mov PT_OLDSS(%esp),%ds
12216+1: movl %ds:(%ebp),%ebp
12217+ push %ss
12218+ pop %ds
12219+#else
12220 cmpl $__PAGE_OFFSET-3,%ebp
12221 jae syscall_fault
12222 1: movl (%ebp),%ebp
12223+#endif
12224+
12225 movl %ebp,PT_EBP(%esp)
12226 .section __ex_table,"a"
12227 .align 4
12228@@ -434,12 +595,24 @@ sysenter_do_call:
12229 testl $_TIF_ALLWORK_MASK, %ecx
12230 jne sysexit_audit
12231 sysenter_exit:
12232+
12233+#ifdef CONFIG_PAX_RANDKSTACK
12234+ pushl_cfi %eax
12235+ movl %esp, %eax
12236+ call pax_randomize_kstack
12237+ popl_cfi %eax
12238+#endif
12239+
12240+ pax_erase_kstack
12241+
12242 /* if something modifies registers it must also disable sysexit */
12243 movl PT_EIP(%esp), %edx
12244 movl PT_OLDESP(%esp), %ecx
12245 xorl %ebp,%ebp
12246 TRACE_IRQS_ON
12247 1: mov PT_FS(%esp), %fs
12248+2: mov PT_DS(%esp), %ds
12249+3: mov PT_ES(%esp), %es
12250 PTGS_TO_GS
12251 ENABLE_INTERRUPTS_SYSEXIT
12252
12253@@ -456,6 +629,9 @@ sysenter_audit:
12254 movl %eax,%edx /* 2nd arg: syscall number */
12255 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12256 call audit_syscall_entry
12257+
12258+ pax_erase_kstack
12259+
12260 pushl_cfi %ebx
12261 movl PT_EAX(%esp),%eax /* reload syscall number */
12262 jmp sysenter_do_call
12263@@ -482,11 +658,17 @@ sysexit_audit:
12264
12265 CFI_ENDPROC
12266 .pushsection .fixup,"ax"
12267-2: movl $0,PT_FS(%esp)
12268+4: movl $0,PT_FS(%esp)
12269+ jmp 1b
12270+5: movl $0,PT_DS(%esp)
12271+ jmp 1b
12272+6: movl $0,PT_ES(%esp)
12273 jmp 1b
12274 .section __ex_table,"a"
12275 .align 4
12276- .long 1b,2b
12277+ .long 1b,4b
12278+ .long 2b,5b
12279+ .long 3b,6b
12280 .popsection
12281 PTGS_TO_GS_EX
12282 ENDPROC(ia32_sysenter_target)
12283@@ -519,6 +701,15 @@ syscall_exit:
12284 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12285 jne syscall_exit_work
12286
12287+restore_all_pax:
12288+
12289+#ifdef CONFIG_PAX_RANDKSTACK
12290+ movl %esp, %eax
12291+ call pax_randomize_kstack
12292+#endif
12293+
12294+ pax_erase_kstack
12295+
12296 restore_all:
12297 TRACE_IRQS_IRET
12298 restore_all_notrace:
12299@@ -578,14 +769,34 @@ ldt_ss:
12300 * compensating for the offset by changing to the ESPFIX segment with
12301 * a base address that matches for the difference.
12302 */
12303-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
12304+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
12305 mov %esp, %edx /* load kernel esp */
12306 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12307 mov %dx, %ax /* eax: new kernel esp */
12308 sub %eax, %edx /* offset (low word is 0) */
12309+#ifdef CONFIG_SMP
12310+ movl PER_CPU_VAR(cpu_number), %ebx
12311+ shll $PAGE_SHIFT_asm, %ebx
12312+ addl $cpu_gdt_table, %ebx
12313+#else
12314+ movl $cpu_gdt_table, %ebx
12315+#endif
12316 shr $16, %edx
12317- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
12318- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
12319+
12320+#ifdef CONFIG_PAX_KERNEXEC
12321+ mov %cr0, %esi
12322+ btr $16, %esi
12323+ mov %esi, %cr0
12324+#endif
12325+
12326+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
12327+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
12328+
12329+#ifdef CONFIG_PAX_KERNEXEC
12330+ bts $16, %esi
12331+ mov %esi, %cr0
12332+#endif
12333+
12334 pushl_cfi $__ESPFIX_SS
12335 pushl_cfi %eax /* new kernel esp */
12336 /* Disable interrupts, but do not irqtrace this section: we
12337@@ -614,34 +825,28 @@ work_resched:
12338 movl TI_flags(%ebp), %ecx
12339 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
12340 # than syscall tracing?
12341- jz restore_all
12342+ jz restore_all_pax
12343 testb $_TIF_NEED_RESCHED, %cl
12344 jnz work_resched
12345
12346 work_notifysig: # deal with pending signals and
12347 # notify-resume requests
12348+ movl %esp, %eax
12349 #ifdef CONFIG_VM86
12350 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
12351- movl %esp, %eax
12352- jne work_notifysig_v86 # returning to kernel-space or
12353+ jz 1f # returning to kernel-space or
12354 # vm86-space
12355- xorl %edx, %edx
12356- call do_notify_resume
12357- jmp resume_userspace_sig
12358
12359- ALIGN
12360-work_notifysig_v86:
12361 pushl_cfi %ecx # save ti_flags for do_notify_resume
12362 call save_v86_state # %eax contains pt_regs pointer
12363 popl_cfi %ecx
12364 movl %eax, %esp
12365-#else
12366- movl %esp, %eax
12367+1:
12368 #endif
12369 xorl %edx, %edx
12370 call do_notify_resume
12371 jmp resume_userspace_sig
12372-END(work_pending)
12373+ENDPROC(work_pending)
12374
12375 # perform syscall exit tracing
12376 ALIGN
12377@@ -649,11 +854,14 @@ syscall_trace_entry:
12378 movl $-ENOSYS,PT_EAX(%esp)
12379 movl %esp, %eax
12380 call syscall_trace_enter
12381+
12382+ pax_erase_kstack
12383+
12384 /* What it returned is what we'll actually use. */
12385 cmpl $(nr_syscalls), %eax
12386 jnae syscall_call
12387 jmp syscall_exit
12388-END(syscall_trace_entry)
12389+ENDPROC(syscall_trace_entry)
12390
12391 # perform syscall exit tracing
12392 ALIGN
12393@@ -666,20 +874,24 @@ syscall_exit_work:
12394 movl %esp, %eax
12395 call syscall_trace_leave
12396 jmp resume_userspace
12397-END(syscall_exit_work)
12398+ENDPROC(syscall_exit_work)
12399 CFI_ENDPROC
12400
12401 RING0_INT_FRAME # can't unwind into user space anyway
12402 syscall_fault:
12403+#ifdef CONFIG_PAX_MEMORY_UDEREF
12404+ push %ss
12405+ pop %ds
12406+#endif
12407 GET_THREAD_INFO(%ebp)
12408 movl $-EFAULT,PT_EAX(%esp)
12409 jmp resume_userspace
12410-END(syscall_fault)
12411+ENDPROC(syscall_fault)
12412
12413 syscall_badsys:
12414 movl $-ENOSYS,PT_EAX(%esp)
12415 jmp resume_userspace
12416-END(syscall_badsys)
12417+ENDPROC(syscall_badsys)
12418 CFI_ENDPROC
12419 /*
12420 * End of kprobes section
12421@@ -753,6 +965,36 @@ ptregs_clone:
12422 CFI_ENDPROC
12423 ENDPROC(ptregs_clone)
12424
12425+ ALIGN;
12426+ENTRY(kernel_execve)
12427+ CFI_STARTPROC
12428+ pushl_cfi %ebp
12429+ sub $PT_OLDSS+4,%esp
12430+ pushl_cfi %edi
12431+ pushl_cfi %ecx
12432+ pushl_cfi %eax
12433+ lea 3*4(%esp),%edi
12434+ mov $PT_OLDSS/4+1,%ecx
12435+ xorl %eax,%eax
12436+ rep stosl
12437+ popl_cfi %eax
12438+ popl_cfi %ecx
12439+ popl_cfi %edi
12440+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
12441+ pushl_cfi %esp
12442+ call sys_execve
12443+ add $4,%esp
12444+ CFI_ADJUST_CFA_OFFSET -4
12445+ GET_THREAD_INFO(%ebp)
12446+ test %eax,%eax
12447+ jz syscall_exit
12448+ add $PT_OLDSS+4,%esp
12449+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
12450+ popl_cfi %ebp
12451+ ret
12452+ CFI_ENDPROC
12453+ENDPROC(kernel_execve)
12454+
12455 .macro FIXUP_ESPFIX_STACK
12456 /*
12457 * Switch back for ESPFIX stack to the normal zerobased stack
12458@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
12459 * normal stack and adjusts ESP with the matching offset.
12460 */
12461 /* fixup the stack */
12462- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
12463- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
12464+#ifdef CONFIG_SMP
12465+ movl PER_CPU_VAR(cpu_number), %ebx
12466+ shll $PAGE_SHIFT_asm, %ebx
12467+ addl $cpu_gdt_table, %ebx
12468+#else
12469+ movl $cpu_gdt_table, %ebx
12470+#endif
12471+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
12472+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
12473 shl $16, %eax
12474 addl %esp, %eax /* the adjusted stack pointer */
12475 pushl_cfi $__KERNEL_DS
12476@@ -816,7 +1065,7 @@ vector=vector+1
12477 .endr
12478 2: jmp common_interrupt
12479 .endr
12480-END(irq_entries_start)
12481+ENDPROC(irq_entries_start)
12482
12483 .previous
12484 END(interrupt)
12485@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
12486 pushl_cfi $do_coprocessor_error
12487 jmp error_code
12488 CFI_ENDPROC
12489-END(coprocessor_error)
12490+ENDPROC(coprocessor_error)
12491
12492 ENTRY(simd_coprocessor_error)
12493 RING0_INT_FRAME
12494@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
12495 #endif
12496 jmp error_code
12497 CFI_ENDPROC
12498-END(simd_coprocessor_error)
12499+ENDPROC(simd_coprocessor_error)
12500
12501 ENTRY(device_not_available)
12502 RING0_INT_FRAME
12503@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
12504 pushl_cfi $do_device_not_available
12505 jmp error_code
12506 CFI_ENDPROC
12507-END(device_not_available)
12508+ENDPROC(device_not_available)
12509
12510 #ifdef CONFIG_PARAVIRT
12511 ENTRY(native_iret)
12512@@ -902,12 +1151,12 @@ ENTRY(native_iret)
12513 .align 4
12514 .long native_iret, iret_exc
12515 .previous
12516-END(native_iret)
12517+ENDPROC(native_iret)
12518
12519 ENTRY(native_irq_enable_sysexit)
12520 sti
12521 sysexit
12522-END(native_irq_enable_sysexit)
12523+ENDPROC(native_irq_enable_sysexit)
12524 #endif
12525
12526 ENTRY(overflow)
12527@@ -916,7 +1165,7 @@ ENTRY(overflow)
12528 pushl_cfi $do_overflow
12529 jmp error_code
12530 CFI_ENDPROC
12531-END(overflow)
12532+ENDPROC(overflow)
12533
12534 ENTRY(bounds)
12535 RING0_INT_FRAME
12536@@ -924,7 +1173,7 @@ ENTRY(bounds)
12537 pushl_cfi $do_bounds
12538 jmp error_code
12539 CFI_ENDPROC
12540-END(bounds)
12541+ENDPROC(bounds)
12542
12543 ENTRY(invalid_op)
12544 RING0_INT_FRAME
12545@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
12546 pushl_cfi $do_invalid_op
12547 jmp error_code
12548 CFI_ENDPROC
12549-END(invalid_op)
12550+ENDPROC(invalid_op)
12551
12552 ENTRY(coprocessor_segment_overrun)
12553 RING0_INT_FRAME
12554@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
12555 pushl_cfi $do_coprocessor_segment_overrun
12556 jmp error_code
12557 CFI_ENDPROC
12558-END(coprocessor_segment_overrun)
12559+ENDPROC(coprocessor_segment_overrun)
12560
12561 ENTRY(invalid_TSS)
12562 RING0_EC_FRAME
12563 pushl_cfi $do_invalid_TSS
12564 jmp error_code
12565 CFI_ENDPROC
12566-END(invalid_TSS)
12567+ENDPROC(invalid_TSS)
12568
12569 ENTRY(segment_not_present)
12570 RING0_EC_FRAME
12571 pushl_cfi $do_segment_not_present
12572 jmp error_code
12573 CFI_ENDPROC
12574-END(segment_not_present)
12575+ENDPROC(segment_not_present)
12576
12577 ENTRY(stack_segment)
12578 RING0_EC_FRAME
12579 pushl_cfi $do_stack_segment
12580 jmp error_code
12581 CFI_ENDPROC
12582-END(stack_segment)
12583+ENDPROC(stack_segment)
12584
12585 ENTRY(alignment_check)
12586 RING0_EC_FRAME
12587 pushl_cfi $do_alignment_check
12588 jmp error_code
12589 CFI_ENDPROC
12590-END(alignment_check)
12591+ENDPROC(alignment_check)
12592
12593 ENTRY(divide_error)
12594 RING0_INT_FRAME
12595@@ -976,7 +1225,7 @@ ENTRY(divide_error)
12596 pushl_cfi $do_divide_error
12597 jmp error_code
12598 CFI_ENDPROC
12599-END(divide_error)
12600+ENDPROC(divide_error)
12601
12602 #ifdef CONFIG_X86_MCE
12603 ENTRY(machine_check)
12604@@ -985,7 +1234,7 @@ ENTRY(machine_check)
12605 pushl_cfi machine_check_vector
12606 jmp error_code
12607 CFI_ENDPROC
12608-END(machine_check)
12609+ENDPROC(machine_check)
12610 #endif
12611
12612 ENTRY(spurious_interrupt_bug)
12613@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
12614 pushl_cfi $do_spurious_interrupt_bug
12615 jmp error_code
12616 CFI_ENDPROC
12617-END(spurious_interrupt_bug)
12618+ENDPROC(spurious_interrupt_bug)
12619 /*
12620 * End of kprobes section
12621 */
12622@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector
12623
12624 ENTRY(mcount)
12625 ret
12626-END(mcount)
12627+ENDPROC(mcount)
12628
12629 ENTRY(ftrace_caller)
12630 cmpl $0, function_trace_stop
12631@@ -1138,7 +1387,7 @@ ftrace_graph_call:
12632 .globl ftrace_stub
12633 ftrace_stub:
12634 ret
12635-END(ftrace_caller)
12636+ENDPROC(ftrace_caller)
12637
12638 #else /* ! CONFIG_DYNAMIC_FTRACE */
12639
12640@@ -1174,7 +1423,7 @@ trace:
12641 popl %ecx
12642 popl %eax
12643 jmp ftrace_stub
12644-END(mcount)
12645+ENDPROC(mcount)
12646 #endif /* CONFIG_DYNAMIC_FTRACE */
12647 #endif /* CONFIG_FUNCTION_TRACER */
12648
12649@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
12650 popl %ecx
12651 popl %eax
12652 ret
12653-END(ftrace_graph_caller)
12654+ENDPROC(ftrace_graph_caller)
12655
12656 .globl return_to_handler
12657 return_to_handler:
12658@@ -1209,7 +1458,6 @@ return_to_handler:
12659 jmp *%ecx
12660 #endif
12661
12662-.section .rodata,"a"
12663 #include "syscall_table_32.S"
12664
12665 syscall_table_size=(.-sys_call_table)
12666@@ -1255,15 +1503,18 @@ error_code:
12667 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
12668 REG_TO_PTGS %ecx
12669 SET_KERNEL_GS %ecx
12670- movl $(__USER_DS), %ecx
12671+ movl $(__KERNEL_DS), %ecx
12672 movl %ecx, %ds
12673 movl %ecx, %es
12674+
12675+ pax_enter_kernel
12676+
12677 TRACE_IRQS_OFF
12678 movl %esp,%eax # pt_regs pointer
12679 call *%edi
12680 jmp ret_from_exception
12681 CFI_ENDPROC
12682-END(page_fault)
12683+ENDPROC(page_fault)
12684
12685 /*
12686 * Debug traps and NMI can happen at the one SYSENTER instruction
12687@@ -1305,7 +1556,7 @@ debug_stack_correct:
12688 call do_debug
12689 jmp ret_from_exception
12690 CFI_ENDPROC
12691-END(debug)
12692+ENDPROC(debug)
12693
12694 /*
12695 * NMI is doubly nasty. It can happen _while_ we're handling
12696@@ -1342,6 +1593,9 @@ nmi_stack_correct:
12697 xorl %edx,%edx # zero error code
12698 movl %esp,%eax # pt_regs pointer
12699 call do_nmi
12700+
12701+ pax_exit_kernel
12702+
12703 jmp restore_all_notrace
12704 CFI_ENDPROC
12705
12706@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
12707 FIXUP_ESPFIX_STACK # %eax == %esp
12708 xorl %edx,%edx # zero error code
12709 call do_nmi
12710+
12711+ pax_exit_kernel
12712+
12713 RESTORE_REGS
12714 lss 12+4(%esp), %esp # back to espfix stack
12715 CFI_ADJUST_CFA_OFFSET -24
12716 jmp irq_return
12717 CFI_ENDPROC
12718-END(nmi)
12719+ENDPROC(nmi)
12720
12721 ENTRY(int3)
12722 RING0_INT_FRAME
12723@@ -1395,14 +1652,14 @@ ENTRY(int3)
12724 call do_int3
12725 jmp ret_from_exception
12726 CFI_ENDPROC
12727-END(int3)
12728+ENDPROC(int3)
12729
12730 ENTRY(general_protection)
12731 RING0_EC_FRAME
12732 pushl_cfi $do_general_protection
12733 jmp error_code
12734 CFI_ENDPROC
12735-END(general_protection)
12736+ENDPROC(general_protection)
12737
12738 #ifdef CONFIG_KVM_GUEST
12739 ENTRY(async_page_fault)
12740@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
12741 pushl_cfi $do_async_page_fault
12742 jmp error_code
12743 CFI_ENDPROC
12744-END(async_page_fault)
12745+ENDPROC(async_page_fault)
12746 #endif
12747
12748 /*
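
The pax_erase_kstack routine added to entry_32.S above walks down from TI_lowest_stack to find where the previously written poison still starts, then refills everything from there up to the current stack pointer with the -0xBEEF pattern and resets lowest_stack, so data left behind by deep call chains does not leak into the next syscall. The sketch below models only the refill step; the array, poison constant and indices are chosen for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define STACK_WORDS 64
#define POISON      0xffff4111u         /* the -0xBEEF value the asm stores */

static uint32_t stack[STACK_WORDS];     /* index 0 = lowest address */

/* overwrite [lowest_used, current_sp) with poison, like the "rep stosl" */
static void erase_kstack(size_t lowest_used, size_t current_sp)
{
        size_t i;

        for (i = lowest_used; i < current_sp; i++)
                stack[i] = POISON;
}

int main(void)
{
        size_t i;

        /* a deep call chain dirtied words 8..63; back at depth 48, only 48..63 are live */
        for (i = 8; i < STACK_WORDS; i++)
                stack[i] = (uint32_t)i;

        erase_kstack(8, 48);

        printf("stack[10] = %#x, stack[50] = %#x\n",
               (unsigned)stack[10], (unsigned)stack[50]);
        return 0;
}
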
12749diff -urNp linux-3.1.4/arch/x86/kernel/entry_64.S linux-3.1.4/arch/x86/kernel/entry_64.S
12750--- linux-3.1.4/arch/x86/kernel/entry_64.S 2011-11-11 15:19:27.000000000 -0500
12751+++ linux-3.1.4/arch/x86/kernel/entry_64.S 2011-12-02 17:40:13.000000000 -0500
12752@@ -55,6 +55,8 @@
12753 #include <asm/paravirt.h>
12754 #include <asm/ftrace.h>
12755 #include <asm/percpu.h>
12756+#include <asm/pgtable.h>
12757+#include <asm/alternative-asm.h>
12758
12759 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12760 #include <linux/elf-em.h>
12761@@ -68,8 +70,9 @@
12762 #ifdef CONFIG_FUNCTION_TRACER
12763 #ifdef CONFIG_DYNAMIC_FTRACE
12764 ENTRY(mcount)
12765+ pax_force_retaddr
12766 retq
12767-END(mcount)
12768+ENDPROC(mcount)
12769
12770 ENTRY(ftrace_caller)
12771 cmpl $0, function_trace_stop
12772@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
12773 #endif
12774
12775 GLOBAL(ftrace_stub)
12776+ pax_force_retaddr
12777 retq
12778-END(ftrace_caller)
12779+ENDPROC(ftrace_caller)
12780
12781 #else /* ! CONFIG_DYNAMIC_FTRACE */
12782 ENTRY(mcount)
12783@@ -112,6 +116,7 @@ ENTRY(mcount)
12784 #endif
12785
12786 GLOBAL(ftrace_stub)
12787+ pax_force_retaddr
12788 retq
12789
12790 trace:
12791@@ -121,12 +126,13 @@ trace:
12792 movq 8(%rbp), %rsi
12793 subq $MCOUNT_INSN_SIZE, %rdi
12794
12795+ pax_force_fptr ftrace_trace_function
12796 call *ftrace_trace_function
12797
12798 MCOUNT_RESTORE_FRAME
12799
12800 jmp ftrace_stub
12801-END(mcount)
12802+ENDPROC(mcount)
12803 #endif /* CONFIG_DYNAMIC_FTRACE */
12804 #endif /* CONFIG_FUNCTION_TRACER */
12805
12806@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
12807
12808 MCOUNT_RESTORE_FRAME
12809
12810+ pax_force_retaddr
12811 retq
12812-END(ftrace_graph_caller)
12813+ENDPROC(ftrace_graph_caller)
12814
12815 GLOBAL(return_to_handler)
12816 subq $24, %rsp
12817@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
12818 movq 8(%rsp), %rdx
12819 movq (%rsp), %rax
12820 addq $24, %rsp
12821+ pax_force_fptr %rdi
12822 jmp *%rdi
12823 #endif
12824
12825@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
12826 ENDPROC(native_usergs_sysret64)
12827 #endif /* CONFIG_PARAVIRT */
12828
12829+ .macro ljmpq sel, off
12830+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12831+ .byte 0x48; ljmp *1234f(%rip)
12832+ .pushsection .rodata
12833+ .align 16
12834+ 1234: .quad \off; .word \sel
12835+ .popsection
12836+#else
12837+ pushq $\sel
12838+ pushq $\off
12839+ lretq
12840+#endif
12841+ .endm
12842+
12843+ .macro pax_enter_kernel
12844+ pax_set_fptr_mask
12845+#ifdef CONFIG_PAX_KERNEXEC
12846+ call pax_enter_kernel
12847+#endif
12848+ .endm
12849+
12850+ .macro pax_exit_kernel
12851+#ifdef CONFIG_PAX_KERNEXEC
12852+ call pax_exit_kernel
12853+#endif
12854+ .endm
12855+
12856+#ifdef CONFIG_PAX_KERNEXEC
12857+ENTRY(pax_enter_kernel)
12858+ pushq %rdi
12859+
12860+#ifdef CONFIG_PARAVIRT
12861+ PV_SAVE_REGS(CLBR_RDI)
12862+#endif
12863+
12864+ GET_CR0_INTO_RDI
12865+ bts $16,%rdi
12866+ jnc 3f
12867+ mov %cs,%edi
12868+ cmp $__KERNEL_CS,%edi
12869+ jnz 2f
12870+1:
12871+
12872+#ifdef CONFIG_PARAVIRT
12873+ PV_RESTORE_REGS(CLBR_RDI)
12874+#endif
12875+
12876+ popq %rdi
12877+ pax_force_retaddr
12878+ retq
12879+
12880+2: ljmpq __KERNEL_CS,1f
12881+3: ljmpq __KERNEXEC_KERNEL_CS,4f
12882+4: SET_RDI_INTO_CR0
12883+ jmp 1b
12884+ENDPROC(pax_enter_kernel)
12885+
12886+ENTRY(pax_exit_kernel)
12887+ pushq %rdi
12888+
12889+#ifdef CONFIG_PARAVIRT
12890+ PV_SAVE_REGS(CLBR_RDI)
12891+#endif
12892+
12893+ mov %cs,%rdi
12894+ cmp $__KERNEXEC_KERNEL_CS,%edi
12895+ jz 2f
12896+1:
12897+
12898+#ifdef CONFIG_PARAVIRT
12899+ PV_RESTORE_REGS(CLBR_RDI);
12900+#endif
12901+
12902+ popq %rdi
12903+ pax_force_retaddr
12904+ retq
12905+
12906+2: GET_CR0_INTO_RDI
12907+ btr $16,%rdi
12908+ ljmpq __KERNEL_CS,3f
12909+3: SET_RDI_INTO_CR0
12910+ jmp 1b
12911+#ifdef CONFIG_PARAVIRT
12912+ PV_RESTORE_REGS(CLBR_RDI);
12913+#endif
12914+
12915+ popq %rdi
12916+ pax_force_retaddr
12917+ retq
12918+ENDPROC(pax_exit_kernel)
12919+#endif
12920+
12921+ .macro pax_enter_kernel_user
12922+ pax_set_fptr_mask
12923+#ifdef CONFIG_PAX_MEMORY_UDEREF
12924+ call pax_enter_kernel_user
12925+#endif
12926+ .endm
12927+
12928+ .macro pax_exit_kernel_user
12929+#ifdef CONFIG_PAX_MEMORY_UDEREF
12930+ call pax_exit_kernel_user
12931+#endif
12932+#ifdef CONFIG_PAX_RANDKSTACK
12933+ push %rax
12934+ call pax_randomize_kstack
12935+ pop %rax
12936+#endif
12937+ .endm
12938+
12939+#ifdef CONFIG_PAX_MEMORY_UDEREF
12940+ENTRY(pax_enter_kernel_user)
12941+ pushq %rdi
12942+ pushq %rbx
12943+
12944+#ifdef CONFIG_PARAVIRT
12945+ PV_SAVE_REGS(CLBR_RDI)
12946+#endif
12947+
12948+ GET_CR3_INTO_RDI
12949+ mov %rdi,%rbx
12950+ add $__START_KERNEL_map,%rbx
12951+ sub phys_base(%rip),%rbx
12952+
12953+#ifdef CONFIG_PARAVIRT
12954+ pushq %rdi
12955+ cmpl $0, pv_info+PARAVIRT_enabled
12956+ jz 1f
12957+ i = 0
12958+ .rept USER_PGD_PTRS
12959+ mov i*8(%rbx),%rsi
12960+ mov $0,%sil
12961+ lea i*8(%rbx),%rdi
12962+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12963+ i = i + 1
12964+ .endr
12965+ jmp 2f
12966+1:
12967+#endif
12968+
12969+ i = 0
12970+ .rept USER_PGD_PTRS
12971+ movb $0,i*8(%rbx)
12972+ i = i + 1
12973+ .endr
12974+
12975+#ifdef CONFIG_PARAVIRT
12976+2: popq %rdi
12977+#endif
12978+ SET_RDI_INTO_CR3
12979+
12980+#ifdef CONFIG_PAX_KERNEXEC
12981+ GET_CR0_INTO_RDI
12982+ bts $16,%rdi
12983+ SET_RDI_INTO_CR0
12984+#endif
12985+
12986+#ifdef CONFIG_PARAVIRT
12987+ PV_RESTORE_REGS(CLBR_RDI)
12988+#endif
12989+
12990+ popq %rbx
12991+ popq %rdi
12992+ pax_force_retaddr
12993+ retq
12994+ENDPROC(pax_enter_kernel_user)
12995+
12996+ENTRY(pax_exit_kernel_user)
12997+ push %rdi
12998+
12999+#ifdef CONFIG_PARAVIRT
13000+ pushq %rbx
13001+ PV_SAVE_REGS(CLBR_RDI)
13002+#endif
13003+
13004+#ifdef CONFIG_PAX_KERNEXEC
13005+ GET_CR0_INTO_RDI
13006+ btr $16,%rdi
13007+ SET_RDI_INTO_CR0
13008+#endif
13009+
13010+ GET_CR3_INTO_RDI
13011+ add $__START_KERNEL_map,%rdi
13012+ sub phys_base(%rip),%rdi
13013+
13014+#ifdef CONFIG_PARAVIRT
13015+ cmpl $0, pv_info+PARAVIRT_enabled
13016+ jz 1f
13017+ mov %rdi,%rbx
13018+ i = 0
13019+ .rept USER_PGD_PTRS
13020+ mov i*8(%rbx),%rsi
13021+ mov $0x67,%sil
13022+ lea i*8(%rbx),%rdi
13023+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13024+ i = i + 1
13025+ .endr
13026+ jmp 2f
13027+1:
13028+#endif
13029+
13030+ i = 0
13031+ .rept USER_PGD_PTRS
13032+ movb $0x67,i*8(%rdi)
13033+ i = i + 1
13034+ .endr
13035+
13036+#ifdef CONFIG_PARAVIRT
13037+2: PV_RESTORE_REGS(CLBR_RDI)
13038+ popq %rbx
13039+#endif
13040+
13041+ popq %rdi
13042+ pax_force_retaddr
13043+ retq
13044+ENDPROC(pax_exit_kernel_user)
13045+#endif
13046+
13047+.macro pax_erase_kstack
13048+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13049+ call pax_erase_kstack
13050+#endif
13051+.endm
13052+
13053+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13054+/*
13055+ * r11: thread_info
13056+ * rcx, rdx: can be clobbered
13057+ */
13058+ENTRY(pax_erase_kstack)
13059+ pushq %rdi
13060+ pushq %rax
13061+ pushq %r11
13062+
13063+ GET_THREAD_INFO(%r11)
13064+ mov TI_lowest_stack(%r11), %rdi
13065+ mov $-0xBEEF, %rax
13066+ std
13067+
13068+1: mov %edi, %ecx
13069+ and $THREAD_SIZE_asm - 1, %ecx
13070+ shr $3, %ecx
13071+ repne scasq
13072+ jecxz 2f
13073+
13074+ cmp $2*8, %ecx
13075+ jc 2f
13076+
13077+ mov $2*8, %ecx
13078+ repe scasq
13079+ jecxz 2f
13080+ jne 1b
13081+
13082+2: cld
13083+ mov %esp, %ecx
13084+ sub %edi, %ecx
13085+
13086+ cmp $THREAD_SIZE_asm, %rcx
13087+ jb 3f
13088+ ud2
13089+3:
13090+
13091+ shr $3, %ecx
13092+ rep stosq
13093+
13094+ mov TI_task_thread_sp0(%r11), %rdi
13095+ sub $256, %rdi
13096+ mov %rdi, TI_lowest_stack(%r11)
13097+
13098+ popq %r11
13099+ popq %rax
13100+ popq %rdi
13101+ pax_force_retaddr
13102+ ret
13103+ENDPROC(pax_erase_kstack)
13104+#endif
13105
13106 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13107 #ifdef CONFIG_TRACE_IRQFLAGS
13108@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13109 movq %rsp, %rsi
13110
13111 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13112- testl $3, CS(%rdi)
13113+ testb $3, CS(%rdi)
13114 je 1f
13115 SWAPGS
13116 /*
13117@@ -350,9 +634,10 @@ ENTRY(save_rest)
13118 movq_cfi r15, R15+16
13119 movq %r11, 8(%rsp) /* return address */
13120 FIXUP_TOP_OF_STACK %r11, 16
13121+ pax_force_retaddr
13122 ret
13123 CFI_ENDPROC
13124-END(save_rest)
13125+ENDPROC(save_rest)
13126
13127 /* save complete stack frame */
13128 .pushsection .kprobes.text, "ax"
13129@@ -381,9 +666,10 @@ ENTRY(save_paranoid)
13130 js 1f /* negative -> in kernel */
13131 SWAPGS
13132 xorl %ebx,%ebx
13133-1: ret
13134+1: pax_force_retaddr_bts
13135+ ret
13136 CFI_ENDPROC
13137-END(save_paranoid)
13138+ENDPROC(save_paranoid)
13139 .popsection
13140
13141 /*
13142@@ -405,7 +691,7 @@ ENTRY(ret_from_fork)
13143
13144 RESTORE_REST
13145
13146- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13147+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13148 je int_ret_from_sys_call
13149
13150 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13151@@ -415,7 +701,7 @@ ENTRY(ret_from_fork)
13152 jmp ret_from_sys_call # go to the SYSRET fastpath
13153
13154 CFI_ENDPROC
13155-END(ret_from_fork)
13156+ENDPROC(ret_from_fork)
13157
13158 /*
13159 * System call entry. Up to 6 arguments in registers are supported.
13160@@ -451,7 +737,7 @@ END(ret_from_fork)
13161 ENTRY(system_call)
13162 CFI_STARTPROC simple
13163 CFI_SIGNAL_FRAME
13164- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13165+ CFI_DEF_CFA rsp,0
13166 CFI_REGISTER rip,rcx
13167 /*CFI_REGISTER rflags,r11*/
13168 SWAPGS_UNSAFE_STACK
13169@@ -464,12 +750,13 @@ ENTRY(system_call_after_swapgs)
13170
13171 movq %rsp,PER_CPU_VAR(old_rsp)
13172 movq PER_CPU_VAR(kernel_stack),%rsp
13173+ SAVE_ARGS 8*6,0
13174+ pax_enter_kernel_user
13175 /*
13176 * No need to follow this irqs off/on section - it's straight
13177 * and short:
13178 */
13179 ENABLE_INTERRUPTS(CLBR_NONE)
13180- SAVE_ARGS 8,0
13181 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13182 movq %rcx,RIP-ARGOFFSET(%rsp)
13183 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13184@@ -479,7 +766,7 @@ ENTRY(system_call_after_swapgs)
13185 system_call_fastpath:
13186 cmpq $__NR_syscall_max,%rax
13187 ja badsys
13188- movq %r10,%rcx
13189+ movq R10-ARGOFFSET(%rsp),%rcx
13190 call *sys_call_table(,%rax,8) # XXX: rip relative
13191 movq %rax,RAX-ARGOFFSET(%rsp)
13192 /*
13193@@ -498,6 +785,8 @@ sysret_check:
13194 andl %edi,%edx
13195 jnz sysret_careful
13196 CFI_REMEMBER_STATE
13197+ pax_exit_kernel_user
13198+ pax_erase_kstack
13199 /*
13200 * sysretq will re-enable interrupts:
13201 */
13202@@ -549,14 +838,18 @@ badsys:
13203 * jump back to the normal fast path.
13204 */
13205 auditsys:
13206- movq %r10,%r9 /* 6th arg: 4th syscall arg */
13207+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13208 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13209 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13210 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13211 movq %rax,%rsi /* 2nd arg: syscall number */
13212 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13213 call audit_syscall_entry
13214+
13215+ pax_erase_kstack
13216+
13217 LOAD_ARGS 0 /* reload call-clobbered registers */
13218+ pax_set_fptr_mask
13219 jmp system_call_fastpath
13220
13221 /*
13222@@ -586,16 +879,20 @@ tracesys:
13223 FIXUP_TOP_OF_STACK %rdi
13224 movq %rsp,%rdi
13225 call syscall_trace_enter
13226+
13227+ pax_erase_kstack
13228+
13229 /*
13230 * Reload arg registers from stack in case ptrace changed them.
13231 * We don't reload %rax because syscall_trace_enter() returned
13232 * the value it wants us to use in the table lookup.
13233 */
13234 LOAD_ARGS ARGOFFSET, 1
13235+ pax_set_fptr_mask
13236 RESTORE_REST
13237 cmpq $__NR_syscall_max,%rax
13238 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13239- movq %r10,%rcx /* fixup for C */
13240+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13241 call *sys_call_table(,%rax,8)
13242 movq %rax,RAX-ARGOFFSET(%rsp)
13243 /* Use IRET because user could have changed frame */
13244@@ -607,7 +904,7 @@ tracesys:
13245 GLOBAL(int_ret_from_sys_call)
13246 DISABLE_INTERRUPTS(CLBR_NONE)
13247 TRACE_IRQS_OFF
13248- testl $3,CS-ARGOFFSET(%rsp)
13249+ testb $3,CS-ARGOFFSET(%rsp)
13250 je retint_restore_args
13251 movl $_TIF_ALLWORK_MASK,%edi
13252 /* edi: mask to check */
13253@@ -664,7 +961,7 @@ int_restore_rest:
13254 TRACE_IRQS_OFF
13255 jmp int_with_check
13256 CFI_ENDPROC
13257-END(system_call)
13258+ENDPROC(system_call)
13259
13260 /*
13261 * Certain special system calls that need to save a complete full stack frame.
13262@@ -680,7 +977,7 @@ ENTRY(\label)
13263 call \func
13264 jmp ptregscall_common
13265 CFI_ENDPROC
13266-END(\label)
13267+ENDPROC(\label)
13268 .endm
13269
13270 PTREGSCALL stub_clone, sys_clone, %r8
13271@@ -698,9 +995,10 @@ ENTRY(ptregscall_common)
13272 movq_cfi_restore R12+8, r12
13273 movq_cfi_restore RBP+8, rbp
13274 movq_cfi_restore RBX+8, rbx
13275+ pax_force_retaddr
13276 ret $REST_SKIP /* pop extended registers */
13277 CFI_ENDPROC
13278-END(ptregscall_common)
13279+ENDPROC(ptregscall_common)
13280
13281 ENTRY(stub_execve)
13282 CFI_STARTPROC
13283@@ -715,7 +1013,7 @@ ENTRY(stub_execve)
13284 RESTORE_REST
13285 jmp int_ret_from_sys_call
13286 CFI_ENDPROC
13287-END(stub_execve)
13288+ENDPROC(stub_execve)
13289
13290 /*
13291 * sigreturn is special because it needs to restore all registers on return.
13292@@ -733,7 +1031,7 @@ ENTRY(stub_rt_sigreturn)
13293 RESTORE_REST
13294 jmp int_ret_from_sys_call
13295 CFI_ENDPROC
13296-END(stub_rt_sigreturn)
13297+ENDPROC(stub_rt_sigreturn)
13298
13299 /*
13300 * Build the entry stubs and pointer table with some assembler magic.
13301@@ -768,7 +1066,7 @@ vector=vector+1
13302 2: jmp common_interrupt
13303 .endr
13304 CFI_ENDPROC
13305-END(irq_entries_start)
13306+ENDPROC(irq_entries_start)
13307
13308 .previous
13309 END(interrupt)
13310@@ -789,6 +1087,16 @@ END(interrupt)
13311 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
13312 SAVE_ARGS_IRQ
13313 PARTIAL_FRAME 0
13314+#ifdef CONFIG_PAX_MEMORY_UDEREF
13315+ testb $3, CS(%rdi)
13316+ jnz 1f
13317+ pax_enter_kernel
13318+ jmp 2f
13319+1: pax_enter_kernel_user
13320+2:
13321+#else
13322+ pax_enter_kernel
13323+#endif
13324 call \func
13325 .endm
13326
13327@@ -820,7 +1128,7 @@ ret_from_intr:
13328
13329 exit_intr:
13330 GET_THREAD_INFO(%rcx)
13331- testl $3,CS-ARGOFFSET(%rsp)
13332+ testb $3,CS-ARGOFFSET(%rsp)
13333 je retint_kernel
13334
13335 /* Interrupt came from user space */
13336@@ -842,12 +1150,16 @@ retint_swapgs: /* return to user-space
13337 * The iretq could re-enable interrupts:
13338 */
13339 DISABLE_INTERRUPTS(CLBR_ANY)
13340+ pax_exit_kernel_user
13341+ pax_erase_kstack
13342 TRACE_IRQS_IRETQ
13343 SWAPGS
13344 jmp restore_args
13345
13346 retint_restore_args: /* return to kernel space */
13347 DISABLE_INTERRUPTS(CLBR_ANY)
13348+ pax_exit_kernel
13349+ pax_force_retaddr RIP-ARGOFFSET
13350 /*
13351 * The iretq could re-enable interrupts:
13352 */
13353@@ -936,7 +1248,7 @@ ENTRY(retint_kernel)
13354 #endif
13355
13356 CFI_ENDPROC
13357-END(common_interrupt)
13358+ENDPROC(common_interrupt)
13359 /*
13360 * End of kprobes section
13361 */
13362@@ -952,7 +1264,7 @@ ENTRY(\sym)
13363 interrupt \do_sym
13364 jmp ret_from_intr
13365 CFI_ENDPROC
13366-END(\sym)
13367+ENDPROC(\sym)
13368 .endm
13369
13370 #ifdef CONFIG_SMP
13371@@ -1017,12 +1329,22 @@ ENTRY(\sym)
13372 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13373 call error_entry
13374 DEFAULT_FRAME 0
13375+#ifdef CONFIG_PAX_MEMORY_UDEREF
13376+ testb $3, CS(%rsp)
13377+ jnz 1f
13378+ pax_enter_kernel
13379+ jmp 2f
13380+1: pax_enter_kernel_user
13381+2:
13382+#else
13383+ pax_enter_kernel
13384+#endif
13385 movq %rsp,%rdi /* pt_regs pointer */
13386 xorl %esi,%esi /* no error code */
13387 call \do_sym
13388 jmp error_exit /* %ebx: no swapgs flag */
13389 CFI_ENDPROC
13390-END(\sym)
13391+ENDPROC(\sym)
13392 .endm
13393
13394 .macro paranoidzeroentry sym do_sym
13395@@ -1034,15 +1356,25 @@ ENTRY(\sym)
13396 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13397 call save_paranoid
13398 TRACE_IRQS_OFF
13399+#ifdef CONFIG_PAX_MEMORY_UDEREF
13400+ testb $3, CS(%rsp)
13401+ jnz 1f
13402+ pax_enter_kernel
13403+ jmp 2f
13404+1: pax_enter_kernel_user
13405+2:
13406+#else
13407+ pax_enter_kernel
13408+#endif
13409 movq %rsp,%rdi /* pt_regs pointer */
13410 xorl %esi,%esi /* no error code */
13411 call \do_sym
13412 jmp paranoid_exit /* %ebx: no swapgs flag */
13413 CFI_ENDPROC
13414-END(\sym)
13415+ENDPROC(\sym)
13416 .endm
13417
13418-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
13419+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
13420 .macro paranoidzeroentry_ist sym do_sym ist
13421 ENTRY(\sym)
13422 INTR_FRAME
13423@@ -1052,14 +1384,30 @@ ENTRY(\sym)
13424 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13425 call save_paranoid
13426 TRACE_IRQS_OFF
13427+#ifdef CONFIG_PAX_MEMORY_UDEREF
13428+ testb $3, CS(%rsp)
13429+ jnz 1f
13430+ pax_enter_kernel
13431+ jmp 2f
13432+1: pax_enter_kernel_user
13433+2:
13434+#else
13435+ pax_enter_kernel
13436+#endif
13437 movq %rsp,%rdi /* pt_regs pointer */
13438 xorl %esi,%esi /* no error code */
13439+#ifdef CONFIG_SMP
13440+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
13441+ lea init_tss(%r12), %r12
13442+#else
13443+ lea init_tss(%rip), %r12
13444+#endif
13445 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
13446 call \do_sym
13447 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
13448 jmp paranoid_exit /* %ebx: no swapgs flag */
13449 CFI_ENDPROC
13450-END(\sym)
13451+ENDPROC(\sym)
13452 .endm
13453
13454 .macro errorentry sym do_sym
13455@@ -1070,13 +1418,23 @@ ENTRY(\sym)
13456 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13457 call error_entry
13458 DEFAULT_FRAME 0
13459+#ifdef CONFIG_PAX_MEMORY_UDEREF
13460+ testb $3, CS(%rsp)
13461+ jnz 1f
13462+ pax_enter_kernel
13463+ jmp 2f
13464+1: pax_enter_kernel_user
13465+2:
13466+#else
13467+ pax_enter_kernel
13468+#endif
13469 movq %rsp,%rdi /* pt_regs pointer */
13470 movq ORIG_RAX(%rsp),%rsi /* get error code */
13471 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13472 call \do_sym
13473 jmp error_exit /* %ebx: no swapgs flag */
13474 CFI_ENDPROC
13475-END(\sym)
13476+ENDPROC(\sym)
13477 .endm
13478
13479 /* error code is on the stack already */
13480@@ -1089,13 +1447,23 @@ ENTRY(\sym)
13481 call save_paranoid
13482 DEFAULT_FRAME 0
13483 TRACE_IRQS_OFF
13484+#ifdef CONFIG_PAX_MEMORY_UDEREF
13485+ testb $3, CS(%rsp)
13486+ jnz 1f
13487+ pax_enter_kernel
13488+ jmp 2f
13489+1: pax_enter_kernel_user
13490+2:
13491+#else
13492+ pax_enter_kernel
13493+#endif
13494 movq %rsp,%rdi /* pt_regs pointer */
13495 movq ORIG_RAX(%rsp),%rsi /* get error code */
13496 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13497 call \do_sym
13498 jmp paranoid_exit /* %ebx: no swapgs flag */
13499 CFI_ENDPROC
13500-END(\sym)
13501+ENDPROC(\sym)
13502 .endm
13503
13504 zeroentry divide_error do_divide_error
13505@@ -1125,9 +1493,10 @@ gs_change:
13506 2: mfence /* workaround */
13507 SWAPGS
13508 popfq_cfi
13509+ pax_force_retaddr
13510 ret
13511 CFI_ENDPROC
13512-END(native_load_gs_index)
13513+ENDPROC(native_load_gs_index)
13514
13515 .section __ex_table,"a"
13516 .align 8
13517@@ -1149,13 +1518,14 @@ ENTRY(kernel_thread_helper)
13518 * Here we are in the child and the registers are set as they were
13519 * at kernel_thread() invocation in the parent.
13520 */
13521+ pax_force_fptr %rsi
13522 call *%rsi
13523 # exit
13524 mov %eax, %edi
13525 call do_exit
13526 ud2 # padding for call trace
13527 CFI_ENDPROC
13528-END(kernel_thread_helper)
13529+ENDPROC(kernel_thread_helper)
13530
13531 /*
13532 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
13533@@ -1184,9 +1554,10 @@ ENTRY(kernel_execve)
13534 je int_ret_from_sys_call
13535 RESTORE_ARGS
13536 UNFAKE_STACK_FRAME
13537+ pax_force_retaddr
13538 ret
13539 CFI_ENDPROC
13540-END(kernel_execve)
13541+ENDPROC(kernel_execve)
13542
13543 /* Call softirq on interrupt stack. Interrupts are off. */
13544 ENTRY(call_softirq)
13545@@ -1204,9 +1575,10 @@ ENTRY(call_softirq)
13546 CFI_DEF_CFA_REGISTER rsp
13547 CFI_ADJUST_CFA_OFFSET -8
13548 decl PER_CPU_VAR(irq_count)
13549+ pax_force_retaddr
13550 ret
13551 CFI_ENDPROC
13552-END(call_softirq)
13553+ENDPROC(call_softirq)
13554
13555 #ifdef CONFIG_XEN
13556 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
13557@@ -1244,7 +1616,7 @@ ENTRY(xen_do_hypervisor_callback) # do
13558 decl PER_CPU_VAR(irq_count)
13559 jmp error_exit
13560 CFI_ENDPROC
13561-END(xen_do_hypervisor_callback)
13562+ENDPROC(xen_do_hypervisor_callback)
13563
13564 /*
13565 * Hypervisor uses this for application faults while it executes.
13566@@ -1303,7 +1675,7 @@ ENTRY(xen_failsafe_callback)
13567 SAVE_ALL
13568 jmp error_exit
13569 CFI_ENDPROC
13570-END(xen_failsafe_callback)
13571+ENDPROC(xen_failsafe_callback)
13572
13573 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
13574 xen_hvm_callback_vector xen_evtchn_do_upcall
13575@@ -1352,16 +1724,31 @@ ENTRY(paranoid_exit)
13576 TRACE_IRQS_OFF
13577 testl %ebx,%ebx /* swapgs needed? */
13578 jnz paranoid_restore
13579- testl $3,CS(%rsp)
13580+ testb $3,CS(%rsp)
13581 jnz paranoid_userspace
13582+#ifdef CONFIG_PAX_MEMORY_UDEREF
13583+ pax_exit_kernel
13584+ TRACE_IRQS_IRETQ 0
13585+ SWAPGS_UNSAFE_STACK
13586+ RESTORE_ALL 8
13587+ pax_force_retaddr_bts
13588+ jmp irq_return
13589+#endif
13590 paranoid_swapgs:
13591+#ifdef CONFIG_PAX_MEMORY_UDEREF
13592+ pax_exit_kernel_user
13593+#else
13594+ pax_exit_kernel
13595+#endif
13596 TRACE_IRQS_IRETQ 0
13597 SWAPGS_UNSAFE_STACK
13598 RESTORE_ALL 8
13599 jmp irq_return
13600 paranoid_restore:
13601+ pax_exit_kernel
13602 TRACE_IRQS_IRETQ 0
13603 RESTORE_ALL 8
13604+ pax_force_retaddr_bts
13605 jmp irq_return
13606 paranoid_userspace:
13607 GET_THREAD_INFO(%rcx)
13608@@ -1390,7 +1777,7 @@ paranoid_schedule:
13609 TRACE_IRQS_OFF
13610 jmp paranoid_userspace
13611 CFI_ENDPROC
13612-END(paranoid_exit)
13613+ENDPROC(paranoid_exit)
13614
13615 /*
13616 * Exception entry point. This expects an error code/orig_rax on the stack.
13617@@ -1417,12 +1804,13 @@ ENTRY(error_entry)
13618 movq_cfi r14, R14+8
13619 movq_cfi r15, R15+8
13620 xorl %ebx,%ebx
13621- testl $3,CS+8(%rsp)
13622+ testb $3,CS+8(%rsp)
13623 je error_kernelspace
13624 error_swapgs:
13625 SWAPGS
13626 error_sti:
13627 TRACE_IRQS_OFF
13628+ pax_force_retaddr_bts
13629 ret
13630
13631 /*
13632@@ -1449,7 +1837,7 @@ bstep_iret:
13633 movq %rcx,RIP+8(%rsp)
13634 jmp error_swapgs
13635 CFI_ENDPROC
13636-END(error_entry)
13637+ENDPROC(error_entry)
13638
13639
13640 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
13641@@ -1469,7 +1857,7 @@ ENTRY(error_exit)
13642 jnz retint_careful
13643 jmp retint_swapgs
13644 CFI_ENDPROC
13645-END(error_exit)
13646+ENDPROC(error_exit)
13647
13648
13649 /* runs on exception stack */
13650@@ -1481,6 +1869,16 @@ ENTRY(nmi)
13651 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13652 call save_paranoid
13653 DEFAULT_FRAME 0
13654+#ifdef CONFIG_PAX_MEMORY_UDEREF
13655+ testb $3, CS(%rsp)
13656+ jnz 1f
13657+ pax_enter_kernel
13658+ jmp 2f
13659+1: pax_enter_kernel_user
13660+2:
13661+#else
13662+ pax_enter_kernel
13663+#endif
13664 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13665 movq %rsp,%rdi
13666 movq $-1,%rsi
13667@@ -1491,12 +1889,28 @@ ENTRY(nmi)
13668 DISABLE_INTERRUPTS(CLBR_NONE)
13669 testl %ebx,%ebx /* swapgs needed? */
13670 jnz nmi_restore
13671- testl $3,CS(%rsp)
13672+ testb $3,CS(%rsp)
13673 jnz nmi_userspace
13674+#ifdef CONFIG_PAX_MEMORY_UDEREF
13675+ pax_exit_kernel
13676+ SWAPGS_UNSAFE_STACK
13677+ RESTORE_ALL 8
13678+ pax_force_retaddr_bts
13679+ jmp irq_return
13680+#endif
13681 nmi_swapgs:
13682+#ifdef CONFIG_PAX_MEMORY_UDEREF
13683+ pax_exit_kernel_user
13684+#else
13685+ pax_exit_kernel
13686+#endif
13687 SWAPGS_UNSAFE_STACK
13688+ RESTORE_ALL 8
13689+ jmp irq_return
13690 nmi_restore:
13691+ pax_exit_kernel
13692 RESTORE_ALL 8
13693+ pax_force_retaddr_bts
13694 jmp irq_return
13695 nmi_userspace:
13696 GET_THREAD_INFO(%rcx)
13697@@ -1525,14 +1939,14 @@ nmi_schedule:
13698 jmp paranoid_exit
13699 CFI_ENDPROC
13700 #endif
13701-END(nmi)
13702+ENDPROC(nmi)
13703
13704 ENTRY(ignore_sysret)
13705 CFI_STARTPROC
13706 mov $-ENOSYS,%eax
13707 sysret
13708 CFI_ENDPROC
13709-END(ignore_sysret)
13710+ENDPROC(ignore_sysret)
13711
13712 /*
13713 * End of kprobes section
13714diff -urNp linux-3.1.4/arch/x86/kernel/ftrace.c linux-3.1.4/arch/x86/kernel/ftrace.c
13715--- linux-3.1.4/arch/x86/kernel/ftrace.c 2011-11-11 15:19:27.000000000 -0500
13716+++ linux-3.1.4/arch/x86/kernel/ftrace.c 2011-11-16 18:39:07.000000000 -0500
13717@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
13718 static const void *mod_code_newcode; /* holds the text to write to the IP */
13719
13720 static unsigned nmi_wait_count;
13721-static atomic_t nmi_update_count = ATOMIC_INIT(0);
13722+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13723
13724 int ftrace_arch_read_dyn_info(char *buf, int size)
13725 {
13726@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13727
13728 r = snprintf(buf, size, "%u %u",
13729 nmi_wait_count,
13730- atomic_read(&nmi_update_count));
13731+ atomic_read_unchecked(&nmi_update_count));
13732 return r;
13733 }
13734
13735@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
13736
13737 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13738 smp_rmb();
13739+ pax_open_kernel();
13740 ftrace_mod_code();
13741- atomic_inc(&nmi_update_count);
13742+ pax_close_kernel();
13743+ atomic_inc_unchecked(&nmi_update_count);
13744 }
13745 /* Must have previous changes seen before executions */
13746 smp_mb();
13747@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
13748 {
13749 unsigned char replaced[MCOUNT_INSN_SIZE];
13750
13751+ ip = ktla_ktva(ip);
13752+
13753 /*
13754 * Note: Due to modules and __init, code can
13755 * disappear and change, we need to protect against faulting
13756@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13757 unsigned char old[MCOUNT_INSN_SIZE], *new;
13758 int ret;
13759
13760- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13761+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13762 new = ftrace_call_replace(ip, (unsigned long)func);
13763 ret = ftrace_modify_code(ip, old, new);
13764
13765@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
13766 {
13767 unsigned char code[MCOUNT_INSN_SIZE];
13768
13769+ ip = ktla_ktva(ip);
13770+
13771 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13772 return -EFAULT;
13773
13774diff -urNp linux-3.1.4/arch/x86/kernel/head32.c linux-3.1.4/arch/x86/kernel/head32.c
13775--- linux-3.1.4/arch/x86/kernel/head32.c 2011-11-11 15:19:27.000000000 -0500
13776+++ linux-3.1.4/arch/x86/kernel/head32.c 2011-11-16 18:39:07.000000000 -0500
13777@@ -19,6 +19,7 @@
13778 #include <asm/io_apic.h>
13779 #include <asm/bios_ebda.h>
13780 #include <asm/tlbflush.h>
13781+#include <asm/boot.h>
13782
13783 static void __init i386_default_early_setup(void)
13784 {
13785@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
13786 {
13787 memblock_init();
13788
13789- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13790+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13791
13792 #ifdef CONFIG_BLK_DEV_INITRD
13793 /* Reserve INITRD */
13794diff -urNp linux-3.1.4/arch/x86/kernel/head_32.S linux-3.1.4/arch/x86/kernel/head_32.S
13795--- linux-3.1.4/arch/x86/kernel/head_32.S 2011-11-11 15:19:27.000000000 -0500
13796+++ linux-3.1.4/arch/x86/kernel/head_32.S 2011-11-16 18:39:07.000000000 -0500
13797@@ -25,6 +25,12 @@
13798 /* Physical address */
13799 #define pa(X) ((X) - __PAGE_OFFSET)
13800
13801+#ifdef CONFIG_PAX_KERNEXEC
13802+#define ta(X) (X)
13803+#else
13804+#define ta(X) ((X) - __PAGE_OFFSET)
13805+#endif
13806+
13807 /*
13808 * References to members of the new_cpu_data structure.
13809 */
13810@@ -54,11 +60,7 @@
13811 * and small than max_low_pfn, otherwise will waste some page table entries
13812 */
13813
13814-#if PTRS_PER_PMD > 1
13815-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13816-#else
13817-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13818-#endif
13819+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13820
13821 /* Number of possible pages in the lowmem region */
13822 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
13823@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13824 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13825
13826 /*
13827+ * Real beginning of normal "text" segment
13828+ */
13829+ENTRY(stext)
13830+ENTRY(_stext)
13831+
13832+/*
13833 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13834 * %esi points to the real-mode code as a 32-bit pointer.
13835 * CS and DS must be 4 GB flat segments, but we don't depend on
13836@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13837 * can.
13838 */
13839 __HEAD
13840+
13841+#ifdef CONFIG_PAX_KERNEXEC
13842+ jmp startup_32
13843+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13844+.fill PAGE_SIZE-5,1,0xcc
13845+#endif
13846+
13847 ENTRY(startup_32)
13848 movl pa(stack_start),%ecx
13849
13850@@ -105,6 +120,57 @@ ENTRY(startup_32)
13851 2:
13852 leal -__PAGE_OFFSET(%ecx),%esp
13853
13854+#ifdef CONFIG_SMP
13855+ movl $pa(cpu_gdt_table),%edi
13856+ movl $__per_cpu_load,%eax
13857+ movw %ax,__KERNEL_PERCPU + 2(%edi)
13858+ rorl $16,%eax
13859+ movb %al,__KERNEL_PERCPU + 4(%edi)
13860+ movb %ah,__KERNEL_PERCPU + 7(%edi)
13861+ movl $__per_cpu_end - 1,%eax
13862+ subl $__per_cpu_start,%eax
13863+ movw %ax,__KERNEL_PERCPU + 0(%edi)
13864+#endif
13865+
13866+#ifdef CONFIG_PAX_MEMORY_UDEREF
13867+ movl $NR_CPUS,%ecx
13868+ movl $pa(cpu_gdt_table),%edi
13869+1:
13870+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13871+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13872+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13873+ addl $PAGE_SIZE_asm,%edi
13874+ loop 1b
13875+#endif
13876+
13877+#ifdef CONFIG_PAX_KERNEXEC
13878+ movl $pa(boot_gdt),%edi
13879+ movl $__LOAD_PHYSICAL_ADDR,%eax
13880+ movw %ax,__BOOT_CS + 2(%edi)
13881+ rorl $16,%eax
13882+ movb %al,__BOOT_CS + 4(%edi)
13883+ movb %ah,__BOOT_CS + 7(%edi)
13884+ rorl $16,%eax
13885+
13886+ ljmp $(__BOOT_CS),$1f
13887+1:
13888+
13889+ movl $NR_CPUS,%ecx
13890+ movl $pa(cpu_gdt_table),%edi
13891+ addl $__PAGE_OFFSET,%eax
13892+1:
13893+ movw %ax,__KERNEL_CS + 2(%edi)
13894+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13895+ rorl $16,%eax
13896+ movb %al,__KERNEL_CS + 4(%edi)
13897+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13898+ movb %ah,__KERNEL_CS + 7(%edi)
13899+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13900+ rorl $16,%eax
13901+ addl $PAGE_SIZE_asm,%edi
13902+ loop 1b
13903+#endif
13904+
13905 /*
13906 * Clear BSS first so that there are no surprises...
13907 */
13908@@ -195,8 +261,11 @@ ENTRY(startup_32)
13909 movl %eax, pa(max_pfn_mapped)
13910
13911 /* Do early initialization of the fixmap area */
13912- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13913- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
13914+#ifdef CONFIG_COMPAT_VDSO
13915+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
13916+#else
13917+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
13918+#endif
13919 #else /* Not PAE */
13920
13921 page_pde_offset = (__PAGE_OFFSET >> 20);
13922@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13923 movl %eax, pa(max_pfn_mapped)
13924
13925 /* Do early initialization of the fixmap area */
13926- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13927- movl %eax,pa(initial_page_table+0xffc)
13928+#ifdef CONFIG_COMPAT_VDSO
13929+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
13930+#else
13931+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
13932+#endif
13933 #endif
13934
13935 #ifdef CONFIG_PARAVIRT
13936@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13937 cmpl $num_subarch_entries, %eax
13938 jae bad_subarch
13939
13940- movl pa(subarch_entries)(,%eax,4), %eax
13941- subl $__PAGE_OFFSET, %eax
13942- jmp *%eax
13943+ jmp *pa(subarch_entries)(,%eax,4)
13944
13945 bad_subarch:
13946 WEAK(lguest_entry)
13947@@ -255,10 +325,10 @@ WEAK(xen_entry)
13948 __INITDATA
13949
13950 subarch_entries:
13951- .long default_entry /* normal x86/PC */
13952- .long lguest_entry /* lguest hypervisor */
13953- .long xen_entry /* Xen hypervisor */
13954- .long default_entry /* Moorestown MID */
13955+ .long ta(default_entry) /* normal x86/PC */
13956+ .long ta(lguest_entry) /* lguest hypervisor */
13957+ .long ta(xen_entry) /* Xen hypervisor */
13958+ .long ta(default_entry) /* Moorestown MID */
13959 num_subarch_entries = (. - subarch_entries) / 4
13960 .previous
13961 #else
13962@@ -312,6 +382,7 @@ default_entry:
13963 orl %edx,%eax
13964 movl %eax,%cr4
13965
13966+#ifdef CONFIG_X86_PAE
13967 testb $X86_CR4_PAE, %al # check if PAE is enabled
13968 jz 6f
13969
13970@@ -340,6 +411,9 @@ default_entry:
13971 /* Make changes effective */
13972 wrmsr
13973
13974+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13975+#endif
13976+
13977 6:
13978
13979 /*
13980@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
13981 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13982 movl %eax,%ss # after changing gdt.
13983
13984- movl $(__USER_DS),%eax # DS/ES contains default USER segment
13985+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13986 movl %eax,%ds
13987 movl %eax,%es
13988
13989@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
13990 */
13991 cmpb $0,ready
13992 jne 1f
13993- movl $gdt_page,%eax
13994+ movl $cpu_gdt_table,%eax
13995 movl $stack_canary,%ecx
13996+#ifdef CONFIG_SMP
13997+ addl $__per_cpu_load,%ecx
13998+#endif
13999 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14000 shrl $16, %ecx
14001 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14002 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14003 1:
14004-#endif
14005 movl $(__KERNEL_STACK_CANARY),%eax
14006+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14007+ movl $(__USER_DS),%eax
14008+#else
14009+ xorl %eax,%eax
14010+#endif
14011 movl %eax,%gs
14012
14013 xorl %eax,%eax # Clear LDT
14014@@ -558,22 +639,22 @@ early_page_fault:
14015 jmp early_fault
14016
14017 early_fault:
14018- cld
14019 #ifdef CONFIG_PRINTK
14020+ cmpl $1,%ss:early_recursion_flag
14021+ je hlt_loop
14022+ incl %ss:early_recursion_flag
14023+ cld
14024 pusha
14025 movl $(__KERNEL_DS),%eax
14026 movl %eax,%ds
14027 movl %eax,%es
14028- cmpl $2,early_recursion_flag
14029- je hlt_loop
14030- incl early_recursion_flag
14031 movl %cr2,%eax
14032 pushl %eax
14033 pushl %edx /* trapno */
14034 pushl $fault_msg
14035 call printk
14036+; call dump_stack
14037 #endif
14038- call dump_stack
14039 hlt_loop:
14040 hlt
14041 jmp hlt_loop
14042@@ -581,8 +662,11 @@ hlt_loop:
14043 /* This is the default interrupt "handler" :-) */
14044 ALIGN
14045 ignore_int:
14046- cld
14047 #ifdef CONFIG_PRINTK
14048+ cmpl $2,%ss:early_recursion_flag
14049+ je hlt_loop
14050+ incl %ss:early_recursion_flag
14051+ cld
14052 pushl %eax
14053 pushl %ecx
14054 pushl %edx
14055@@ -591,9 +675,6 @@ ignore_int:
14056 movl $(__KERNEL_DS),%eax
14057 movl %eax,%ds
14058 movl %eax,%es
14059- cmpl $2,early_recursion_flag
14060- je hlt_loop
14061- incl early_recursion_flag
14062 pushl 16(%esp)
14063 pushl 24(%esp)
14064 pushl 32(%esp)
14065@@ -622,29 +703,43 @@ ENTRY(initial_code)
14066 /*
14067 * BSS section
14068 */
14069-__PAGE_ALIGNED_BSS
14070- .align PAGE_SIZE
14071 #ifdef CONFIG_X86_PAE
14072+.section .initial_pg_pmd,"a",@progbits
14073 initial_pg_pmd:
14074 .fill 1024*KPMDS,4,0
14075 #else
14076+.section .initial_page_table,"a",@progbits
14077 ENTRY(initial_page_table)
14078 .fill 1024,4,0
14079 #endif
14080+.section .initial_pg_fixmap,"a",@progbits
14081 initial_pg_fixmap:
14082 .fill 1024,4,0
14083+.section .empty_zero_page,"a",@progbits
14084 ENTRY(empty_zero_page)
14085 .fill 4096,1,0
14086+.section .swapper_pg_dir,"a",@progbits
14087 ENTRY(swapper_pg_dir)
14088+#ifdef CONFIG_X86_PAE
14089+ .fill 4,8,0
14090+#else
14091 .fill 1024,4,0
14092+#endif
14093+
14094+/*
14095+ * The IDT has to be page-aligned to simplify the Pentium
14096+ * F0 0F bug workaround.. We have a special link segment
14097+ * for this.
14098+ */
14099+.section .idt,"a",@progbits
14100+ENTRY(idt_table)
14101+ .fill 256,8,0
14102
14103 /*
14104 * This starts the data section.
14105 */
14106 #ifdef CONFIG_X86_PAE
14107-__PAGE_ALIGNED_DATA
14108- /* Page-aligned for the benefit of paravirt? */
14109- .align PAGE_SIZE
14110+.section .initial_page_table,"a",@progbits
14111 ENTRY(initial_page_table)
14112 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14113 # if KPMDS == 3
14114@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14115 # error "Kernel PMDs should be 1, 2 or 3"
14116 # endif
14117 .align PAGE_SIZE /* needs to be page-sized too */
14118+
14119+#ifdef CONFIG_PAX_PER_CPU_PGD
14120+ENTRY(cpu_pgd)
14121+ .rept NR_CPUS
14122+ .fill 4,8,0
14123+ .endr
14124+#endif
14125+
14126 #endif
14127
14128 .data
14129 .balign 4
14130 ENTRY(stack_start)
14131- .long init_thread_union+THREAD_SIZE
14132+ .long init_thread_union+THREAD_SIZE-8
14133+
14134+ready: .byte 0
14135
14136+.section .rodata,"a",@progbits
14137 early_recursion_flag:
14138 .long 0
14139
14140-ready: .byte 0
14141-
14142 int_msg:
14143 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14144
14145@@ -707,7 +811,7 @@ fault_msg:
14146 .word 0 # 32 bit align gdt_desc.address
14147 boot_gdt_descr:
14148 .word __BOOT_DS+7
14149- .long boot_gdt - __PAGE_OFFSET
14150+ .long pa(boot_gdt)
14151
14152 .word 0 # 32-bit align idt_desc.address
14153 idt_descr:
14154@@ -718,7 +822,7 @@ idt_descr:
14155 .word 0 # 32 bit align gdt_desc.address
14156 ENTRY(early_gdt_descr)
14157 .word GDT_ENTRIES*8-1
14158- .long gdt_page /* Overwritten for secondary CPUs */
14159+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14160
14161 /*
14162 * The boot_gdt must mirror the equivalent in setup.S and is
14163@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14164 .align L1_CACHE_BYTES
14165 ENTRY(boot_gdt)
14166 .fill GDT_ENTRY_BOOT_CS,8,0
14167- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14168- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14169+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14170+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14171+
14172+ .align PAGE_SIZE_asm
14173+ENTRY(cpu_gdt_table)
14174+ .rept NR_CPUS
14175+ .quad 0x0000000000000000 /* NULL descriptor */
14176+ .quad 0x0000000000000000 /* 0x0b reserved */
14177+ .quad 0x0000000000000000 /* 0x13 reserved */
14178+ .quad 0x0000000000000000 /* 0x1b reserved */
14179+
14180+#ifdef CONFIG_PAX_KERNEXEC
14181+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14182+#else
14183+ .quad 0x0000000000000000 /* 0x20 unused */
14184+#endif
14185+
14186+ .quad 0x0000000000000000 /* 0x28 unused */
14187+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14188+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14189+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14190+ .quad 0x0000000000000000 /* 0x4b reserved */
14191+ .quad 0x0000000000000000 /* 0x53 reserved */
14192+ .quad 0x0000000000000000 /* 0x5b reserved */
14193+
14194+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14195+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14196+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14197+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14198+
14199+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14200+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14201+
14202+ /*
14203+ * Segments used for calling PnP BIOS have byte granularity.
14204+ * The code segments and data segments have fixed 64k limits,
14205+ * the transfer segment sizes are set at run time.
14206+ */
14207+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14208+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14209+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14210+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14211+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14212+
14213+ /*
14214+ * The APM segments have byte granularity and their bases
14215+ * are set at run time. All have 64k limits.
14216+ */
14217+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14218+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14219+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14220+
14221+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14222+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14223+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14224+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14225+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14226+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14227+
14228+ /* Be sure this is zeroed to avoid false validations in Xen */
14229+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14230+ .endr
14231diff -urNp linux-3.1.4/arch/x86/kernel/head_64.S linux-3.1.4/arch/x86/kernel/head_64.S
14232--- linux-3.1.4/arch/x86/kernel/head_64.S 2011-11-11 15:19:27.000000000 -0500
14233+++ linux-3.1.4/arch/x86/kernel/head_64.S 2011-12-02 17:38:47.000000000 -0500
14234@@ -19,6 +19,8 @@
14235 #include <asm/cache.h>
14236 #include <asm/processor-flags.h>
14237 #include <asm/percpu.h>
14238+#include <asm/cpufeature.h>
14239+#include <asm/alternative-asm.h>
14240
14241 #ifdef CONFIG_PARAVIRT
14242 #include <asm/asm-offsets.h>
14243@@ -38,6 +40,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14244 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14245 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14246 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14247+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14248+L3_VMALLOC_START = pud_index(VMALLOC_START)
14249+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14250+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14251
14252 .text
14253 __HEAD
14254@@ -85,35 +91,22 @@ startup_64:
14255 */
14256 addq %rbp, init_level4_pgt + 0(%rip)
14257 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14258+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14259+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14260 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14261
14262 addq %rbp, level3_ident_pgt + 0(%rip)
14263+#ifndef CONFIG_XEN
14264+ addq %rbp, level3_ident_pgt + 8(%rip)
14265+#endif
14266
14267- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14268- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14269+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14270
14271- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14272+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14273+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14274
14275- /* Add an Identity mapping if I am above 1G */
14276- leaq _text(%rip), %rdi
14277- andq $PMD_PAGE_MASK, %rdi
14278-
14279- movq %rdi, %rax
14280- shrq $PUD_SHIFT, %rax
14281- andq $(PTRS_PER_PUD - 1), %rax
14282- jz ident_complete
14283-
14284- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14285- leaq level3_ident_pgt(%rip), %rbx
14286- movq %rdx, 0(%rbx, %rax, 8)
14287-
14288- movq %rdi, %rax
14289- shrq $PMD_SHIFT, %rax
14290- andq $(PTRS_PER_PMD - 1), %rax
14291- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14292- leaq level2_spare_pgt(%rip), %rbx
14293- movq %rdx, 0(%rbx, %rax, 8)
14294-ident_complete:
14295+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14296+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14297
14298 /*
14299 * Fixup the kernel text+data virtual addresses. Note that
14300@@ -160,8 +153,8 @@ ENTRY(secondary_startup_64)
14301 * after the boot processor executes this code.
14302 */
14303
14304- /* Enable PAE mode and PGE */
14305- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14306+ /* Enable PAE mode and PSE/PGE */
14307+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14308 movq %rax, %cr4
14309
14310 /* Setup early boot stage 4 level pagetables. */
14311@@ -183,9 +176,16 @@ ENTRY(secondary_startup_64)
14312 movl $MSR_EFER, %ecx
14313 rdmsr
14314 btsl $_EFER_SCE, %eax /* Enable System Call */
14315- btl $20,%edi /* No Execute supported? */
14316+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14317 jnc 1f
14318 btsl $_EFER_NX, %eax
14319+ leaq init_level4_pgt(%rip), %rdi
14320+#ifndef CONFIG_EFI
14321+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14322+#endif
14323+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14324+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14325+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
14326 1: wrmsr /* Make changes effective */
14327
14328 /* Setup cr0 */
14329@@ -247,6 +247,7 @@ ENTRY(secondary_startup_64)
14330 * jump. In addition we need to ensure %cs is set so we make this
14331 * a far return.
14332 */
14333+ pax_set_fptr_mask
14334 movq initial_code(%rip),%rax
14335 pushq $0 # fake return address to stop unwinder
14336 pushq $__KERNEL_CS # set correct cs
14337@@ -269,7 +270,7 @@ ENTRY(secondary_startup_64)
14338 bad_address:
14339 jmp bad_address
14340
14341- .section ".init.text","ax"
14342+ __INIT
14343 #ifdef CONFIG_EARLY_PRINTK
14344 .globl early_idt_handlers
14345 early_idt_handlers:
14346@@ -314,18 +315,23 @@ ENTRY(early_idt_handler)
14347 #endif /* EARLY_PRINTK */
14348 1: hlt
14349 jmp 1b
14350+ .previous
14351
14352 #ifdef CONFIG_EARLY_PRINTK
14353+ __INITDATA
14354 early_recursion_flag:
14355 .long 0
14356+ .previous
14357
14358+ .section .rodata,"a",@progbits
14359 early_idt_msg:
14360 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14361 early_idt_ripmsg:
14362 .asciz "RIP %s\n"
14363-#endif /* CONFIG_EARLY_PRINTK */
14364 .previous
14365+#endif /* CONFIG_EARLY_PRINTK */
14366
14367+ .section .rodata,"a",@progbits
14368 #define NEXT_PAGE(name) \
14369 .balign PAGE_SIZE; \
14370 ENTRY(name)
14371@@ -338,7 +344,6 @@ ENTRY(name)
14372 i = i + 1 ; \
14373 .endr
14374
14375- .data
14376 /*
14377 * This default setting generates an ident mapping at address 0x100000
14378 * and a mapping for the kernel that precisely maps virtual address
14379@@ -349,13 +354,36 @@ NEXT_PAGE(init_level4_pgt)
14380 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14381 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14382 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14383+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
14384+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14385+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14386+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14387 .org init_level4_pgt + L4_START_KERNEL*8, 0
14388 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14389 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14390
14391+#ifdef CONFIG_PAX_PER_CPU_PGD
14392+NEXT_PAGE(cpu_pgd)
14393+ .rept NR_CPUS
14394+ .fill 512,8,0
14395+ .endr
14396+#endif
14397+
14398 NEXT_PAGE(level3_ident_pgt)
14399 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14400+#ifdef CONFIG_XEN
14401 .fill 511,8,0
14402+#else
14403+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14404+ .fill 510,8,0
14405+#endif
14406+
14407+NEXT_PAGE(level3_vmalloc_pgt)
14408+ .fill 512,8,0
14409+
14410+NEXT_PAGE(level3_vmemmap_pgt)
14411+ .fill L3_VMEMMAP_START,8,0
14412+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14413
14414 NEXT_PAGE(level3_kernel_pgt)
14415 .fill L3_START_KERNEL,8,0
14416@@ -363,20 +391,23 @@ NEXT_PAGE(level3_kernel_pgt)
14417 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14418 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14419
14420+NEXT_PAGE(level2_vmemmap_pgt)
14421+ .fill 512,8,0
14422+
14423 NEXT_PAGE(level2_fixmap_pgt)
14424- .fill 506,8,0
14425- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14426- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14427- .fill 5,8,0
14428+ .fill 507,8,0
14429+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14430+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14431+ .fill 4,8,0
14432
14433-NEXT_PAGE(level1_fixmap_pgt)
14434+NEXT_PAGE(level1_vsyscall_pgt)
14435 .fill 512,8,0
14436
14437-NEXT_PAGE(level2_ident_pgt)
14438- /* Since I easily can, map the first 1G.
14439+ /* Since I easily can, map the first 2G.
14440 * Don't set NX because code runs from these pages.
14441 */
14442- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14443+NEXT_PAGE(level2_ident_pgt)
14444+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14445
14446 NEXT_PAGE(level2_kernel_pgt)
14447 /*
14448@@ -389,33 +420,55 @@ NEXT_PAGE(level2_kernel_pgt)
14449 * If you want to increase this then increase MODULES_VADDR
14450 * too.)
14451 */
14452- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14453- KERNEL_IMAGE_SIZE/PMD_SIZE)
14454-
14455-NEXT_PAGE(level2_spare_pgt)
14456- .fill 512, 8, 0
14457+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14458
14459 #undef PMDS
14460 #undef NEXT_PAGE
14461
14462- .data
14463+ .align PAGE_SIZE
14464+ENTRY(cpu_gdt_table)
14465+ .rept NR_CPUS
14466+ .quad 0x0000000000000000 /* NULL descriptor */
14467+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14468+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
14469+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
14470+ .quad 0x00cffb000000ffff /* __USER32_CS */
14471+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14472+ .quad 0x00affb000000ffff /* __USER_CS */
14473+
14474+#ifdef CONFIG_PAX_KERNEXEC
14475+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14476+#else
14477+ .quad 0x0 /* unused */
14478+#endif
14479+
14480+ .quad 0,0 /* TSS */
14481+ .quad 0,0 /* LDT */
14482+ .quad 0,0,0 /* three TLS descriptors */
14483+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
14484+ /* asm/segment.h:GDT_ENTRIES must match this */
14485+
14486+ /* zero the remaining page */
14487+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14488+ .endr
14489+
14490 .align 16
14491 .globl early_gdt_descr
14492 early_gdt_descr:
14493 .word GDT_ENTRIES*8-1
14494 early_gdt_descr_base:
14495- .quad INIT_PER_CPU_VAR(gdt_page)
14496+ .quad cpu_gdt_table
14497
14498 ENTRY(phys_base)
14499 /* This must match the first entry in level2_kernel_pgt */
14500 .quad 0x0000000000000000
14501
14502 #include "../../x86/xen/xen-head.S"
14503-
14504- .section .bss, "aw", @nobits
14505+
14506+ .section .rodata,"a",@progbits
14507 .align L1_CACHE_BYTES
14508 ENTRY(idt_table)
14509- .skip IDT_ENTRIES * 16
14510+ .fill 512,8,0
14511
14512 __PAGE_ALIGNED_BSS
14513 .align PAGE_SIZE
14514diff -urNp linux-3.1.4/arch/x86/kernel/i386_ksyms_32.c linux-3.1.4/arch/x86/kernel/i386_ksyms_32.c
14515--- linux-3.1.4/arch/x86/kernel/i386_ksyms_32.c 2011-11-11 15:19:27.000000000 -0500
14516+++ linux-3.1.4/arch/x86/kernel/i386_ksyms_32.c 2011-11-16 18:39:07.000000000 -0500
14517@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14518 EXPORT_SYMBOL(cmpxchg8b_emu);
14519 #endif
14520
14521+EXPORT_SYMBOL_GPL(cpu_gdt_table);
14522+
14523 /* Networking helper routines. */
14524 EXPORT_SYMBOL(csum_partial_copy_generic);
14525+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14526+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14527
14528 EXPORT_SYMBOL(__get_user_1);
14529 EXPORT_SYMBOL(__get_user_2);
14530@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14531
14532 EXPORT_SYMBOL(csum_partial);
14533 EXPORT_SYMBOL(empty_zero_page);
14534+
14535+#ifdef CONFIG_PAX_KERNEXEC
14536+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14537+#endif
14538diff -urNp linux-3.1.4/arch/x86/kernel/i8259.c linux-3.1.4/arch/x86/kernel/i8259.c
14539--- linux-3.1.4/arch/x86/kernel/i8259.c 2011-11-11 15:19:27.000000000 -0500
14540+++ linux-3.1.4/arch/x86/kernel/i8259.c 2011-11-16 18:39:07.000000000 -0500
14541@@ -210,7 +210,7 @@ spurious_8259A_irq:
14542 "spurious 8259A interrupt: IRQ%d.\n", irq);
14543 spurious_irq_mask |= irqmask;
14544 }
14545- atomic_inc(&irq_err_count);
14546+ atomic_inc_unchecked(&irq_err_count);
14547 /*
14548 * Theoretically we do not have to handle this IRQ,
14549 * but in Linux this does not cause problems and is
14550diff -urNp linux-3.1.4/arch/x86/kernel/init_task.c linux-3.1.4/arch/x86/kernel/init_task.c
14551--- linux-3.1.4/arch/x86/kernel/init_task.c 2011-11-11 15:19:27.000000000 -0500
14552+++ linux-3.1.4/arch/x86/kernel/init_task.c 2011-11-16 18:39:07.000000000 -0500
14553@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14554 * way process stacks are handled. This is done by having a special
14555 * "init_task" linker map entry..
14556 */
14557-union thread_union init_thread_union __init_task_data =
14558- { INIT_THREAD_INFO(init_task) };
14559+union thread_union init_thread_union __init_task_data;
14560
14561 /*
14562 * Initial task structure.
14563@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14564 * section. Since TSS's are completely CPU-local, we want them
14565 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14566 */
14567-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14568-
14569+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14570+EXPORT_SYMBOL(init_tss);
14571diff -urNp linux-3.1.4/arch/x86/kernel/ioport.c linux-3.1.4/arch/x86/kernel/ioport.c
14572--- linux-3.1.4/arch/x86/kernel/ioport.c 2011-11-11 15:19:27.000000000 -0500
14573+++ linux-3.1.4/arch/x86/kernel/ioport.c 2011-11-16 18:40:08.000000000 -0500
14574@@ -6,6 +6,7 @@
14575 #include <linux/sched.h>
14576 #include <linux/kernel.h>
14577 #include <linux/capability.h>
14578+#include <linux/security.h>
14579 #include <linux/errno.h>
14580 #include <linux/types.h>
14581 #include <linux/ioport.h>
14582@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
14583
14584 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14585 return -EINVAL;
14586+#ifdef CONFIG_GRKERNSEC_IO
14587+ if (turn_on && grsec_disable_privio) {
14588+ gr_handle_ioperm();
14589+ return -EPERM;
14590+ }
14591+#endif
14592 if (turn_on && !capable(CAP_SYS_RAWIO))
14593 return -EPERM;
14594
14595@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
14596 * because the ->io_bitmap_max value must match the bitmap
14597 * contents:
14598 */
14599- tss = &per_cpu(init_tss, get_cpu());
14600+ tss = init_tss + get_cpu();
14601
14602 if (turn_on)
14603 bitmap_clear(t->io_bitmap_ptr, from, num);
14604@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
14605 return -EINVAL;
14606 /* Trying to gain more privileges? */
14607 if (level > old) {
14608+#ifdef CONFIG_GRKERNSEC_IO
14609+ if (grsec_disable_privio) {
14610+ gr_handle_iopl();
14611+ return -EPERM;
14612+ }
14613+#endif
14614 if (!capable(CAP_SYS_RAWIO))
14615 return -EPERM;
14616 }
14617diff -urNp linux-3.1.4/arch/x86/kernel/irq_32.c linux-3.1.4/arch/x86/kernel/irq_32.c
14618--- linux-3.1.4/arch/x86/kernel/irq_32.c 2011-11-11 15:19:27.000000000 -0500
14619+++ linux-3.1.4/arch/x86/kernel/irq_32.c 2011-11-16 18:39:07.000000000 -0500
14620@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
14621 __asm__ __volatile__("andl %%esp,%0" :
14622 "=r" (sp) : "0" (THREAD_SIZE - 1));
14623
14624- return sp < (sizeof(struct thread_info) + STACK_WARN);
14625+ return sp < STACK_WARN;
14626 }
14627
14628 static void print_stack_overflow(void)
14629@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
14630 * per-CPU IRQ handling contexts (thread information and stack)
14631 */
14632 union irq_ctx {
14633- struct thread_info tinfo;
14634- u32 stack[THREAD_SIZE/sizeof(u32)];
14635+ unsigned long previous_esp;
14636+ u32 stack[THREAD_SIZE/sizeof(u32)];
14637 } __attribute__((aligned(THREAD_SIZE)));
14638
14639 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14640@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
14641 static inline int
14642 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14643 {
14644- union irq_ctx *curctx, *irqctx;
14645+ union irq_ctx *irqctx;
14646 u32 *isp, arg1, arg2;
14647
14648- curctx = (union irq_ctx *) current_thread_info();
14649 irqctx = __this_cpu_read(hardirq_ctx);
14650
14651 /*
14652@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
14653 * handler) we can't do that and just have to keep using the
14654 * current stack (which is the irq stack already after all)
14655 */
14656- if (unlikely(curctx == irqctx))
14657+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14658 return 0;
14659
14660 /* build the stack frame on the IRQ stack */
14661- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14662- irqctx->tinfo.task = curctx->tinfo.task;
14663- irqctx->tinfo.previous_esp = current_stack_pointer;
14664+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14665+ irqctx->previous_esp = current_stack_pointer;
14666
14667- /*
14668- * Copy the softirq bits in preempt_count so that the
14669- * softirq checks work in the hardirq context.
14670- */
14671- irqctx->tinfo.preempt_count =
14672- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14673- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14674+#ifdef CONFIG_PAX_MEMORY_UDEREF
14675+ __set_fs(MAKE_MM_SEG(0));
14676+#endif
14677
14678 if (unlikely(overflow))
14679 call_on_stack(print_stack_overflow, isp);
14680@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
14681 : "0" (irq), "1" (desc), "2" (isp),
14682 "D" (desc->handle_irq)
14683 : "memory", "cc", "ecx");
14684+
14685+#ifdef CONFIG_PAX_MEMORY_UDEREF
14686+ __set_fs(current_thread_info()->addr_limit);
14687+#endif
14688+
14689 return 1;
14690 }
14691
14692@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
14693 */
14694 void __cpuinit irq_ctx_init(int cpu)
14695 {
14696- union irq_ctx *irqctx;
14697-
14698 if (per_cpu(hardirq_ctx, cpu))
14699 return;
14700
14701- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14702- THREAD_FLAGS,
14703- THREAD_ORDER));
14704- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14705- irqctx->tinfo.cpu = cpu;
14706- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14707- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14708-
14709- per_cpu(hardirq_ctx, cpu) = irqctx;
14710-
14711- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14712- THREAD_FLAGS,
14713- THREAD_ORDER));
14714- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14715- irqctx->tinfo.cpu = cpu;
14716- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14717-
14718- per_cpu(softirq_ctx, cpu) = irqctx;
14719+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14720+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14721
14722 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14723 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14724@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
14725 asmlinkage void do_softirq(void)
14726 {
14727 unsigned long flags;
14728- struct thread_info *curctx;
14729 union irq_ctx *irqctx;
14730 u32 *isp;
14731
14732@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
14733 local_irq_save(flags);
14734
14735 if (local_softirq_pending()) {
14736- curctx = current_thread_info();
14737 irqctx = __this_cpu_read(softirq_ctx);
14738- irqctx->tinfo.task = curctx->task;
14739- irqctx->tinfo.previous_esp = current_stack_pointer;
14740+ irqctx->previous_esp = current_stack_pointer;
14741
14742 /* build the stack frame on the softirq stack */
14743- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14744+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14745+
14746+#ifdef CONFIG_PAX_MEMORY_UDEREF
14747+ __set_fs(MAKE_MM_SEG(0));
14748+#endif
14749
14750 call_on_stack(__do_softirq, isp);
14751+
14752+#ifdef CONFIG_PAX_MEMORY_UDEREF
14753+ __set_fs(current_thread_info()->addr_limit);
14754+#endif
14755+
14756 /*
14757 * Shouldn't happen, we returned above if in_interrupt():
14758 */
14759diff -urNp linux-3.1.4/arch/x86/kernel/irq.c linux-3.1.4/arch/x86/kernel/irq.c
14760--- linux-3.1.4/arch/x86/kernel/irq.c 2011-11-11 15:19:27.000000000 -0500
14761+++ linux-3.1.4/arch/x86/kernel/irq.c 2011-11-16 18:39:07.000000000 -0500
14762@@ -17,7 +17,7 @@
14763 #include <asm/mce.h>
14764 #include <asm/hw_irq.h>
14765
14766-atomic_t irq_err_count;
14767+atomic_unchecked_t irq_err_count;
14768
14769 /* Function pointer for generic interrupt vector handling */
14770 void (*x86_platform_ipi_callback)(void) = NULL;
14771@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
14772 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14773 seq_printf(p, " Machine check polls\n");
14774 #endif
14775- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14776+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14777 #if defined(CONFIG_X86_IO_APIC)
14778- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14779+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14780 #endif
14781 return 0;
14782 }
14783@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14784
14785 u64 arch_irq_stat(void)
14786 {
14787- u64 sum = atomic_read(&irq_err_count);
14788+ u64 sum = atomic_read_unchecked(&irq_err_count);
14789
14790 #ifdef CONFIG_X86_IO_APIC
14791- sum += atomic_read(&irq_mis_count);
14792+ sum += atomic_read_unchecked(&irq_mis_count);
14793 #endif
14794 return sum;
14795 }
14796diff -urNp linux-3.1.4/arch/x86/kernel/kgdb.c linux-3.1.4/arch/x86/kernel/kgdb.c
14797--- linux-3.1.4/arch/x86/kernel/kgdb.c 2011-11-11 15:19:27.000000000 -0500
14798+++ linux-3.1.4/arch/x86/kernel/kgdb.c 2011-11-16 18:39:07.000000000 -0500
14799@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
14800 #ifdef CONFIG_X86_32
14801 switch (regno) {
14802 case GDB_SS:
14803- if (!user_mode_vm(regs))
14804+ if (!user_mode(regs))
14805 *(unsigned long *)mem = __KERNEL_DS;
14806 break;
14807 case GDB_SP:
14808- if (!user_mode_vm(regs))
14809+ if (!user_mode(regs))
14810 *(unsigned long *)mem = kernel_stack_pointer(regs);
14811 break;
14812 case GDB_GS:
14813@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
14814 case 'k':
14815 /* clear the trace bit */
14816 linux_regs->flags &= ~X86_EFLAGS_TF;
14817- atomic_set(&kgdb_cpu_doing_single_step, -1);
14818+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14819
14820 /* set the trace bit if we're stepping */
14821 if (remcomInBuffer[0] == 's') {
14822 linux_regs->flags |= X86_EFLAGS_TF;
14823- atomic_set(&kgdb_cpu_doing_single_step,
14824+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14825 raw_smp_processor_id());
14826 }
14827
14828@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
14829 return NOTIFY_DONE;
14830
14831 case DIE_DEBUG:
14832- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
14833+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
14834 if (user_mode(regs))
14835 return single_step_cont(regs, args);
14836 break;
14837diff -urNp linux-3.1.4/arch/x86/kernel/kprobes.c linux-3.1.4/arch/x86/kernel/kprobes.c
14838--- linux-3.1.4/arch/x86/kernel/kprobes.c 2011-11-11 15:19:27.000000000 -0500
14839+++ linux-3.1.4/arch/x86/kernel/kprobes.c 2011-11-16 18:39:07.000000000 -0500
14840@@ -117,8 +117,11 @@ static void __kprobes __synthesize_relat
14841 } __attribute__((packed)) *insn;
14842
14843 insn = (struct __arch_relative_insn *)from;
14844+
14845+ pax_open_kernel();
14846 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
14847 insn->op = op;
14848+ pax_close_kernel();
14849 }
14850
14851 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
14852@@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_op
14853 kprobe_opcode_t opcode;
14854 kprobe_opcode_t *orig_opcodes = opcodes;
14855
14856- if (search_exception_tables((unsigned long)opcodes))
14857+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14858 return 0; /* Page fault may occur on this address. */
14859
14860 retry:
14861@@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(
14862 }
14863 }
14864 insn_get_length(&insn);
14865+ pax_open_kernel();
14866 memcpy(dest, insn.kaddr, insn.length);
14867+ pax_close_kernel();
14868
14869 #ifdef CONFIG_X86_64
14870 if (insn_rip_relative(&insn)) {
14871@@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(
14872 (u8 *) dest;
14873 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
14874 disp = (u8 *) dest + insn_offset_displacement(&insn);
14875+ pax_open_kernel();
14876 *(s32 *) disp = (s32) newdisp;
14877+ pax_close_kernel();
14878 }
14879 #endif
14880 return insn.length;
14881@@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(s
14882 */
14883 __copy_instruction(p->ainsn.insn, p->addr, 0);
14884
14885- if (can_boost(p->addr))
14886+ if (can_boost(ktla_ktva(p->addr)))
14887 p->ainsn.boostable = 0;
14888 else
14889 p->ainsn.boostable = -1;
14890
14891- p->opcode = *p->addr;
14892+ p->opcode = *(ktla_ktva(p->addr));
14893 }
14894
14895 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14896@@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(s
14897 * nor set current_kprobe, because it doesn't use single
14898 * stepping.
14899 */
14900- regs->ip = (unsigned long)p->ainsn.insn;
14901+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14902 preempt_enable_no_resched();
14903 return;
14904 }
14905@@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(s
14906 if (p->opcode == BREAKPOINT_INSTRUCTION)
14907 regs->ip = (unsigned long)p->addr;
14908 else
14909- regs->ip = (unsigned long)p->ainsn.insn;
14910+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14911 }
14912
14913 /*
14914@@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(stru
14915 setup_singlestep(p, regs, kcb, 0);
14916 return 1;
14917 }
14918- } else if (*addr != BREAKPOINT_INSTRUCTION) {
14919+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14920 /*
14921 * The breakpoint instruction was removed right
14922 * after we hit it. Another cpu has removed
14923@@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_t
14924 " movq %rax, 152(%rsp)\n"
14925 RESTORE_REGS_STRING
14926 " popfq\n"
14927+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
14928+ " btsq $63,(%rsp)\n"
14929+#endif
14930 #else
14931 " pushf\n"
14932 SAVE_REGS_STRING
14933@@ -819,7 +829,7 @@ static void __kprobes resume_execution(s
14934 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14935 {
14936 unsigned long *tos = stack_addr(regs);
14937- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14938+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14939 unsigned long orig_ip = (unsigned long)p->addr;
14940 kprobe_opcode_t *insn = p->ainsn.insn;
14941
14942@@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(s
14943 struct die_args *args = data;
14944 int ret = NOTIFY_DONE;
14945
14946- if (args->regs && user_mode_vm(args->regs))
14947+ if (args->regs && user_mode(args->regs))
14948 return ret;
14949
14950 switch (val) {
14951@@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kpr
14952 * Verify if the address gap is in 2GB range, because this uses
14953 * a relative jump.
14954 */
14955- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
14956+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
14957 if (abs(rel) > 0x7fffffff)
14958 return -ERANGE;
14959
14960@@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kpr
14961 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
14962
14963 /* Set probe function call */
14964- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
14965+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
14966
14967 /* Set returning jmp instruction at the tail of out-of-line buffer */
14968 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
14969- (u8 *)op->kp.addr + op->optinsn.size);
14970+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
14971
14972 flush_icache_range((unsigned long) buf,
14973 (unsigned long) buf + TMPL_END_IDX +
14974@@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kpr
14975 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
14976
14977 /* Backup instructions which will be replaced by jump address */
14978- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
14979+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
14980 RELATIVE_ADDR_SIZE);
14981
14982 insn_buf[0] = RELATIVEJUMP_OPCODE;
14983diff -urNp linux-3.1.4/arch/x86/kernel/kvm.c linux-3.1.4/arch/x86/kernel/kvm.c
14984--- linux-3.1.4/arch/x86/kernel/kvm.c 2011-11-11 15:19:27.000000000 -0500
14985+++ linux-3.1.4/arch/x86/kernel/kvm.c 2011-11-16 18:39:07.000000000 -0500
14986@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(vo
14987 pv_mmu_ops.set_pud = kvm_set_pud;
14988 #if PAGETABLE_LEVELS == 4
14989 pv_mmu_ops.set_pgd = kvm_set_pgd;
14990+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
14991 #endif
14992 #endif
14993 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
14994diff -urNp linux-3.1.4/arch/x86/kernel/ldt.c linux-3.1.4/arch/x86/kernel/ldt.c
14995--- linux-3.1.4/arch/x86/kernel/ldt.c 2011-11-11 15:19:27.000000000 -0500
14996+++ linux-3.1.4/arch/x86/kernel/ldt.c 2011-11-16 18:39:07.000000000 -0500
14997@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
14998 if (reload) {
14999 #ifdef CONFIG_SMP
15000 preempt_disable();
15001- load_LDT(pc);
15002+ load_LDT_nolock(pc);
15003 if (!cpumask_equal(mm_cpumask(current->mm),
15004 cpumask_of(smp_processor_id())))
15005 smp_call_function(flush_ldt, current->mm, 1);
15006 preempt_enable();
15007 #else
15008- load_LDT(pc);
15009+ load_LDT_nolock(pc);
15010 #endif
15011 }
15012 if (oldsize) {
15013@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
15014 return err;
15015
15016 for (i = 0; i < old->size; i++)
15017- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15018+ write_ldt_entry(new->ldt, i, old->ldt + i);
15019 return 0;
15020 }
15021
15022@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
15023 retval = copy_ldt(&mm->context, &old_mm->context);
15024 mutex_unlock(&old_mm->context.lock);
15025 }
15026+
15027+ if (tsk == current) {
15028+ mm->context.vdso = 0;
15029+
15030+#ifdef CONFIG_X86_32
15031+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15032+ mm->context.user_cs_base = 0UL;
15033+ mm->context.user_cs_limit = ~0UL;
15034+
15035+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15036+ cpus_clear(mm->context.cpu_user_cs_mask);
15037+#endif
15038+
15039+#endif
15040+#endif
15041+
15042+ }
15043+
15044 return retval;
15045 }
15046
15047@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
15048 }
15049 }
15050
15051+#ifdef CONFIG_PAX_SEGMEXEC
15052+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15053+ error = -EINVAL;
15054+ goto out_unlock;
15055+ }
15056+#endif
15057+
15058 fill_ldt(&ldt, &ldt_info);
15059 if (oldmode)
15060 ldt.avl = 0;
15061diff -urNp linux-3.1.4/arch/x86/kernel/machine_kexec_32.c linux-3.1.4/arch/x86/kernel/machine_kexec_32.c
15062--- linux-3.1.4/arch/x86/kernel/machine_kexec_32.c 2011-11-11 15:19:27.000000000 -0500
15063+++ linux-3.1.4/arch/x86/kernel/machine_kexec_32.c 2011-11-16 18:39:07.000000000 -0500
15064@@ -27,7 +27,7 @@
15065 #include <asm/cacheflush.h>
15066 #include <asm/debugreg.h>
15067
15068-static void set_idt(void *newidt, __u16 limit)
15069+static void set_idt(struct desc_struct *newidt, __u16 limit)
15070 {
15071 struct desc_ptr curidt;
15072
15073@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
15074 }
15075
15076
15077-static void set_gdt(void *newgdt, __u16 limit)
15078+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15079 {
15080 struct desc_ptr curgdt;
15081
15082@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15083 }
15084
15085 control_page = page_address(image->control_code_page);
15086- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15087+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15088
15089 relocate_kernel_ptr = control_page;
15090 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15091diff -urNp linux-3.1.4/arch/x86/kernel/microcode_intel.c linux-3.1.4/arch/x86/kernel/microcode_intel.c
15092--- linux-3.1.4/arch/x86/kernel/microcode_intel.c 2011-11-11 15:19:27.000000000 -0500
15093+++ linux-3.1.4/arch/x86/kernel/microcode_intel.c 2011-11-16 18:39:07.000000000 -0500
15094@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
15095
15096 static int get_ucode_user(void *to, const void *from, size_t n)
15097 {
15098- return copy_from_user(to, from, n);
15099+ return copy_from_user(to, (const void __force_user *)from, n);
15100 }
15101
15102 static enum ucode_state
15103 request_microcode_user(int cpu, const void __user *buf, size_t size)
15104 {
15105- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15106+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15107 }
15108
15109 static void microcode_fini_cpu(int cpu)
15110diff -urNp linux-3.1.4/arch/x86/kernel/module.c linux-3.1.4/arch/x86/kernel/module.c
15111--- linux-3.1.4/arch/x86/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
15112+++ linux-3.1.4/arch/x86/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
15113@@ -36,15 +36,60 @@
15114 #define DEBUGP(fmt...)
15115 #endif
15116
15117-void *module_alloc(unsigned long size)
15118+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15119 {
15120 if (PAGE_ALIGN(size) > MODULES_LEN)
15121 return NULL;
15122 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15123- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15124+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15125 -1, __builtin_return_address(0));
15126 }
15127
15128+void *module_alloc(unsigned long size)
15129+{
15130+
15131+#ifdef CONFIG_PAX_KERNEXEC
15132+ return __module_alloc(size, PAGE_KERNEL);
15133+#else
15134+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15135+#endif
15136+
15137+}
15138+
15139+#ifdef CONFIG_PAX_KERNEXEC
15140+#ifdef CONFIG_X86_32
15141+void *module_alloc_exec(unsigned long size)
15142+{
15143+ struct vm_struct *area;
15144+
15145+ if (size == 0)
15146+ return NULL;
15147+
15148+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15149+ return area ? area->addr : NULL;
15150+}
15151+EXPORT_SYMBOL(module_alloc_exec);
15152+
15153+void module_free_exec(struct module *mod, void *module_region)
15154+{
15155+ vunmap(module_region);
15156+}
15157+EXPORT_SYMBOL(module_free_exec);
15158+#else
15159+void module_free_exec(struct module *mod, void *module_region)
15160+{
15161+ module_free(mod, module_region);
15162+}
15163+EXPORT_SYMBOL(module_free_exec);
15164+
15165+void *module_alloc_exec(unsigned long size)
15166+{
15167+ return __module_alloc(size, PAGE_KERNEL_RX);
15168+}
15169+EXPORT_SYMBOL(module_alloc_exec);
15170+#endif
15171+#endif
15172+
15173 #ifdef CONFIG_X86_32
15174 int apply_relocate(Elf32_Shdr *sechdrs,
15175 const char *strtab,
15176@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15177 unsigned int i;
15178 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15179 Elf32_Sym *sym;
15180- uint32_t *location;
15181+ uint32_t *plocation, location;
15182
15183 DEBUGP("Applying relocate section %u to %u\n", relsec,
15184 sechdrs[relsec].sh_info);
15185 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15186 /* This is where to make the change */
15187- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15188- + rel[i].r_offset;
15189+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15190+ location = (uint32_t)plocation;
15191+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15192+ plocation = ktla_ktva((void *)plocation);
15193 /* This is the symbol it is referring to. Note that all
15194 undefined symbols have been resolved. */
15195 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15196@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15197 switch (ELF32_R_TYPE(rel[i].r_info)) {
15198 case R_386_32:
15199 /* We add the value into the location given */
15200- *location += sym->st_value;
15201+ pax_open_kernel();
15202+ *plocation += sym->st_value;
15203+ pax_close_kernel();
15204 break;
15205 case R_386_PC32:
15206 /* Add the value, subtract its postition */
15207- *location += sym->st_value - (uint32_t)location;
15208+ pax_open_kernel();
15209+ *plocation += sym->st_value - location;
15210+ pax_close_kernel();
15211 break;
15212 default:
15213 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15214@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15215 case R_X86_64_NONE:
15216 break;
15217 case R_X86_64_64:
15218+ pax_open_kernel();
15219 *(u64 *)loc = val;
15220+ pax_close_kernel();
15221 break;
15222 case R_X86_64_32:
15223+ pax_open_kernel();
15224 *(u32 *)loc = val;
15225+ pax_close_kernel();
15226 if (val != *(u32 *)loc)
15227 goto overflow;
15228 break;
15229 case R_X86_64_32S:
15230+ pax_open_kernel();
15231 *(s32 *)loc = val;
15232+ pax_close_kernel();
15233 if ((s64)val != *(s32 *)loc)
15234 goto overflow;
15235 break;
15236 case R_X86_64_PC32:
15237 val -= (u64)loc;
15238+ pax_open_kernel();
15239 *(u32 *)loc = val;
15240+ pax_close_kernel();
15241+
15242 #if 0
15243 if ((s64)val != *(s32 *)loc)
15244 goto overflow;
15245diff -urNp linux-3.1.4/arch/x86/kernel/paravirt.c linux-3.1.4/arch/x86/kernel/paravirt.c
15246--- linux-3.1.4/arch/x86/kernel/paravirt.c 2011-11-11 15:19:27.000000000 -0500
15247+++ linux-3.1.4/arch/x86/kernel/paravirt.c 2011-11-17 18:29:42.000000000 -0500
15248@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15249 {
15250 return x;
15251 }
15252+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15253+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15254+#endif
15255
15256 void __init default_banner(void)
15257 {
15258@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
15259 .pv_lock_ops = pv_lock_ops,
15260 #endif
15261 };
15262+
15263+ pax_track_stack();
15264+
15265 return *((void **)&tmpl + type);
15266 }
15267
15268@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
15269 if (opfunc == NULL)
15270 /* If there's no function, patch it with a ud2a (BUG) */
15271 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15272- else if (opfunc == _paravirt_nop)
15273+ else if (opfunc == (void *)_paravirt_nop)
15274 /* If the operation is a nop, then nop the callsite */
15275 ret = paravirt_patch_nop();
15276
15277 /* identity functions just return their single argument */
15278- else if (opfunc == _paravirt_ident_32)
15279+ else if (opfunc == (void *)_paravirt_ident_32)
15280 ret = paravirt_patch_ident_32(insnbuf, len);
15281- else if (opfunc == _paravirt_ident_64)
15282+ else if (opfunc == (void *)_paravirt_ident_64)
15283 ret = paravirt_patch_ident_64(insnbuf, len);
15284+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15285+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
15286+ ret = paravirt_patch_ident_64(insnbuf, len);
15287+#endif
15288
15289 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15290 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
15291@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
15292 if (insn_len > len || start == NULL)
15293 insn_len = len;
15294 else
15295- memcpy(insnbuf, start, insn_len);
15296+ memcpy(insnbuf, ktla_ktva(start), insn_len);
15297
15298 return insn_len;
15299 }
15300@@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void)
15301 preempt_enable();
15302 }
15303
15304-struct pv_info pv_info = {
15305+struct pv_info pv_info __read_only = {
15306 .name = "bare hardware",
15307 .paravirt_enabled = 0,
15308 .kernel_rpl = 0,
15309@@ -313,16 +323,16 @@ struct pv_info pv_info = {
15310 #endif
15311 };
15312
15313-struct pv_init_ops pv_init_ops = {
15314+struct pv_init_ops pv_init_ops __read_only = {
15315 .patch = native_patch,
15316 };
15317
15318-struct pv_time_ops pv_time_ops = {
15319+struct pv_time_ops pv_time_ops __read_only = {
15320 .sched_clock = native_sched_clock,
15321 .steal_clock = native_steal_clock,
15322 };
15323
15324-struct pv_irq_ops pv_irq_ops = {
15325+struct pv_irq_ops pv_irq_ops __read_only = {
15326 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15327 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15328 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15329@@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = {
15330 #endif
15331 };
15332
15333-struct pv_cpu_ops pv_cpu_ops = {
15334+struct pv_cpu_ops pv_cpu_ops __read_only = {
15335 .cpuid = native_cpuid,
15336 .get_debugreg = native_get_debugreg,
15337 .set_debugreg = native_set_debugreg,
15338@@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = {
15339 .end_context_switch = paravirt_nop,
15340 };
15341
15342-struct pv_apic_ops pv_apic_ops = {
15343+struct pv_apic_ops pv_apic_ops __read_only = {
15344 #ifdef CONFIG_X86_LOCAL_APIC
15345 .startup_ipi_hook = paravirt_nop,
15346 #endif
15347 };
15348
15349-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
15350+#ifdef CONFIG_X86_32
15351+#ifdef CONFIG_X86_PAE
15352+/* 64-bit pagetable entries */
15353+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
15354+#else
15355 /* 32-bit pagetable entries */
15356 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
15357+#endif
15358 #else
15359 /* 64-bit pagetable entries */
15360 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15361 #endif
15362
15363-struct pv_mmu_ops pv_mmu_ops = {
15364+struct pv_mmu_ops pv_mmu_ops __read_only = {
15365
15366 .read_cr2 = native_read_cr2,
15367 .write_cr2 = native_write_cr2,
15368@@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
15369 .make_pud = PTE_IDENT,
15370
15371 .set_pgd = native_set_pgd,
15372+ .set_pgd_batched = native_set_pgd_batched,
15373 #endif
15374 #endif /* PAGETABLE_LEVELS >= 3 */
15375
15376@@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15377 },
15378
15379 .set_fixmap = native_set_fixmap,
15380+
15381+#ifdef CONFIG_PAX_KERNEXEC
15382+ .pax_open_kernel = native_pax_open_kernel,
15383+ .pax_close_kernel = native_pax_close_kernel,
15384+#endif
15385+
15386 };
15387
15388 EXPORT_SYMBOL_GPL(pv_time_ops);
15389diff -urNp linux-3.1.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.1.4/arch/x86/kernel/paravirt-spinlocks.c
15390--- linux-3.1.4/arch/x86/kernel/paravirt-spinlocks.c 2011-11-11 15:19:27.000000000 -0500
15391+++ linux-3.1.4/arch/x86/kernel/paravirt-spinlocks.c 2011-11-16 18:39:07.000000000 -0500
15392@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
15393 arch_spin_lock(lock);
15394 }
15395
15396-struct pv_lock_ops pv_lock_ops = {
15397+struct pv_lock_ops pv_lock_ops __read_only = {
15398 #ifdef CONFIG_SMP
15399 .spin_is_locked = __ticket_spin_is_locked,
15400 .spin_is_contended = __ticket_spin_is_contended,
15401diff -urNp linux-3.1.4/arch/x86/kernel/pci-iommu_table.c linux-3.1.4/arch/x86/kernel/pci-iommu_table.c
15402--- linux-3.1.4/arch/x86/kernel/pci-iommu_table.c 2011-11-11 15:19:27.000000000 -0500
15403+++ linux-3.1.4/arch/x86/kernel/pci-iommu_table.c 2011-11-16 18:40:08.000000000 -0500
15404@@ -2,7 +2,7 @@
15405 #include <asm/iommu_table.h>
15406 #include <linux/string.h>
15407 #include <linux/kallsyms.h>
15408-
15409+#include <linux/sched.h>
15410
15411 #define DEBUG 1
15412
15413@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
15414 {
15415 struct iommu_table_entry *p, *q, *x;
15416
15417+ pax_track_stack();
15418+
15419 /* Simple cyclic dependency checker. */
15420 for (p = start; p < finish; p++) {
15421 q = find_dependents_of(start, finish, p);
15422diff -urNp linux-3.1.4/arch/x86/kernel/process_32.c linux-3.1.4/arch/x86/kernel/process_32.c
15423--- linux-3.1.4/arch/x86/kernel/process_32.c 2011-11-11 15:19:27.000000000 -0500
15424+++ linux-3.1.4/arch/x86/kernel/process_32.c 2011-11-16 18:39:07.000000000 -0500
15425@@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __as
15426 unsigned long thread_saved_pc(struct task_struct *tsk)
15427 {
15428 return ((unsigned long *)tsk->thread.sp)[3];
15429+//XXX return tsk->thread.eip;
15430 }
15431
15432 #ifndef CONFIG_SMP
15433@@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, i
15434 unsigned long sp;
15435 unsigned short ss, gs;
15436
15437- if (user_mode_vm(regs)) {
15438+ if (user_mode(regs)) {
15439 sp = regs->sp;
15440 ss = regs->ss & 0xffff;
15441- gs = get_user_gs(regs);
15442 } else {
15443 sp = kernel_stack_pointer(regs);
15444 savesegment(ss, ss);
15445- savesegment(gs, gs);
15446 }
15447+ gs = get_user_gs(regs);
15448
15449 show_regs_common();
15450
15451@@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flag
15452 struct task_struct *tsk;
15453 int err;
15454
15455- childregs = task_pt_regs(p);
15456+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15457 *childregs = *regs;
15458 childregs->ax = 0;
15459 childregs->sp = sp;
15460
15461 p->thread.sp = (unsigned long) childregs;
15462 p->thread.sp0 = (unsigned long) (childregs+1);
15463+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15464
15465 p->thread.ip = (unsigned long) ret_from_fork;
15466
15467@@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p,
15468 struct thread_struct *prev = &prev_p->thread,
15469 *next = &next_p->thread;
15470 int cpu = smp_processor_id();
15471- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15472+ struct tss_struct *tss = init_tss + cpu;
15473 bool preload_fpu;
15474
15475 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15476@@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p,
15477 */
15478 lazy_save_gs(prev->gs);
15479
15480+#ifdef CONFIG_PAX_MEMORY_UDEREF
15481+ __set_fs(task_thread_info(next_p)->addr_limit);
15482+#endif
15483+
15484 /*
15485 * Load the per-thread Thread-Local Storage descriptor.
15486 */
15487@@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p,
15488 */
15489 arch_end_context_switch(next_p);
15490
15491+ percpu_write(current_task, next_p);
15492+ percpu_write(current_tinfo, &next_p->tinfo);
15493+
15494 if (preload_fpu)
15495 __math_state_restore();
15496
15497@@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p,
15498 if (prev->gs | next->gs)
15499 lazy_load_gs(next->gs);
15500
15501- percpu_write(current_task, next_p);
15502-
15503 return prev_p;
15504 }
15505
15506@@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_stru
15507 } while (count++ < 16);
15508 return 0;
15509 }
15510-
15511diff -urNp linux-3.1.4/arch/x86/kernel/process_64.c linux-3.1.4/arch/x86/kernel/process_64.c
15512--- linux-3.1.4/arch/x86/kernel/process_64.c 2011-11-11 15:19:27.000000000 -0500
15513+++ linux-3.1.4/arch/x86/kernel/process_64.c 2011-11-16 18:39:07.000000000 -0500
15514@@ -88,7 +88,7 @@ static void __exit_idle(void)
15515 void exit_idle(void)
15516 {
15517 /* idle loop has pid 0 */
15518- if (current->pid)
15519+ if (task_pid_nr(current))
15520 return;
15521 __exit_idle();
15522 }
15523@@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flag
15524 struct pt_regs *childregs;
15525 struct task_struct *me = current;
15526
15527- childregs = ((struct pt_regs *)
15528- (THREAD_SIZE + task_stack_page(p))) - 1;
15529+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15530 *childregs = *regs;
15531
15532 childregs->ax = 0;
15533@@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flag
15534 p->thread.sp = (unsigned long) childregs;
15535 p->thread.sp0 = (unsigned long) (childregs+1);
15536 p->thread.usersp = me->thread.usersp;
15537+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15538
15539 set_tsk_thread_flag(p, TIF_FORK);
15540
15541@@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p,
15542 struct thread_struct *prev = &prev_p->thread;
15543 struct thread_struct *next = &next_p->thread;
15544 int cpu = smp_processor_id();
15545- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15546+ struct tss_struct *tss = init_tss + cpu;
15547 unsigned fsindex, gsindex;
15548 bool preload_fpu;
15549
15550@@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p,
15551 prev->usersp = percpu_read(old_rsp);
15552 percpu_write(old_rsp, next->usersp);
15553 percpu_write(current_task, next_p);
15554+ percpu_write(current_tinfo, &next_p->tinfo);
15555
15556- percpu_write(kernel_stack,
15557- (unsigned long)task_stack_page(next_p) +
15558- THREAD_SIZE - KERNEL_STACK_OFFSET);
15559+ percpu_write(kernel_stack, next->sp0);
15560
15561 /*
15562 * Now maybe reload the debug registers and handle I/O bitmaps
15563@@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_stru
15564 if (!p || p == current || p->state == TASK_RUNNING)
15565 return 0;
15566 stack = (unsigned long)task_stack_page(p);
15567- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15568+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15569 return 0;
15570 fp = *(u64 *)(p->thread.sp);
15571 do {
15572- if (fp < (unsigned long)stack ||
15573- fp >= (unsigned long)stack+THREAD_SIZE)
15574+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15575 return 0;
15576 ip = *(u64 *)(fp+8);
15577 if (!in_sched_functions(ip))
15578diff -urNp linux-3.1.4/arch/x86/kernel/process.c linux-3.1.4/arch/x86/kernel/process.c
15579--- linux-3.1.4/arch/x86/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
15580+++ linux-3.1.4/arch/x86/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
15581@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
15582
15583 void free_thread_info(struct thread_info *ti)
15584 {
15585- free_thread_xstate(ti->task);
15586 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15587 }
15588
15589+static struct kmem_cache *task_struct_cachep;
15590+
15591 void arch_task_cache_init(void)
15592 {
15593- task_xstate_cachep =
15594- kmem_cache_create("task_xstate", xstate_size,
15595+ /* create a slab on which task_structs can be allocated */
15596+ task_struct_cachep =
15597+ kmem_cache_create("task_struct", sizeof(struct task_struct),
15598+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15599+
15600+ task_xstate_cachep =
15601+ kmem_cache_create("task_xstate", xstate_size,
15602 __alignof__(union thread_xstate),
15603- SLAB_PANIC | SLAB_NOTRACK, NULL);
15604+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15605+}
15606+
15607+struct task_struct *alloc_task_struct_node(int node)
15608+{
15609+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
15610+}
15611+
15612+void free_task_struct(struct task_struct *task)
15613+{
15614+ free_thread_xstate(task);
15615+ kmem_cache_free(task_struct_cachep, task);
15616 }
15617
15618 /*
15619@@ -70,7 +87,7 @@ void exit_thread(void)
15620 unsigned long *bp = t->io_bitmap_ptr;
15621
15622 if (bp) {
15623- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15624+ struct tss_struct *tss = init_tss + get_cpu();
15625
15626 t->io_bitmap_ptr = NULL;
15627 clear_thread_flag(TIF_IO_BITMAP);
15628@@ -106,7 +123,7 @@ void show_regs_common(void)
15629
15630 printk(KERN_CONT "\n");
15631 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
15632- current->pid, current->comm, print_tainted(),
15633+ task_pid_nr(current), current->comm, print_tainted(),
15634 init_utsname()->release,
15635 (int)strcspn(init_utsname()->version, " "),
15636 init_utsname()->version);
15637@@ -120,6 +137,9 @@ void flush_thread(void)
15638 {
15639 struct task_struct *tsk = current;
15640
15641+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15642+ loadsegment(gs, 0);
15643+#endif
15644 flush_ptrace_hw_breakpoint(tsk);
15645 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
15646 /*
15647@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
15648 regs.di = (unsigned long) arg;
15649
15650 #ifdef CONFIG_X86_32
15651- regs.ds = __USER_DS;
15652- regs.es = __USER_DS;
15653+ regs.ds = __KERNEL_DS;
15654+ regs.es = __KERNEL_DS;
15655 regs.fs = __KERNEL_PERCPU;
15656- regs.gs = __KERNEL_STACK_CANARY;
15657+ savesegment(gs, regs.gs);
15658 #else
15659 regs.ss = __KERNEL_DS;
15660 #endif
15661@@ -403,7 +423,7 @@ void default_idle(void)
15662 EXPORT_SYMBOL(default_idle);
15663 #endif
15664
15665-void stop_this_cpu(void *dummy)
15666+__noreturn void stop_this_cpu(void *dummy)
15667 {
15668 local_irq_disable();
15669 /*
15670@@ -645,16 +665,37 @@ static int __init idle_setup(char *str)
15671 }
15672 early_param("idle", idle_setup);
15673
15674-unsigned long arch_align_stack(unsigned long sp)
15675+#ifdef CONFIG_PAX_RANDKSTACK
15676+void pax_randomize_kstack(struct pt_regs *regs)
15677 {
15678- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15679- sp -= get_random_int() % 8192;
15680- return sp & ~0xf;
15681-}
15682+ struct thread_struct *thread = &current->thread;
15683+ unsigned long time;
15684
15685-unsigned long arch_randomize_brk(struct mm_struct *mm)
15686-{
15687- unsigned long range_end = mm->brk + 0x02000000;
15688- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15689-}
15690+ if (!randomize_va_space)
15691+ return;
15692+
15693+ if (v8086_mode(regs))
15694+ return;
15695
15696+ rdtscl(time);
15697+
15698+ /* P4 seems to return a 0 LSB, ignore it */
15699+#ifdef CONFIG_MPENTIUM4
15700+ time &= 0x3EUL;
15701+ time <<= 2;
15702+#elif defined(CONFIG_X86_64)
15703+ time &= 0xFUL;
15704+ time <<= 4;
15705+#else
15706+ time &= 0x1FUL;
15707+ time <<= 3;
15708+#endif
15709+
15710+ thread->sp0 ^= time;
15711+ load_sp0(init_tss + smp_processor_id(), thread);
15712+
15713+#ifdef CONFIG_X86_64
15714+ percpu_write(kernel_stack, thread->sp0);
15715+#endif
15716+}
15717+#endif
15718diff -urNp linux-3.1.4/arch/x86/kernel/ptrace.c linux-3.1.4/arch/x86/kernel/ptrace.c
15719--- linux-3.1.4/arch/x86/kernel/ptrace.c 2011-11-11 15:19:27.000000000 -0500
15720+++ linux-3.1.4/arch/x86/kernel/ptrace.c 2011-11-16 18:39:07.000000000 -0500
15721@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *chi
15722 unsigned long addr, unsigned long data)
15723 {
15724 int ret;
15725- unsigned long __user *datap = (unsigned long __user *)data;
15726+ unsigned long __user *datap = (__force unsigned long __user *)data;
15727
15728 switch (request) {
15729 /* read the word at location addr in the USER area. */
15730@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *chi
15731 if ((int) addr < 0)
15732 return -EIO;
15733 ret = do_get_thread_area(child, addr,
15734- (struct user_desc __user *)data);
15735+ (__force struct user_desc __user *) data);
15736 break;
15737
15738 case PTRACE_SET_THREAD_AREA:
15739 if ((int) addr < 0)
15740 return -EIO;
15741 ret = do_set_thread_area(child, addr,
15742- (struct user_desc __user *)data, 0);
15743+ (__force struct user_desc __user *) data, 0);
15744 break;
15745 #endif
15746
15747@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct tas
15748 memset(info, 0, sizeof(*info));
15749 info->si_signo = SIGTRAP;
15750 info->si_code = si_code;
15751- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
15752+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
15753 }
15754
15755 void user_single_step_siginfo(struct task_struct *tsk,
15756diff -urNp linux-3.1.4/arch/x86/kernel/pvclock.c linux-3.1.4/arch/x86/kernel/pvclock.c
15757--- linux-3.1.4/arch/x86/kernel/pvclock.c 2011-11-11 15:19:27.000000000 -0500
15758+++ linux-3.1.4/arch/x86/kernel/pvclock.c 2011-11-16 18:39:07.000000000 -0500
15759@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
15760 return pv_tsc_khz;
15761 }
15762
15763-static atomic64_t last_value = ATOMIC64_INIT(0);
15764+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
15765
15766 void pvclock_resume(void)
15767 {
15768- atomic64_set(&last_value, 0);
15769+ atomic64_set_unchecked(&last_value, 0);
15770 }
15771
15772 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
15773@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
15774 * updating at the same time, and one of them could be slightly behind,
15775 * making the assumption that last_value always go forward fail to hold.
15776 */
15777- last = atomic64_read(&last_value);
15778+ last = atomic64_read_unchecked(&last_value);
15779 do {
15780 if (ret < last)
15781 return last;
15782- last = atomic64_cmpxchg(&last_value, last, ret);
15783+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
15784 } while (unlikely(last != ret));
15785
15786 return ret;
15787diff -urNp linux-3.1.4/arch/x86/kernel/reboot.c linux-3.1.4/arch/x86/kernel/reboot.c
15788--- linux-3.1.4/arch/x86/kernel/reboot.c 2011-11-11 15:19:27.000000000 -0500
15789+++ linux-3.1.4/arch/x86/kernel/reboot.c 2011-11-16 18:39:07.000000000 -0500
15790@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
15791 EXPORT_SYMBOL(pm_power_off);
15792
15793 static const struct desc_ptr no_idt = {};
15794-static int reboot_mode;
15795+static unsigned short reboot_mode;
15796 enum reboot_type reboot_type = BOOT_ACPI;
15797 int reboot_force;
15798
15799@@ -315,13 +315,17 @@ core_initcall(reboot_init);
15800 extern const unsigned char machine_real_restart_asm[];
15801 extern const u64 machine_real_restart_gdt[3];
15802
15803-void machine_real_restart(unsigned int type)
15804+__noreturn void machine_real_restart(unsigned int type)
15805 {
15806 void *restart_va;
15807 unsigned long restart_pa;
15808- void (*restart_lowmem)(unsigned int);
15809+ void (* __noreturn restart_lowmem)(unsigned int);
15810 u64 *lowmem_gdt;
15811
15812+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15813+ struct desc_struct *gdt;
15814+#endif
15815+
15816 local_irq_disable();
15817
15818 /* Write zero to CMOS register number 0x0f, which the BIOS POST
15819@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
15820 boot)". This seems like a fairly standard thing that gets set by
15821 REBOOT.COM programs, and the previous reset routine did this
15822 too. */
15823- *((unsigned short *)0x472) = reboot_mode;
15824+ *(unsigned short *)(__va(0x472)) = reboot_mode;
15825
15826 /* Patch the GDT in the low memory trampoline */
15827 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
15828
15829 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
15830 restart_pa = virt_to_phys(restart_va);
15831- restart_lowmem = (void (*)(unsigned int))restart_pa;
15832+ restart_lowmem = (void *)restart_pa;
15833
15834 /* GDT[0]: GDT self-pointer */
15835 lowmem_gdt[0] =
15836@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
15837 GDT_ENTRY(0x009b, restart_pa, 0xffff);
15838
15839 /* Jump to the identity-mapped low memory code */
15840+
15841+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15842+ gdt = get_cpu_gdt_table(smp_processor_id());
15843+ pax_open_kernel();
15844+#ifdef CONFIG_PAX_MEMORY_UDEREF
15845+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
15846+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
15847+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
15848+#endif
15849+#ifdef CONFIG_PAX_KERNEXEC
15850+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
15851+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
15852+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
15853+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
15854+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
15855+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
15856+#endif
15857+ pax_close_kernel();
15858+#endif
15859+
15860+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15861+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
15862+ unreachable();
15863+#else
15864 restart_lowmem(type);
15865+#endif
15866+
15867 }
15868 #ifdef CONFIG_APM_MODULE
15869 EXPORT_SYMBOL(machine_real_restart);
15870@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
15871 * try to force a triple fault and then cycle between hitting the keyboard
15872 * controller and doing that
15873 */
15874-static void native_machine_emergency_restart(void)
15875+__noreturn static void native_machine_emergency_restart(void)
15876 {
15877 int i;
15878 int attempt = 0;
15879@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
15880 #endif
15881 }
15882
15883-static void __machine_emergency_restart(int emergency)
15884+static __noreturn void __machine_emergency_restart(int emergency)
15885 {
15886 reboot_emergency = emergency;
15887 machine_ops.emergency_restart();
15888 }
15889
15890-static void native_machine_restart(char *__unused)
15891+static __noreturn void native_machine_restart(char *__unused)
15892 {
15893 printk("machine restart\n");
15894
15895@@ -662,7 +692,7 @@ static void native_machine_restart(char
15896 __machine_emergency_restart(0);
15897 }
15898
15899-static void native_machine_halt(void)
15900+static __noreturn void native_machine_halt(void)
15901 {
15902 /* stop other cpus and apics */
15903 machine_shutdown();
15904@@ -673,7 +703,7 @@ static void native_machine_halt(void)
15905 stop_this_cpu(NULL);
15906 }
15907
15908-static void native_machine_power_off(void)
15909+__noreturn static void native_machine_power_off(void)
15910 {
15911 if (pm_power_off) {
15912 if (!reboot_force)
15913@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
15914 }
15915 /* a fallback in case there is no PM info available */
15916 tboot_shutdown(TB_SHUTDOWN_HALT);
15917+ unreachable();
15918 }
15919
15920 struct machine_ops machine_ops = {
15921diff -urNp linux-3.1.4/arch/x86/kernel/relocate_kernel_64.S linux-3.1.4/arch/x86/kernel/relocate_kernel_64.S
15922--- linux-3.1.4/arch/x86/kernel/relocate_kernel_64.S 2011-11-11 15:19:27.000000000 -0500
15923+++ linux-3.1.4/arch/x86/kernel/relocate_kernel_64.S 2011-12-02 17:38:47.000000000 -0500
15924@@ -11,6 +11,7 @@
15925 #include <asm/kexec.h>
15926 #include <asm/processor-flags.h>
15927 #include <asm/pgtable_types.h>
15928+#include <asm/alternative-asm.h>
15929
15930 /*
15931 * Must be relocatable PIC code callable as a C function
15932@@ -160,13 +161,14 @@ identity_mapped:
15933 xorq %rbp, %rbp
15934 xorq %r8, %r8
15935 xorq %r9, %r9
15936- xorq %r10, %r9
15937+ xorq %r10, %r10
15938 xorq %r11, %r11
15939 xorq %r12, %r12
15940 xorq %r13, %r13
15941 xorq %r14, %r14
15942 xorq %r15, %r15
15943
15944+ pax_force_retaddr 0, 1
15945 ret
15946
15947 1:
15948diff -urNp linux-3.1.4/arch/x86/kernel/setup.c linux-3.1.4/arch/x86/kernel/setup.c
15949--- linux-3.1.4/arch/x86/kernel/setup.c 2011-11-11 15:19:27.000000000 -0500
15950+++ linux-3.1.4/arch/x86/kernel/setup.c 2011-11-16 18:39:07.000000000 -0500
15951@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
15952
15953 switch (data->type) {
15954 case SETUP_E820_EXT:
15955- parse_e820_ext(data);
15956+ parse_e820_ext((struct setup_data __force_kernel *)data);
15957 break;
15958 case SETUP_DTB:
15959 add_dtb(pa_data);
15960@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
15961 * area (640->1Mb) as ram even though it is not.
15962 * take them out.
15963 */
15964- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
15965+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
15966 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
15967 }
15968
15969@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
15970
15971 if (!boot_params.hdr.root_flags)
15972 root_mountflags &= ~MS_RDONLY;
15973- init_mm.start_code = (unsigned long) _text;
15974- init_mm.end_code = (unsigned long) _etext;
15975+ init_mm.start_code = ktla_ktva((unsigned long) _text);
15976+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
15977 init_mm.end_data = (unsigned long) _edata;
15978 init_mm.brk = _brk_end;
15979
15980- code_resource.start = virt_to_phys(_text);
15981- code_resource.end = virt_to_phys(_etext)-1;
15982- data_resource.start = virt_to_phys(_etext);
15983+ code_resource.start = virt_to_phys(ktla_ktva(_text));
15984+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15985+ data_resource.start = virt_to_phys(_sdata);
15986 data_resource.end = virt_to_phys(_edata)-1;
15987 bss_resource.start = virt_to_phys(&__bss_start);
15988 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15989diff -urNp linux-3.1.4/arch/x86/kernel/setup_percpu.c linux-3.1.4/arch/x86/kernel/setup_percpu.c
15990--- linux-3.1.4/arch/x86/kernel/setup_percpu.c 2011-11-11 15:19:27.000000000 -0500
15991+++ linux-3.1.4/arch/x86/kernel/setup_percpu.c 2011-11-16 18:39:07.000000000 -0500
15992@@ -21,19 +21,17 @@
15993 #include <asm/cpu.h>
15994 #include <asm/stackprotector.h>
15995
15996-DEFINE_PER_CPU(int, cpu_number);
15997+#ifdef CONFIG_SMP
15998+DEFINE_PER_CPU(unsigned int, cpu_number);
15999 EXPORT_PER_CPU_SYMBOL(cpu_number);
16000+#endif
16001
16002-#ifdef CONFIG_X86_64
16003 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16004-#else
16005-#define BOOT_PERCPU_OFFSET 0
16006-#endif
16007
16008 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16009 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16010
16011-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16012+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16013 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16014 };
16015 EXPORT_SYMBOL(__per_cpu_offset);
16016@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
16017 {
16018 #ifdef CONFIG_X86_32
16019 struct desc_struct gdt;
16020+ unsigned long base = per_cpu_offset(cpu);
16021
16022- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16023- 0x2 | DESCTYPE_S, 0x8);
16024- gdt.s = 1;
16025+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16026+ 0x83 | DESCTYPE_S, 0xC);
16027 write_gdt_entry(get_cpu_gdt_table(cpu),
16028 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16029 #endif
16030@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16031 /* alrighty, percpu areas up and running */
16032 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16033 for_each_possible_cpu(cpu) {
16034+#ifdef CONFIG_CC_STACKPROTECTOR
16035+#ifdef CONFIG_X86_32
16036+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16037+#endif
16038+#endif
16039 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16040 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16041 per_cpu(cpu_number, cpu) = cpu;
16042@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16043 */
16044 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16045 #endif
16046+#ifdef CONFIG_CC_STACKPROTECTOR
16047+#ifdef CONFIG_X86_32
16048+ if (!cpu)
16049+ per_cpu(stack_canary.canary, cpu) = canary;
16050+#endif
16051+#endif
16052 /*
16053 * Up to this point, the boot CPU has been using .init.data
16054 * area. Reload any changed state for the boot CPU.
16055diff -urNp linux-3.1.4/arch/x86/kernel/signal.c linux-3.1.4/arch/x86/kernel/signal.c
16056--- linux-3.1.4/arch/x86/kernel/signal.c 2011-11-11 15:19:27.000000000 -0500
16057+++ linux-3.1.4/arch/x86/kernel/signal.c 2011-11-16 19:39:49.000000000 -0500
16058@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
16059 * Align the stack pointer according to the i386 ABI,
16060 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16061 */
16062- sp = ((sp + 4) & -16ul) - 4;
16063+ sp = ((sp - 12) & -16ul) - 4;
16064 #else /* !CONFIG_X86_32 */
16065 sp = round_down(sp, 16) - 8;
16066 #endif
16067@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
16068 * Return an always-bogus address instead so we will die with SIGSEGV.
16069 */
16070 if (onsigstack && !likely(on_sig_stack(sp)))
16071- return (void __user *)-1L;
16072+ return (__force void __user *)-1L;
16073
16074 /* save i387 state */
16075 if (used_math() && save_i387_xstate(*fpstate) < 0)
16076- return (void __user *)-1L;
16077+ return (__force void __user *)-1L;
16078
16079 return (void __user *)sp;
16080 }
16081@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
16082 }
16083
16084 if (current->mm->context.vdso)
16085- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16086+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16087 else
16088- restorer = &frame->retcode;
16089+ restorer = (void __user *)&frame->retcode;
16090 if (ka->sa.sa_flags & SA_RESTORER)
16091 restorer = ka->sa.sa_restorer;
16092
16093@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
16094 * reasons and because gdb uses it as a signature to notice
16095 * signal handler stack frames.
16096 */
16097- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16098+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16099
16100 if (err)
16101 return -EFAULT;
16102@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
16103 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16104
16105 /* Set up to return from userspace. */
16106- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16107+ if (current->mm->context.vdso)
16108+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16109+ else
16110+ restorer = (void __user *)&frame->retcode;
16111 if (ka->sa.sa_flags & SA_RESTORER)
16112 restorer = ka->sa.sa_restorer;
16113 put_user_ex(restorer, &frame->pretcode);
16114@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
16115 * reasons and because gdb uses it as a signature to notice
16116 * signal handler stack frames.
16117 */
16118- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16119+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16120 } put_user_catch(err);
16121
16122 if (err)
16123@@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *re
16124 siginfo_t info;
16125 int signr;
16126
16127+ pax_track_stack();
16128+
16129 /*
16130 * We want the common case to go fast, which is why we may in certain
16131 * cases get here from kernel mode. Just return without doing anything
16132@@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *re
16133 * X86_32: vm86 regs switched out by assembly code before reaching
16134 * here, so testing against kernel CS suffices.
16135 */
16136- if (!user_mode(regs))
16137+ if (!user_mode_novm(regs))
16138 return;
16139
16140 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16141diff -urNp linux-3.1.4/arch/x86/kernel/smpboot.c linux-3.1.4/arch/x86/kernel/smpboot.c
16142--- linux-3.1.4/arch/x86/kernel/smpboot.c 2011-11-11 15:19:27.000000000 -0500
16143+++ linux-3.1.4/arch/x86/kernel/smpboot.c 2011-11-16 18:39:07.000000000 -0500
16144@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
16145 set_idle_for_cpu(cpu, c_idle.idle);
16146 do_rest:
16147 per_cpu(current_task, cpu) = c_idle.idle;
16148+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16149 #ifdef CONFIG_X86_32
16150 /* Stack for startup_32 can be just as for start_secondary onwards */
16151 irq_ctx_init(cpu);
16152 #else
16153 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16154 initial_gs = per_cpu_offset(cpu);
16155- per_cpu(kernel_stack, cpu) =
16156- (unsigned long)task_stack_page(c_idle.idle) -
16157- KERNEL_STACK_OFFSET + THREAD_SIZE;
16158+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16159 #endif
16160+
16161+ pax_open_kernel();
16162 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16163+ pax_close_kernel();
16164+
16165 initial_code = (unsigned long)start_secondary;
16166 stack_start = c_idle.idle->thread.sp;
16167
16168@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
16169
16170 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16171
16172+#ifdef CONFIG_PAX_PER_CPU_PGD
16173+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16174+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16175+ KERNEL_PGD_PTRS);
16176+#endif
16177+
16178 err = do_boot_cpu(apicid, cpu);
16179 if (err) {
16180 pr_debug("do_boot_cpu failed %d\n", err);
16181diff -urNp linux-3.1.4/arch/x86/kernel/step.c linux-3.1.4/arch/x86/kernel/step.c
16182--- linux-3.1.4/arch/x86/kernel/step.c 2011-11-11 15:19:27.000000000 -0500
16183+++ linux-3.1.4/arch/x86/kernel/step.c 2011-11-16 18:39:07.000000000 -0500
16184@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16185 struct desc_struct *desc;
16186 unsigned long base;
16187
16188- seg &= ~7UL;
16189+ seg >>= 3;
16190
16191 mutex_lock(&child->mm->context.lock);
16192- if (unlikely((seg >> 3) >= child->mm->context.size))
16193+ if (unlikely(seg >= child->mm->context.size))
16194 addr = -1L; /* bogus selector, access would fault */
16195 else {
16196 desc = child->mm->context.ldt + seg;
16197@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16198 addr += base;
16199 }
16200 mutex_unlock(&child->mm->context.lock);
16201- }
16202+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16203+ addr = ktla_ktva(addr);
16204
16205 return addr;
16206 }
16207@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16208 unsigned char opcode[15];
16209 unsigned long addr = convert_ip_to_linear(child, regs);
16210
16211+ if (addr == -EINVAL)
16212+ return 0;
16213+
16214 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16215 for (i = 0; i < copied; i++) {
16216 switch (opcode[i]) {
16217diff -urNp linux-3.1.4/arch/x86/kernel/syscall_table_32.S linux-3.1.4/arch/x86/kernel/syscall_table_32.S
16218--- linux-3.1.4/arch/x86/kernel/syscall_table_32.S 2011-11-11 15:19:27.000000000 -0500
16219+++ linux-3.1.4/arch/x86/kernel/syscall_table_32.S 2011-11-16 18:39:07.000000000 -0500
16220@@ -1,3 +1,4 @@
16221+.section .rodata,"a",@progbits
16222 ENTRY(sys_call_table)
16223 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16224 .long sys_exit
16225diff -urNp linux-3.1.4/arch/x86/kernel/sys_i386_32.c linux-3.1.4/arch/x86/kernel/sys_i386_32.c
16226--- linux-3.1.4/arch/x86/kernel/sys_i386_32.c 2011-11-11 15:19:27.000000000 -0500
16227+++ linux-3.1.4/arch/x86/kernel/sys_i386_32.c 2011-11-16 18:39:07.000000000 -0500
16228@@ -24,17 +24,224 @@
16229
16230 #include <asm/syscalls.h>
16231
16232-/*
16233- * Do a system call from kernel instead of calling sys_execve so we
16234- * end up with proper pt_regs.
16235- */
16236-int kernel_execve(const char *filename,
16237- const char *const argv[],
16238- const char *const envp[])
16239+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16240 {
16241- long __res;
16242- asm volatile ("int $0x80"
16243- : "=a" (__res)
16244- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
16245- return __res;
16246+ unsigned long pax_task_size = TASK_SIZE;
16247+
16248+#ifdef CONFIG_PAX_SEGMEXEC
16249+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16250+ pax_task_size = SEGMEXEC_TASK_SIZE;
16251+#endif
16252+
16253+ if (len > pax_task_size || addr > pax_task_size - len)
16254+ return -EINVAL;
16255+
16256+ return 0;
16257+}
16258+
16259+unsigned long
16260+arch_get_unmapped_area(struct file *filp, unsigned long addr,
16261+ unsigned long len, unsigned long pgoff, unsigned long flags)
16262+{
16263+ struct mm_struct *mm = current->mm;
16264+ struct vm_area_struct *vma;
16265+ unsigned long start_addr, pax_task_size = TASK_SIZE;
16266+
16267+#ifdef CONFIG_PAX_SEGMEXEC
16268+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16269+ pax_task_size = SEGMEXEC_TASK_SIZE;
16270+#endif
16271+
16272+ pax_task_size -= PAGE_SIZE;
16273+
16274+ if (len > pax_task_size)
16275+ return -ENOMEM;
16276+
16277+ if (flags & MAP_FIXED)
16278+ return addr;
16279+
16280+#ifdef CONFIG_PAX_RANDMMAP
16281+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16282+#endif
16283+
16284+ if (addr) {
16285+ addr = PAGE_ALIGN(addr);
16286+ if (pax_task_size - len >= addr) {
16287+ vma = find_vma(mm, addr);
16288+ if (check_heap_stack_gap(vma, addr, len))
16289+ return addr;
16290+ }
16291+ }
16292+ if (len > mm->cached_hole_size) {
16293+ start_addr = addr = mm->free_area_cache;
16294+ } else {
16295+ start_addr = addr = mm->mmap_base;
16296+ mm->cached_hole_size = 0;
16297+ }
16298+
16299+#ifdef CONFIG_PAX_PAGEEXEC
16300+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16301+ start_addr = 0x00110000UL;
16302+
16303+#ifdef CONFIG_PAX_RANDMMAP
16304+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16305+ start_addr += mm->delta_mmap & 0x03FFF000UL;
16306+#endif
16307+
16308+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16309+ start_addr = addr = mm->mmap_base;
16310+ else
16311+ addr = start_addr;
16312+ }
16313+#endif
16314+
16315+full_search:
16316+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16317+ /* At this point: (!vma || addr < vma->vm_end). */
16318+ if (pax_task_size - len < addr) {
16319+ /*
16320+ * Start a new search - just in case we missed
16321+ * some holes.
16322+ */
16323+ if (start_addr != mm->mmap_base) {
16324+ start_addr = addr = mm->mmap_base;
16325+ mm->cached_hole_size = 0;
16326+ goto full_search;
16327+ }
16328+ return -ENOMEM;
16329+ }
16330+ if (check_heap_stack_gap(vma, addr, len))
16331+ break;
16332+ if (addr + mm->cached_hole_size < vma->vm_start)
16333+ mm->cached_hole_size = vma->vm_start - addr;
16334+ addr = vma->vm_end;
16335+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
16336+ start_addr = addr = mm->mmap_base;
16337+ mm->cached_hole_size = 0;
16338+ goto full_search;
16339+ }
16340+ }
16341+
16342+ /*
16343+ * Remember the place where we stopped the search:
16344+ */
16345+ mm->free_area_cache = addr + len;
16346+ return addr;
16347+}
16348+
16349+unsigned long
16350+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16351+ const unsigned long len, const unsigned long pgoff,
16352+ const unsigned long flags)
16353+{
16354+ struct vm_area_struct *vma;
16355+ struct mm_struct *mm = current->mm;
16356+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16357+
16358+#ifdef CONFIG_PAX_SEGMEXEC
16359+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16360+ pax_task_size = SEGMEXEC_TASK_SIZE;
16361+#endif
16362+
16363+ pax_task_size -= PAGE_SIZE;
16364+
16365+ /* requested length too big for entire address space */
16366+ if (len > pax_task_size)
16367+ return -ENOMEM;
16368+
16369+ if (flags & MAP_FIXED)
16370+ return addr;
16371+
16372+#ifdef CONFIG_PAX_PAGEEXEC
16373+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16374+ goto bottomup;
16375+#endif
16376+
16377+#ifdef CONFIG_PAX_RANDMMAP
16378+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16379+#endif
16380+
16381+ /* requesting a specific address */
16382+ if (addr) {
16383+ addr = PAGE_ALIGN(addr);
16384+ if (pax_task_size - len >= addr) {
16385+ vma = find_vma(mm, addr);
16386+ if (check_heap_stack_gap(vma, addr, len))
16387+ return addr;
16388+ }
16389+ }
16390+
16391+ /* check if free_area_cache is useful for us */
16392+ if (len <= mm->cached_hole_size) {
16393+ mm->cached_hole_size = 0;
16394+ mm->free_area_cache = mm->mmap_base;
16395+ }
16396+
16397+ /* either no address requested or can't fit in requested address hole */
16398+ addr = mm->free_area_cache;
16399+
16400+ /* make sure it can fit in the remaining address space */
16401+ if (addr > len) {
16402+ vma = find_vma(mm, addr-len);
16403+ if (check_heap_stack_gap(vma, addr - len, len))
16404+ /* remember the address as a hint for next time */
16405+ return (mm->free_area_cache = addr-len);
16406+ }
16407+
16408+ if (mm->mmap_base < len)
16409+ goto bottomup;
16410+
16411+ addr = mm->mmap_base-len;
16412+
16413+ do {
16414+ /*
16415+ * Lookup failure means no vma is above this address,
16416+ * else if new region fits below vma->vm_start,
16417+ * return with success:
16418+ */
16419+ vma = find_vma(mm, addr);
16420+ if (check_heap_stack_gap(vma, addr, len))
16421+ /* remember the address as a hint for next time */
16422+ return (mm->free_area_cache = addr);
16423+
16424+ /* remember the largest hole we saw so far */
16425+ if (addr + mm->cached_hole_size < vma->vm_start)
16426+ mm->cached_hole_size = vma->vm_start - addr;
16427+
16428+ /* try just below the current vma->vm_start */
16429+ addr = skip_heap_stack_gap(vma, len);
16430+ } while (!IS_ERR_VALUE(addr));
16431+
16432+bottomup:
16433+ /*
16434+ * A failed mmap() very likely causes application failure,
16435+ * so fall back to the bottom-up function here. This scenario
16436+ * can happen with large stack limits and large mmap()
16437+ * allocations.
16438+ */
16439+
16440+#ifdef CONFIG_PAX_SEGMEXEC
16441+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16442+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16443+ else
16444+#endif
16445+
16446+ mm->mmap_base = TASK_UNMAPPED_BASE;
16447+
16448+#ifdef CONFIG_PAX_RANDMMAP
16449+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16450+ mm->mmap_base += mm->delta_mmap;
16451+#endif
16452+
16453+ mm->free_area_cache = mm->mmap_base;
16454+ mm->cached_hole_size = ~0UL;
16455+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16456+ /*
16457+ * Restore the topdown base:
16458+ */
16459+ mm->mmap_base = base;
16460+ mm->free_area_cache = base;
16461+ mm->cached_hole_size = ~0UL;
16462+
16463+ return addr;
16464 }
16465diff -urNp linux-3.1.4/arch/x86/kernel/sys_x86_64.c linux-3.1.4/arch/x86/kernel/sys_x86_64.c
16466--- linux-3.1.4/arch/x86/kernel/sys_x86_64.c 2011-11-11 15:19:27.000000000 -0500
16467+++ linux-3.1.4/arch/x86/kernel/sys_x86_64.c 2011-11-16 18:39:07.000000000 -0500
16468@@ -32,8 +32,8 @@ out:
16469 return error;
16470 }
16471
16472-static void find_start_end(unsigned long flags, unsigned long *begin,
16473- unsigned long *end)
16474+static void find_start_end(struct mm_struct *mm, unsigned long flags,
16475+ unsigned long *begin, unsigned long *end)
16476 {
16477 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16478 unsigned long new_begin;
16479@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16480 *begin = new_begin;
16481 }
16482 } else {
16483- *begin = TASK_UNMAPPED_BASE;
16484+ *begin = mm->mmap_base;
16485 *end = TASK_SIZE;
16486 }
16487 }
16488@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16489 if (flags & MAP_FIXED)
16490 return addr;
16491
16492- find_start_end(flags, &begin, &end);
16493+ find_start_end(mm, flags, &begin, &end);
16494
16495 if (len > end)
16496 return -ENOMEM;
16497
16498+#ifdef CONFIG_PAX_RANDMMAP
16499+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16500+#endif
16501+
16502 if (addr) {
16503 addr = PAGE_ALIGN(addr);
16504 vma = find_vma(mm, addr);
16505- if (end - len >= addr &&
16506- (!vma || addr + len <= vma->vm_start))
16507+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16508 return addr;
16509 }
16510 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16511@@ -106,7 +109,7 @@ full_search:
16512 }
16513 return -ENOMEM;
16514 }
16515- if (!vma || addr + len <= vma->vm_start) {
16516+ if (check_heap_stack_gap(vma, addr, len)) {
16517 /*
16518 * Remember the place where we stopped the search:
16519 */
16520@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16521 {
16522 struct vm_area_struct *vma;
16523 struct mm_struct *mm = current->mm;
16524- unsigned long addr = addr0;
16525+ unsigned long base = mm->mmap_base, addr = addr0;
16526
16527 /* requested length too big for entire address space */
16528 if (len > TASK_SIZE)
16529@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16530 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16531 goto bottomup;
16532
16533+#ifdef CONFIG_PAX_RANDMMAP
16534+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16535+#endif
16536+
16537 /* requesting a specific address */
16538 if (addr) {
16539 addr = PAGE_ALIGN(addr);
16540- vma = find_vma(mm, addr);
16541- if (TASK_SIZE - len >= addr &&
16542- (!vma || addr + len <= vma->vm_start))
16543- return addr;
16544+ if (TASK_SIZE - len >= addr) {
16545+ vma = find_vma(mm, addr);
16546+ if (check_heap_stack_gap(vma, addr, len))
16547+ return addr;
16548+ }
16549 }
16550
16551 /* check if free_area_cache is useful for us */
16552@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16553 /* make sure it can fit in the remaining address space */
16554 if (addr > len) {
16555 vma = find_vma(mm, addr-len);
16556- if (!vma || addr <= vma->vm_start)
16557+ if (check_heap_stack_gap(vma, addr - len, len))
16558 /* remember the address as a hint for next time */
16559 return mm->free_area_cache = addr-len;
16560 }
16561@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16562 * return with success:
16563 */
16564 vma = find_vma(mm, addr);
16565- if (!vma || addr+len <= vma->vm_start)
16566+ if (check_heap_stack_gap(vma, addr, len))
16567 /* remember the address as a hint for next time */
16568 return mm->free_area_cache = addr;
16569
16570@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16571 mm->cached_hole_size = vma->vm_start - addr;
16572
16573 /* try just below the current vma->vm_start */
16574- addr = vma->vm_start-len;
16575- } while (len < vma->vm_start);
16576+ addr = skip_heap_stack_gap(vma, len);
16577+ } while (!IS_ERR_VALUE(addr));
16578
16579 bottomup:
16580 /*
16581@@ -198,13 +206,21 @@ bottomup:
16582 * can happen with large stack limits and large mmap()
16583 * allocations.
16584 */
16585+ mm->mmap_base = TASK_UNMAPPED_BASE;
16586+
16587+#ifdef CONFIG_PAX_RANDMMAP
16588+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16589+ mm->mmap_base += mm->delta_mmap;
16590+#endif
16591+
16592+ mm->free_area_cache = mm->mmap_base;
16593 mm->cached_hole_size = ~0UL;
16594- mm->free_area_cache = TASK_UNMAPPED_BASE;
16595 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16596 /*
16597 * Restore the topdown base:
16598 */
16599- mm->free_area_cache = mm->mmap_base;
16600+ mm->mmap_base = base;
16601+ mm->free_area_cache = base;
16602 mm->cached_hole_size = ~0UL;
16603
16604 return addr;
16605diff -urNp linux-3.1.4/arch/x86/kernel/tboot.c linux-3.1.4/arch/x86/kernel/tboot.c
16606--- linux-3.1.4/arch/x86/kernel/tboot.c 2011-11-11 15:19:27.000000000 -0500
16607+++ linux-3.1.4/arch/x86/kernel/tboot.c 2011-11-16 18:39:07.000000000 -0500
16608@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
16609
16610 void tboot_shutdown(u32 shutdown_type)
16611 {
16612- void (*shutdown)(void);
16613+ void (* __noreturn shutdown)(void);
16614
16615 if (!tboot_enabled())
16616 return;
16617@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
16618
16619 switch_to_tboot_pt();
16620
16621- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16622+ shutdown = (void *)tboot->shutdown_entry;
16623 shutdown();
16624
16625 /* should not reach here */
16626@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16627 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16628 }
16629
16630-static atomic_t ap_wfs_count;
16631+static atomic_unchecked_t ap_wfs_count;
16632
16633 static int tboot_wait_for_aps(int num_aps)
16634 {
16635@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
16636 {
16637 switch (action) {
16638 case CPU_DYING:
16639- atomic_inc(&ap_wfs_count);
16640+ atomic_inc_unchecked(&ap_wfs_count);
16641 if (num_online_cpus() == 1)
16642- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16643+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16644 return NOTIFY_BAD;
16645 break;
16646 }
16647@@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
16648
16649 tboot_create_trampoline();
16650
16651- atomic_set(&ap_wfs_count, 0);
16652+ atomic_set_unchecked(&ap_wfs_count, 0);
16653 register_hotcpu_notifier(&tboot_cpu_notifier);
16654 return 0;
16655 }
16656diff -urNp linux-3.1.4/arch/x86/kernel/time.c linux-3.1.4/arch/x86/kernel/time.c
16657--- linux-3.1.4/arch/x86/kernel/time.c 2011-11-11 15:19:27.000000000 -0500
16658+++ linux-3.1.4/arch/x86/kernel/time.c 2011-11-16 18:39:07.000000000 -0500
16659@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
16660 {
16661 unsigned long pc = instruction_pointer(regs);
16662
16663- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16664+ if (!user_mode(regs) && in_lock_functions(pc)) {
16665 #ifdef CONFIG_FRAME_POINTER
16666- return *(unsigned long *)(regs->bp + sizeof(long));
16667+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16668 #else
16669 unsigned long *sp =
16670 (unsigned long *)kernel_stack_pointer(regs);
16671@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16672 * or above a saved flags. Eflags has bits 22-31 zero,
16673 * kernel addresses don't.
16674 */
16675+
16676+#ifdef CONFIG_PAX_KERNEXEC
16677+ return ktla_ktva(sp[0]);
16678+#else
16679 if (sp[0] >> 22)
16680 return sp[0];
16681 if (sp[1] >> 22)
16682 return sp[1];
16683 #endif
16684+
16685+#endif
16686 }
16687 return pc;
16688 }
16689diff -urNp linux-3.1.4/arch/x86/kernel/tls.c linux-3.1.4/arch/x86/kernel/tls.c
16690--- linux-3.1.4/arch/x86/kernel/tls.c 2011-11-11 15:19:27.000000000 -0500
16691+++ linux-3.1.4/arch/x86/kernel/tls.c 2011-11-16 18:39:07.000000000 -0500
16692@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16693 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16694 return -EINVAL;
16695
16696+#ifdef CONFIG_PAX_SEGMEXEC
16697+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16698+ return -EINVAL;
16699+#endif
16700+
16701 set_tls_desc(p, idx, &info, 1);
16702
16703 return 0;
16704diff -urNp linux-3.1.4/arch/x86/kernel/trampoline_32.S linux-3.1.4/arch/x86/kernel/trampoline_32.S
16705--- linux-3.1.4/arch/x86/kernel/trampoline_32.S 2011-11-11 15:19:27.000000000 -0500
16706+++ linux-3.1.4/arch/x86/kernel/trampoline_32.S 2011-11-16 18:39:07.000000000 -0500
16707@@ -32,6 +32,12 @@
16708 #include <asm/segment.h>
16709 #include <asm/page_types.h>
16710
16711+#ifdef CONFIG_PAX_KERNEXEC
16712+#define ta(X) (X)
16713+#else
16714+#define ta(X) ((X) - __PAGE_OFFSET)
16715+#endif
16716+
16717 #ifdef CONFIG_SMP
16718
16719 .section ".x86_trampoline","a"
16720@@ -62,7 +68,7 @@ r_base = .
16721 inc %ax # protected mode (PE) bit
16722 lmsw %ax # into protected mode
16723 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16724- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16725+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
16726
16727 # These need to be in the same 64K segment as the above;
16728 # hence we don't use the boot_gdt_descr defined in head.S
16729diff -urNp linux-3.1.4/arch/x86/kernel/trampoline_64.S linux-3.1.4/arch/x86/kernel/trampoline_64.S
16730--- linux-3.1.4/arch/x86/kernel/trampoline_64.S 2011-11-11 15:19:27.000000000 -0500
16731+++ linux-3.1.4/arch/x86/kernel/trampoline_64.S 2011-11-16 18:39:07.000000000 -0500
16732@@ -90,7 +90,7 @@ startup_32:
16733 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16734 movl %eax, %ds
16735
16736- movl $X86_CR4_PAE, %eax
16737+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16738 movl %eax, %cr4 # Enable PAE mode
16739
16740 # Setup trampoline 4 level pagetables
16741@@ -138,7 +138,7 @@ tidt:
16742 # so the kernel can live anywhere
16743 .balign 4
16744 tgdt:
16745- .short tgdt_end - tgdt # gdt limit
16746+ .short tgdt_end - tgdt - 1 # gdt limit
16747 .long tgdt - r_base
16748 .short 0
16749 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16750diff -urNp linux-3.1.4/arch/x86/kernel/traps.c linux-3.1.4/arch/x86/kernel/traps.c
16751--- linux-3.1.4/arch/x86/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
16752+++ linux-3.1.4/arch/x86/kernel/traps.c 2011-11-16 18:39:07.000000000 -0500
16753@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
16754
16755 /* Do we ignore FPU interrupts ? */
16756 char ignore_fpu_irq;
16757-
16758-/*
16759- * The IDT has to be page-aligned to simplify the Pentium
16760- * F0 0F bug workaround.
16761- */
16762-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16763 #endif
16764
16765 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16766@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
16767 }
16768
16769 static void __kprobes
16770-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16771+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16772 long error_code, siginfo_t *info)
16773 {
16774 struct task_struct *tsk = current;
16775
16776 #ifdef CONFIG_X86_32
16777- if (regs->flags & X86_VM_MASK) {
16778+ if (v8086_mode(regs)) {
16779 /*
16780 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16781 * On nmi (interrupt 2), do_trap should not be called.
16782@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
16783 }
16784 #endif
16785
16786- if (!user_mode(regs))
16787+ if (!user_mode_novm(regs))
16788 goto kernel_trap;
16789
16790 #ifdef CONFIG_X86_32
16791@@ -157,7 +151,7 @@ trap_signal:
16792 printk_ratelimit()) {
16793 printk(KERN_INFO
16794 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16795- tsk->comm, tsk->pid, str,
16796+ tsk->comm, task_pid_nr(tsk), str,
16797 regs->ip, regs->sp, error_code);
16798 print_vma_addr(" in ", regs->ip);
16799 printk("\n");
16800@@ -174,8 +168,20 @@ kernel_trap:
16801 if (!fixup_exception(regs)) {
16802 tsk->thread.error_code = error_code;
16803 tsk->thread.trap_no = trapnr;
16804+
16805+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16806+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16807+ str = "PAX: suspicious stack segment fault";
16808+#endif
16809+
16810 die(str, regs, error_code);
16811 }
16812+
16813+#ifdef CONFIG_PAX_REFCOUNT
16814+ if (trapnr == 4)
16815+ pax_report_refcount_overflow(regs);
16816+#endif
16817+
16818 return;
16819
16820 #ifdef CONFIG_X86_32
16821@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
16822 conditional_sti(regs);
16823
16824 #ifdef CONFIG_X86_32
16825- if (regs->flags & X86_VM_MASK)
16826+ if (v8086_mode(regs))
16827 goto gp_in_vm86;
16828 #endif
16829
16830 tsk = current;
16831- if (!user_mode(regs))
16832+ if (!user_mode_novm(regs))
16833 goto gp_in_kernel;
16834
16835+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16836+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16837+ struct mm_struct *mm = tsk->mm;
16838+ unsigned long limit;
16839+
16840+ down_write(&mm->mmap_sem);
16841+ limit = mm->context.user_cs_limit;
16842+ if (limit < TASK_SIZE) {
16843+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16844+ up_write(&mm->mmap_sem);
16845+ return;
16846+ }
16847+ up_write(&mm->mmap_sem);
16848+ }
16849+#endif
16850+
16851 tsk->thread.error_code = error_code;
16852 tsk->thread.trap_no = 13;
16853
16854@@ -304,6 +326,13 @@ gp_in_kernel:
16855 if (notify_die(DIE_GPF, "general protection fault", regs,
16856 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16857 return;
16858+
16859+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16860+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16861+ die("PAX: suspicious general protection fault", regs, error_code);
16862+ else
16863+#endif
16864+
16865 die("general protection fault", regs, error_code);
16866 }
16867
16868@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
16869 dotraplinkage notrace __kprobes void
16870 do_nmi(struct pt_regs *regs, long error_code)
16871 {
16872+
16873+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16874+ if (!user_mode(regs)) {
16875+ unsigned long cs = regs->cs & 0xFFFF;
16876+ unsigned long ip = ktva_ktla(regs->ip);
16877+
16878+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16879+ regs->ip = ip;
16880+ }
16881+#endif
16882+
16883 nmi_enter();
16884
16885 inc_irq_stat(__nmi_count);
16886@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
16887 /* It's safe to allow irq's after DR6 has been saved */
16888 preempt_conditional_sti(regs);
16889
16890- if (regs->flags & X86_VM_MASK) {
16891+ if (v8086_mode(regs)) {
16892 handle_vm86_trap((struct kernel_vm86_regs *) regs,
16893 error_code, 1);
16894 preempt_conditional_cli(regs);
16895@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
16896 * We already checked v86 mode above, so we can check for kernel mode
16897 * by just checking the CPL of CS.
16898 */
16899- if ((dr6 & DR_STEP) && !user_mode(regs)) {
16900+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
16901 tsk->thread.debugreg6 &= ~DR_STEP;
16902 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
16903 regs->flags &= ~X86_EFLAGS_TF;
16904@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
16905 return;
16906 conditional_sti(regs);
16907
16908- if (!user_mode_vm(regs))
16909+ if (!user_mode(regs))
16910 {
16911 if (!fixup_exception(regs)) {
16912 task->thread.error_code = error_code;
16913@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
16914 void __math_state_restore(void)
16915 {
16916 struct thread_info *thread = current_thread_info();
16917- struct task_struct *tsk = thread->task;
16918+ struct task_struct *tsk = current;
16919
16920 /*
16921 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16922@@ -750,8 +790,7 @@ void __math_state_restore(void)
16923 */
16924 asmlinkage void math_state_restore(void)
16925 {
16926- struct thread_info *thread = current_thread_info();
16927- struct task_struct *tsk = thread->task;
16928+ struct task_struct *tsk = current;
16929
16930 if (!tsk_used_math(tsk)) {
16931 local_irq_enable();
16932diff -urNp linux-3.1.4/arch/x86/kernel/verify_cpu.S linux-3.1.4/arch/x86/kernel/verify_cpu.S
16933--- linux-3.1.4/arch/x86/kernel/verify_cpu.S 2011-11-11 15:19:27.000000000 -0500
16934+++ linux-3.1.4/arch/x86/kernel/verify_cpu.S 2011-11-16 18:40:08.000000000 -0500
16935@@ -20,6 +20,7 @@
16936 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
16937 * arch/x86/kernel/trampoline_64.S: secondary processor verification
16938 * arch/x86/kernel/head_32.S: processor startup
16939+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
16940 *
16941 * verify_cpu, returns the status of longmode and SSE in register %eax.
16942 * 0: Success 1: Failure
16943diff -urNp linux-3.1.4/arch/x86/kernel/vm86_32.c linux-3.1.4/arch/x86/kernel/vm86_32.c
16944--- linux-3.1.4/arch/x86/kernel/vm86_32.c 2011-11-11 15:19:27.000000000 -0500
16945+++ linux-3.1.4/arch/x86/kernel/vm86_32.c 2011-11-16 18:40:08.000000000 -0500
16946@@ -41,6 +41,7 @@
16947 #include <linux/ptrace.h>
16948 #include <linux/audit.h>
16949 #include <linux/stddef.h>
16950+#include <linux/grsecurity.h>
16951
16952 #include <asm/uaccess.h>
16953 #include <asm/io.h>
16954@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16955 do_exit(SIGSEGV);
16956 }
16957
16958- tss = &per_cpu(init_tss, get_cpu());
16959+ tss = init_tss + get_cpu();
16960 current->thread.sp0 = current->thread.saved_sp0;
16961 current->thread.sysenter_cs = __KERNEL_CS;
16962 load_sp0(tss, &current->thread);
16963@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
16964 struct task_struct *tsk;
16965 int tmp, ret = -EPERM;
16966
16967+#ifdef CONFIG_GRKERNSEC_VM86
16968+ if (!capable(CAP_SYS_RAWIO)) {
16969+ gr_handle_vm86();
16970+ goto out;
16971+ }
16972+#endif
16973+
16974 tsk = current;
16975 if (tsk->thread.saved_sp0)
16976 goto out;
16977@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
16978 int tmp, ret;
16979 struct vm86plus_struct __user *v86;
16980
16981+#ifdef CONFIG_GRKERNSEC_VM86
16982+ if (!capable(CAP_SYS_RAWIO)) {
16983+ gr_handle_vm86();
16984+ ret = -EPERM;
16985+ goto out;
16986+ }
16987+#endif
16988+
16989 tsk = current;
16990 switch (cmd) {
16991 case VM86_REQUEST_IRQ:
16992@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16993 tsk->thread.saved_fs = info->regs32->fs;
16994 tsk->thread.saved_gs = get_user_gs(info->regs32);
16995
16996- tss = &per_cpu(init_tss, get_cpu());
16997+ tss = init_tss + get_cpu();
16998 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16999 if (cpu_has_sep)
17000 tsk->thread.sysenter_cs = 0;
17001@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17002 goto cannot_handle;
17003 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17004 goto cannot_handle;
17005- intr_ptr = (unsigned long __user *) (i << 2);
17006+ intr_ptr = (__force unsigned long __user *) (i << 2);
17007 if (get_user(segoffs, intr_ptr))
17008 goto cannot_handle;
17009 if ((segoffs >> 16) == BIOSSEG)
17010diff -urNp linux-3.1.4/arch/x86/kernel/vmlinux.lds.S linux-3.1.4/arch/x86/kernel/vmlinux.lds.S
17011--- linux-3.1.4/arch/x86/kernel/vmlinux.lds.S 2011-11-11 15:19:27.000000000 -0500
17012+++ linux-3.1.4/arch/x86/kernel/vmlinux.lds.S 2011-11-16 18:39:07.000000000 -0500
17013@@ -26,6 +26,13 @@
17014 #include <asm/page_types.h>
17015 #include <asm/cache.h>
17016 #include <asm/boot.h>
17017+#include <asm/segment.h>
17018+
17019+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17020+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17021+#else
17022+#define __KERNEL_TEXT_OFFSET 0
17023+#endif
17024
17025 #undef i386 /* in case the preprocessor is a 32bit one */
17026
17027@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17028
17029 PHDRS {
17030 text PT_LOAD FLAGS(5); /* R_E */
17031+#ifdef CONFIG_X86_32
17032+ module PT_LOAD FLAGS(5); /* R_E */
17033+#endif
17034+#ifdef CONFIG_XEN
17035+ rodata PT_LOAD FLAGS(5); /* R_E */
17036+#else
17037+ rodata PT_LOAD FLAGS(4); /* R__ */
17038+#endif
17039 data PT_LOAD FLAGS(6); /* RW_ */
17040-#ifdef CONFIG_X86_64
17041+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17042 #ifdef CONFIG_SMP
17043 percpu PT_LOAD FLAGS(6); /* RW_ */
17044 #endif
17045+ text.init PT_LOAD FLAGS(5); /* R_E */
17046+ text.exit PT_LOAD FLAGS(5); /* R_E */
17047 init PT_LOAD FLAGS(7); /* RWE */
17048-#endif
17049 note PT_NOTE FLAGS(0); /* ___ */
17050 }
17051
17052 SECTIONS
17053 {
17054 #ifdef CONFIG_X86_32
17055- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17056- phys_startup_32 = startup_32 - LOAD_OFFSET;
17057+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17058 #else
17059- . = __START_KERNEL;
17060- phys_startup_64 = startup_64 - LOAD_OFFSET;
17061+ . = __START_KERNEL;
17062 #endif
17063
17064 /* Text and read-only data */
17065- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17066- _text = .;
17067+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17068 /* bootstrapping code */
17069+#ifdef CONFIG_X86_32
17070+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17071+#else
17072+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17073+#endif
17074+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17075+ _text = .;
17076 HEAD_TEXT
17077 #ifdef CONFIG_X86_32
17078 . = ALIGN(PAGE_SIZE);
17079@@ -108,13 +128,47 @@ SECTIONS
17080 IRQENTRY_TEXT
17081 *(.fixup)
17082 *(.gnu.warning)
17083- /* End of text section */
17084- _etext = .;
17085 } :text = 0x9090
17086
17087- NOTES :text :note
17088+ . += __KERNEL_TEXT_OFFSET;
17089+
17090+#ifdef CONFIG_X86_32
17091+ . = ALIGN(PAGE_SIZE);
17092+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17093+
17094+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17095+ MODULES_EXEC_VADDR = .;
17096+ BYTE(0)
17097+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17098+ . = ALIGN(HPAGE_SIZE);
17099+ MODULES_EXEC_END = . - 1;
17100+#endif
17101+
17102+ } :module
17103+#endif
17104+
17105+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17106+ /* End of text section */
17107+ _etext = . - __KERNEL_TEXT_OFFSET;
17108+ }
17109+
17110+#ifdef CONFIG_X86_32
17111+ . = ALIGN(PAGE_SIZE);
17112+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17113+ *(.idt)
17114+ . = ALIGN(PAGE_SIZE);
17115+ *(.empty_zero_page)
17116+ *(.initial_pg_fixmap)
17117+ *(.initial_pg_pmd)
17118+ *(.initial_page_table)
17119+ *(.swapper_pg_dir)
17120+ } :rodata
17121+#endif
17122+
17123+ . = ALIGN(PAGE_SIZE);
17124+ NOTES :rodata :note
17125
17126- EXCEPTION_TABLE(16) :text = 0x9090
17127+ EXCEPTION_TABLE(16) :rodata
17128
17129 #if defined(CONFIG_DEBUG_RODATA)
17130 /* .text should occupy whole number of pages */
17131@@ -126,16 +180,20 @@ SECTIONS
17132
17133 /* Data */
17134 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17135+
17136+#ifdef CONFIG_PAX_KERNEXEC
17137+ . = ALIGN(HPAGE_SIZE);
17138+#else
17139+ . = ALIGN(PAGE_SIZE);
17140+#endif
17141+
17142 /* Start of data section */
17143 _sdata = .;
17144
17145 /* init_task */
17146 INIT_TASK_DATA(THREAD_SIZE)
17147
17148-#ifdef CONFIG_X86_32
17149- /* 32 bit has nosave before _edata */
17150 NOSAVE_DATA
17151-#endif
17152
17153 PAGE_ALIGNED_DATA(PAGE_SIZE)
17154
17155@@ -176,12 +234,19 @@ SECTIONS
17156 #endif /* CONFIG_X86_64 */
17157
17158 /* Init code and data - will be freed after init */
17159- . = ALIGN(PAGE_SIZE);
17160 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17161+ BYTE(0)
17162+
17163+#ifdef CONFIG_PAX_KERNEXEC
17164+ . = ALIGN(HPAGE_SIZE);
17165+#else
17166+ . = ALIGN(PAGE_SIZE);
17167+#endif
17168+
17169 __init_begin = .; /* paired with __init_end */
17170- }
17171+ } :init.begin
17172
17173-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17174+#ifdef CONFIG_SMP
17175 /*
17176 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17177 * output PHDR, so the next output section - .init.text - should
17178@@ -190,12 +255,27 @@ SECTIONS
17179 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17180 #endif
17181
17182- INIT_TEXT_SECTION(PAGE_SIZE)
17183-#ifdef CONFIG_X86_64
17184- :init
17185-#endif
17186+ . = ALIGN(PAGE_SIZE);
17187+ init_begin = .;
17188+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17189+ VMLINUX_SYMBOL(_sinittext) = .;
17190+ INIT_TEXT
17191+ VMLINUX_SYMBOL(_einittext) = .;
17192+ . = ALIGN(PAGE_SIZE);
17193+ } :text.init
17194
17195- INIT_DATA_SECTION(16)
17196+ /*
17197+ * .exit.text is discarded at runtime, not link time, to deal with
17198+ * references from .altinstructions and .eh_frame
17199+ */
17200+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17201+ EXIT_TEXT
17202+ . = ALIGN(16);
17203+ } :text.exit
17204+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17205+
17206+ . = ALIGN(PAGE_SIZE);
17207+ INIT_DATA_SECTION(16) :init
17208
17209 /*
17210 * Code and data for a variety of lowlevel trampolines, to be
17211@@ -269,19 +349,12 @@ SECTIONS
17212 }
17213
17214 . = ALIGN(8);
17215- /*
17216- * .exit.text is discard at runtime, not link time, to deal with
17217- * references from .altinstructions and .eh_frame
17218- */
17219- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17220- EXIT_TEXT
17221- }
17222
17223 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17224 EXIT_DATA
17225 }
17226
17227-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17228+#ifndef CONFIG_SMP
17229 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
17230 #endif
17231
17232@@ -300,16 +373,10 @@ SECTIONS
17233 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
17234 __smp_locks = .;
17235 *(.smp_locks)
17236- . = ALIGN(PAGE_SIZE);
17237 __smp_locks_end = .;
17238+ . = ALIGN(PAGE_SIZE);
17239 }
17240
17241-#ifdef CONFIG_X86_64
17242- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17243- NOSAVE_DATA
17244- }
17245-#endif
17246-
17247 /* BSS */
17248 . = ALIGN(PAGE_SIZE);
17249 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17250@@ -325,6 +392,7 @@ SECTIONS
17251 __brk_base = .;
17252 . += 64 * 1024; /* 64k alignment slop space */
17253 *(.brk_reservation) /* areas brk users have reserved */
17254+ . = ALIGN(HPAGE_SIZE);
17255 __brk_limit = .;
17256 }
17257
17258@@ -351,13 +419,12 @@ SECTIONS
17259 * for the boot processor.
17260 */
17261 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
17262-INIT_PER_CPU(gdt_page);
17263 INIT_PER_CPU(irq_stack_union);
17264
17265 /*
17266 * Build-time check on the image size:
17267 */
17268-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
17269+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
17270 "kernel image bigger than KERNEL_IMAGE_SIZE");
17271
17272 #ifdef CONFIG_SMP
17273diff -urNp linux-3.1.4/arch/x86/kernel/vsyscall_64.c linux-3.1.4/arch/x86/kernel/vsyscall_64.c
17274--- linux-3.1.4/arch/x86/kernel/vsyscall_64.c 2011-11-11 15:19:27.000000000 -0500
17275+++ linux-3.1.4/arch/x86/kernel/vsyscall_64.c 2011-11-16 18:39:07.000000000 -0500
17276@@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, v
17277 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
17278 };
17279
17280-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
17281+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
17282
17283 static int __init vsyscall_setup(char *str)
17284 {
17285 if (str) {
17286 if (!strcmp("emulate", str))
17287 vsyscall_mode = EMULATE;
17288- else if (!strcmp("native", str))
17289- vsyscall_mode = NATIVE;
17290 else if (!strcmp("none", str))
17291 vsyscall_mode = NONE;
17292 else
17293@@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *re
17294
17295 tsk = current;
17296 if (seccomp_mode(&tsk->seccomp))
17297- do_exit(SIGKILL);
17298+ do_group_exit(SIGKILL);
17299
17300 switch (vsyscall_nr) {
17301 case 0:
17302@@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *re
17303 return true;
17304
17305 sigsegv:
17306- force_sig(SIGSEGV, current);
17307- return true;
17308+ do_group_exit(SIGKILL);
17309 }
17310
17311 /*
17312@@ -273,10 +270,7 @@ void __init map_vsyscall(void)
17313 extern char __vvar_page;
17314 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
17315
17316- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
17317- vsyscall_mode == NATIVE
17318- ? PAGE_KERNEL_VSYSCALL
17319- : PAGE_KERNEL_VVAR);
17320+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
17321 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
17322 (unsigned long)VSYSCALL_START);
17323
17324diff -urNp linux-3.1.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.1.4/arch/x86/kernel/x8664_ksyms_64.c
17325--- linux-3.1.4/arch/x86/kernel/x8664_ksyms_64.c 2011-11-11 15:19:27.000000000 -0500
17326+++ linux-3.1.4/arch/x86/kernel/x8664_ksyms_64.c 2011-11-16 18:39:07.000000000 -0500
17327@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
17328 EXPORT_SYMBOL(copy_user_generic_string);
17329 EXPORT_SYMBOL(copy_user_generic_unrolled);
17330 EXPORT_SYMBOL(__copy_user_nocache);
17331-EXPORT_SYMBOL(_copy_from_user);
17332-EXPORT_SYMBOL(_copy_to_user);
17333
17334 EXPORT_SYMBOL(copy_page);
17335 EXPORT_SYMBOL(clear_page);
17336diff -urNp linux-3.1.4/arch/x86/kernel/xsave.c linux-3.1.4/arch/x86/kernel/xsave.c
17337--- linux-3.1.4/arch/x86/kernel/xsave.c 2011-11-11 15:19:27.000000000 -0500
17338+++ linux-3.1.4/arch/x86/kernel/xsave.c 2011-11-16 18:39:07.000000000 -0500
17339@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
17340 fx_sw_user->xstate_size > fx_sw_user->extended_size)
17341 return -EINVAL;
17342
17343- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
17344+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
17345 fx_sw_user->extended_size -
17346 FP_XSTATE_MAGIC2_SIZE));
17347 if (err)
17348@@ -267,7 +267,7 @@ fx_only:
17349 * the other extended state.
17350 */
17351 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
17352- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
17353+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
17354 }
17355
17356 /*
17357@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
17358 if (use_xsave())
17359 err = restore_user_xstate(buf);
17360 else
17361- err = fxrstor_checking((__force struct i387_fxsave_struct *)
17362+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
17363 buf);
17364 if (unlikely(err)) {
17365 /*
17366diff -urNp linux-3.1.4/arch/x86/kvm/emulate.c linux-3.1.4/arch/x86/kvm/emulate.c
17367--- linux-3.1.4/arch/x86/kvm/emulate.c 2011-11-11 15:19:27.000000000 -0500
17368+++ linux-3.1.4/arch/x86/kvm/emulate.c 2011-11-16 18:39:07.000000000 -0500
17369@@ -96,7 +96,7 @@
17370 #define Src2ImmByte (2<<29)
17371 #define Src2One (3<<29)
17372 #define Src2Imm (4<<29)
17373-#define Src2Mask (7<<29)
17374+#define Src2Mask (7U<<29)
17375
17376 #define X2(x...) x, x
17377 #define X3(x...) X2(x), x
17378@@ -207,6 +207,7 @@ struct gprefix {
17379
17380 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
17381 do { \
17382+ unsigned long _tmp; \
17383 __asm__ __volatile__ ( \
17384 _PRE_EFLAGS("0", "4", "2") \
17385 _op _suffix " %"_x"3,%1; " \
17386@@ -220,8 +221,6 @@ struct gprefix {
17387 /* Raw emulation: instruction has two explicit operands. */
17388 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
17389 do { \
17390- unsigned long _tmp; \
17391- \
17392 switch ((_dst).bytes) { \
17393 case 2: \
17394 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
17395@@ -237,7 +236,6 @@ struct gprefix {
17396
17397 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
17398 do { \
17399- unsigned long _tmp; \
17400 switch ((_dst).bytes) { \
17401 case 1: \
17402 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
17403diff -urNp linux-3.1.4/arch/x86/kvm/lapic.c linux-3.1.4/arch/x86/kvm/lapic.c
17404--- linux-3.1.4/arch/x86/kvm/lapic.c 2011-11-11 15:19:27.000000000 -0500
17405+++ linux-3.1.4/arch/x86/kvm/lapic.c 2011-11-16 18:39:07.000000000 -0500
17406@@ -53,7 +53,7 @@
17407 #define APIC_BUS_CYCLE_NS 1
17408
17409 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
17410-#define apic_debug(fmt, arg...)
17411+#define apic_debug(fmt, arg...) do {} while (0)
17412
17413 #define APIC_LVT_NUM 6
17414 /* 14 is the version for Xeon and Pentium 8.4.8*/
17415diff -urNp linux-3.1.4/arch/x86/kvm/mmu.c linux-3.1.4/arch/x86/kvm/mmu.c
17416--- linux-3.1.4/arch/x86/kvm/mmu.c 2011-11-11 15:19:27.000000000 -0500
17417+++ linux-3.1.4/arch/x86/kvm/mmu.c 2011-11-16 18:39:07.000000000 -0500
17418@@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
17419
17420 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
17421
17422- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
17423+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
17424
17425 /*
17426 * Assume that the pte write on a page table of the same type
17427@@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
17428 }
17429
17430 spin_lock(&vcpu->kvm->mmu_lock);
17431- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
17432+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
17433 gentry = 0;
17434 kvm_mmu_free_some_pages(vcpu);
17435 ++vcpu->kvm->stat.mmu_pte_write;
17436diff -urNp linux-3.1.4/arch/x86/kvm/paging_tmpl.h linux-3.1.4/arch/x86/kvm/paging_tmpl.h
17437--- linux-3.1.4/arch/x86/kvm/paging_tmpl.h 2011-11-11 15:19:27.000000000 -0500
17438+++ linux-3.1.4/arch/x86/kvm/paging_tmpl.h 2011-11-16 19:40:44.000000000 -0500
17439@@ -197,7 +197,7 @@ retry_walk:
17440 if (unlikely(kvm_is_error_hva(host_addr)))
17441 goto error;
17442
17443- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
17444+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
17445 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
17446 goto error;
17447
17448@@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_
17449 unsigned long mmu_seq;
17450 bool map_writable;
17451
17452+ pax_track_stack();
17453+
17454 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
17455
17456 if (unlikely(error_code & PFERR_RSVD_MASK))
17457@@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcp
17458 if (need_flush)
17459 kvm_flush_remote_tlbs(vcpu->kvm);
17460
17461- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
17462+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
17463
17464 spin_unlock(&vcpu->kvm->mmu_lock);
17465
17466diff -urNp linux-3.1.4/arch/x86/kvm/svm.c linux-3.1.4/arch/x86/kvm/svm.c
17467--- linux-3.1.4/arch/x86/kvm/svm.c 2011-11-11 15:19:27.000000000 -0500
17468+++ linux-3.1.4/arch/x86/kvm/svm.c 2011-11-16 18:39:07.000000000 -0500
17469@@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *
17470 int cpu = raw_smp_processor_id();
17471
17472 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
17473+
17474+ pax_open_kernel();
17475 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
17476+ pax_close_kernel();
17477+
17478 load_TR_desc();
17479 }
17480
17481@@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu
17482 #endif
17483 #endif
17484
17485+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17486+ __set_fs(current_thread_info()->addr_limit);
17487+#endif
17488+
17489 reload_tss(vcpu);
17490
17491 local_irq_disable();
17492diff -urNp linux-3.1.4/arch/x86/kvm/vmx.c linux-3.1.4/arch/x86/kvm/vmx.c
17493--- linux-3.1.4/arch/x86/kvm/vmx.c 2011-11-11 15:19:27.000000000 -0500
17494+++ linux-3.1.4/arch/x86/kvm/vmx.c 2011-11-16 18:39:07.000000000 -0500
17495@@ -1251,7 +1251,11 @@ static void reload_tss(void)
17496 struct desc_struct *descs;
17497
17498 descs = (void *)gdt->address;
17499+
17500+ pax_open_kernel();
17501 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17502+ pax_close_kernel();
17503+
17504 load_TR_desc();
17505 }
17506
17507@@ -2520,8 +2524,11 @@ static __init int hardware_setup(void)
17508 if (!cpu_has_vmx_flexpriority())
17509 flexpriority_enabled = 0;
17510
17511- if (!cpu_has_vmx_tpr_shadow())
17512- kvm_x86_ops->update_cr8_intercept = NULL;
17513+ if (!cpu_has_vmx_tpr_shadow()) {
17514+ pax_open_kernel();
17515+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17516+ pax_close_kernel();
17517+ }
17518
17519 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17520 kvm_disable_largepages();
17521@@ -3535,7 +3542,7 @@ static void vmx_set_constant_host_state(
17522 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
17523
17524 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
17525- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
17526+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
17527
17528 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
17529 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
17530@@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struc
17531 "jmp .Lkvm_vmx_return \n\t"
17532 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17533 ".Lkvm_vmx_return: "
17534+
17535+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17536+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17537+ ".Lkvm_vmx_return2: "
17538+#endif
17539+
17540 /* Save guest registers, load host registers, keep flags */
17541 "mov %0, %c[wordsize](%%"R"sp) \n\t"
17542 "pop %0 \n\t"
17543@@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struc
17544 #endif
17545 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
17546 [wordsize]"i"(sizeof(ulong))
17547+
17548+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17549+ ,[cs]"i"(__KERNEL_CS)
17550+#endif
17551+
17552 : "cc", "memory"
17553 , R"ax", R"bx", R"di", R"si"
17554 #ifdef CONFIG_X86_64
17555@@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struc
17556 }
17557 }
17558
17559- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17560+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17561+
17562+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17563+ loadsegment(fs, __KERNEL_PERCPU);
17564+#endif
17565+
17566+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17567+ __set_fs(current_thread_info()->addr_limit);
17568+#endif
17569+
17570 vmx->loaded_vmcs->launched = 1;
17571
17572 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
17573diff -urNp linux-3.1.4/arch/x86/kvm/x86.c linux-3.1.4/arch/x86/kvm/x86.c
17574--- linux-3.1.4/arch/x86/kvm/x86.c 2011-11-11 15:19:27.000000000 -0500
17575+++ linux-3.1.4/arch/x86/kvm/x86.c 2011-11-16 18:39:07.000000000 -0500
17576@@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcp
17577 {
17578 struct kvm *kvm = vcpu->kvm;
17579 int lm = is_long_mode(vcpu);
17580- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17581- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17582+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17583+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17584 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
17585 : kvm->arch.xen_hvm_config.blob_size_32;
17586 u32 page_num = data & ~PAGE_MASK;
17587@@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *fil
17588 if (n < msr_list.nmsrs)
17589 goto out;
17590 r = -EFAULT;
17591+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
17592+ goto out;
17593 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
17594 num_msrs_to_save * sizeof(u32)))
17595 goto out;
17596@@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17597 struct kvm_cpuid2 *cpuid,
17598 struct kvm_cpuid_entry2 __user *entries)
17599 {
17600- int r;
17601+ int r, i;
17602
17603 r = -E2BIG;
17604 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17605 goto out;
17606 r = -EFAULT;
17607- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17608- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17609+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17610 goto out;
17611+ for (i = 0; i < cpuid->nent; ++i) {
17612+ struct kvm_cpuid_entry2 cpuid_entry;
17613+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17614+ goto out;
17615+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
17616+ }
17617 vcpu->arch.cpuid_nent = cpuid->nent;
17618 kvm_apic_set_version(vcpu);
17619 kvm_x86_ops->cpuid_update(vcpu);
17620@@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17621 struct kvm_cpuid2 *cpuid,
17622 struct kvm_cpuid_entry2 __user *entries)
17623 {
17624- int r;
17625+ int r, i;
17626
17627 r = -E2BIG;
17628 if (cpuid->nent < vcpu->arch.cpuid_nent)
17629 goto out;
17630 r = -EFAULT;
17631- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17632- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17633+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17634 goto out;
17635+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17636+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17637+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17638+ goto out;
17639+ }
17640 return 0;
17641
17642 out:
17643@@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17644 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17645 struct kvm_interrupt *irq)
17646 {
17647- if (irq->irq < 0 || irq->irq >= 256)
17648+ if (irq->irq >= 256)
17649 return -EINVAL;
17650 if (irqchip_in_kernel(vcpu->kvm))
17651 return -ENXIO;
17652@@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void)
17653 kvm_mmu_set_mmio_spte_mask(mask);
17654 }
17655
17656-int kvm_arch_init(void *opaque)
17657+int kvm_arch_init(const void *opaque)
17658 {
17659 int r;
17660 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17661diff -urNp linux-3.1.4/arch/x86/lguest/boot.c linux-3.1.4/arch/x86/lguest/boot.c
17662--- linux-3.1.4/arch/x86/lguest/boot.c 2011-11-11 15:19:27.000000000 -0500
17663+++ linux-3.1.4/arch/x86/lguest/boot.c 2011-11-16 18:39:07.000000000 -0500
17664@@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vt
17665 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
17666 * Launcher to reboot us.
17667 */
17668-static void lguest_restart(char *reason)
17669+static __noreturn void lguest_restart(char *reason)
17670 {
17671 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
17672+ BUG();
17673 }
17674
17675 /*G:050
17676diff -urNp linux-3.1.4/arch/x86/lib/atomic64_32.c linux-3.1.4/arch/x86/lib/atomic64_32.c
17677--- linux-3.1.4/arch/x86/lib/atomic64_32.c 2011-11-11 15:19:27.000000000 -0500
17678+++ linux-3.1.4/arch/x86/lib/atomic64_32.c 2011-11-16 18:39:07.000000000 -0500
17679@@ -8,18 +8,30 @@
17680
17681 long long atomic64_read_cx8(long long, const atomic64_t *v);
17682 EXPORT_SYMBOL(atomic64_read_cx8);
17683+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17684+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
17685 long long atomic64_set_cx8(long long, const atomic64_t *v);
17686 EXPORT_SYMBOL(atomic64_set_cx8);
17687+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17688+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
17689 long long atomic64_xchg_cx8(long long, unsigned high);
17690 EXPORT_SYMBOL(atomic64_xchg_cx8);
17691 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
17692 EXPORT_SYMBOL(atomic64_add_return_cx8);
17693+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17694+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
17695 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
17696 EXPORT_SYMBOL(atomic64_sub_return_cx8);
17697+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17698+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
17699 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
17700 EXPORT_SYMBOL(atomic64_inc_return_cx8);
17701+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17702+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
17703 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
17704 EXPORT_SYMBOL(atomic64_dec_return_cx8);
17705+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17706+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
17707 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
17708 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
17709 int atomic64_inc_not_zero_cx8(atomic64_t *v);
17710@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
17711 #ifndef CONFIG_X86_CMPXCHG64
17712 long long atomic64_read_386(long long, const atomic64_t *v);
17713 EXPORT_SYMBOL(atomic64_read_386);
17714+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
17715+EXPORT_SYMBOL(atomic64_read_unchecked_386);
17716 long long atomic64_set_386(long long, const atomic64_t *v);
17717 EXPORT_SYMBOL(atomic64_set_386);
17718+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
17719+EXPORT_SYMBOL(atomic64_set_unchecked_386);
17720 long long atomic64_xchg_386(long long, unsigned high);
17721 EXPORT_SYMBOL(atomic64_xchg_386);
17722 long long atomic64_add_return_386(long long a, atomic64_t *v);
17723 EXPORT_SYMBOL(atomic64_add_return_386);
17724+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17725+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
17726 long long atomic64_sub_return_386(long long a, atomic64_t *v);
17727 EXPORT_SYMBOL(atomic64_sub_return_386);
17728+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17729+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
17730 long long atomic64_inc_return_386(long long a, atomic64_t *v);
17731 EXPORT_SYMBOL(atomic64_inc_return_386);
17732+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17733+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
17734 long long atomic64_dec_return_386(long long a, atomic64_t *v);
17735 EXPORT_SYMBOL(atomic64_dec_return_386);
17736+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17737+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
17738 long long atomic64_add_386(long long a, atomic64_t *v);
17739 EXPORT_SYMBOL(atomic64_add_386);
17740+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
17741+EXPORT_SYMBOL(atomic64_add_unchecked_386);
17742 long long atomic64_sub_386(long long a, atomic64_t *v);
17743 EXPORT_SYMBOL(atomic64_sub_386);
17744+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
17745+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
17746 long long atomic64_inc_386(long long a, atomic64_t *v);
17747 EXPORT_SYMBOL(atomic64_inc_386);
17748+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
17749+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
17750 long long atomic64_dec_386(long long a, atomic64_t *v);
17751 EXPORT_SYMBOL(atomic64_dec_386);
17752+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
17753+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
17754 long long atomic64_dec_if_positive_386(atomic64_t *v);
17755 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
17756 int atomic64_inc_not_zero_386(atomic64_t *v);
17757diff -urNp linux-3.1.4/arch/x86/lib/atomic64_386_32.S linux-3.1.4/arch/x86/lib/atomic64_386_32.S
17758--- linux-3.1.4/arch/x86/lib/atomic64_386_32.S 2011-11-11 15:19:27.000000000 -0500
17759+++ linux-3.1.4/arch/x86/lib/atomic64_386_32.S 2011-11-16 18:39:07.000000000 -0500
17760@@ -48,6 +48,10 @@ BEGIN(read)
17761 movl (v), %eax
17762 movl 4(v), %edx
17763 RET_ENDP
17764+BEGIN(read_unchecked)
17765+ movl (v), %eax
17766+ movl 4(v), %edx
17767+RET_ENDP
17768 #undef v
17769
17770 #define v %esi
17771@@ -55,6 +59,10 @@ BEGIN(set)
17772 movl %ebx, (v)
17773 movl %ecx, 4(v)
17774 RET_ENDP
17775+BEGIN(set_unchecked)
17776+ movl %ebx, (v)
17777+ movl %ecx, 4(v)
17778+RET_ENDP
17779 #undef v
17780
17781 #define v %esi
17782@@ -70,6 +78,20 @@ RET_ENDP
17783 BEGIN(add)
17784 addl %eax, (v)
17785 adcl %edx, 4(v)
17786+
17787+#ifdef CONFIG_PAX_REFCOUNT
17788+ jno 0f
17789+ subl %eax, (v)
17790+ sbbl %edx, 4(v)
17791+ int $4
17792+0:
17793+ _ASM_EXTABLE(0b, 0b)
17794+#endif
17795+
17796+RET_ENDP
17797+BEGIN(add_unchecked)
17798+ addl %eax, (v)
17799+ adcl %edx, 4(v)
17800 RET_ENDP
17801 #undef v
17802
17803@@ -77,6 +99,24 @@ RET_ENDP
17804 BEGIN(add_return)
17805 addl (v), %eax
17806 adcl 4(v), %edx
17807+
17808+#ifdef CONFIG_PAX_REFCOUNT
17809+ into
17810+1234:
17811+ _ASM_EXTABLE(1234b, 2f)
17812+#endif
17813+
17814+ movl %eax, (v)
17815+ movl %edx, 4(v)
17816+
17817+#ifdef CONFIG_PAX_REFCOUNT
17818+2:
17819+#endif
17820+
17821+RET_ENDP
17822+BEGIN(add_return_unchecked)
17823+ addl (v), %eax
17824+ adcl 4(v), %edx
17825 movl %eax, (v)
17826 movl %edx, 4(v)
17827 RET_ENDP
17828@@ -86,6 +126,20 @@ RET_ENDP
17829 BEGIN(sub)
17830 subl %eax, (v)
17831 sbbl %edx, 4(v)
17832+
17833+#ifdef CONFIG_PAX_REFCOUNT
17834+ jno 0f
17835+ addl %eax, (v)
17836+ adcl %edx, 4(v)
17837+ int $4
17838+0:
17839+ _ASM_EXTABLE(0b, 0b)
17840+#endif
17841+
17842+RET_ENDP
17843+BEGIN(sub_unchecked)
17844+ subl %eax, (v)
17845+ sbbl %edx, 4(v)
17846 RET_ENDP
17847 #undef v
17848
17849@@ -96,6 +150,27 @@ BEGIN(sub_return)
17850 sbbl $0, %edx
17851 addl (v), %eax
17852 adcl 4(v), %edx
17853+
17854+#ifdef CONFIG_PAX_REFCOUNT
17855+ into
17856+1234:
17857+ _ASM_EXTABLE(1234b, 2f)
17858+#endif
17859+
17860+ movl %eax, (v)
17861+ movl %edx, 4(v)
17862+
17863+#ifdef CONFIG_PAX_REFCOUNT
17864+2:
17865+#endif
17866+
17867+RET_ENDP
17868+BEGIN(sub_return_unchecked)
17869+ negl %edx
17870+ negl %eax
17871+ sbbl $0, %edx
17872+ addl (v), %eax
17873+ adcl 4(v), %edx
17874 movl %eax, (v)
17875 movl %edx, 4(v)
17876 RET_ENDP
17877@@ -105,6 +180,20 @@ RET_ENDP
17878 BEGIN(inc)
17879 addl $1, (v)
17880 adcl $0, 4(v)
17881+
17882+#ifdef CONFIG_PAX_REFCOUNT
17883+ jno 0f
17884+ subl $1, (v)
17885+ sbbl $0, 4(v)
17886+ int $4
17887+0:
17888+ _ASM_EXTABLE(0b, 0b)
17889+#endif
17890+
17891+RET_ENDP
17892+BEGIN(inc_unchecked)
17893+ addl $1, (v)
17894+ adcl $0, 4(v)
17895 RET_ENDP
17896 #undef v
17897
17898@@ -114,6 +203,26 @@ BEGIN(inc_return)
17899 movl 4(v), %edx
17900 addl $1, %eax
17901 adcl $0, %edx
17902+
17903+#ifdef CONFIG_PAX_REFCOUNT
17904+ into
17905+1234:
17906+ _ASM_EXTABLE(1234b, 2f)
17907+#endif
17908+
17909+ movl %eax, (v)
17910+ movl %edx, 4(v)
17911+
17912+#ifdef CONFIG_PAX_REFCOUNT
17913+2:
17914+#endif
17915+
17916+RET_ENDP
17917+BEGIN(inc_return_unchecked)
17918+ movl (v), %eax
17919+ movl 4(v), %edx
17920+ addl $1, %eax
17921+ adcl $0, %edx
17922 movl %eax, (v)
17923 movl %edx, 4(v)
17924 RET_ENDP
17925@@ -123,6 +232,20 @@ RET_ENDP
17926 BEGIN(dec)
17927 subl $1, (v)
17928 sbbl $0, 4(v)
17929+
17930+#ifdef CONFIG_PAX_REFCOUNT
17931+ jno 0f
17932+ addl $1, (v)
17933+ adcl $0, 4(v)
17934+ int $4
17935+0:
17936+ _ASM_EXTABLE(0b, 0b)
17937+#endif
17938+
17939+RET_ENDP
17940+BEGIN(dec_unchecked)
17941+ subl $1, (v)
17942+ sbbl $0, 4(v)
17943 RET_ENDP
17944 #undef v
17945
17946@@ -132,6 +255,26 @@ BEGIN(dec_return)
17947 movl 4(v), %edx
17948 subl $1, %eax
17949 sbbl $0, %edx
17950+
17951+#ifdef CONFIG_PAX_REFCOUNT
17952+ into
17953+1234:
17954+ _ASM_EXTABLE(1234b, 2f)
17955+#endif
17956+
17957+ movl %eax, (v)
17958+ movl %edx, 4(v)
17959+
17960+#ifdef CONFIG_PAX_REFCOUNT
17961+2:
17962+#endif
17963+
17964+RET_ENDP
17965+BEGIN(dec_return_unchecked)
17966+ movl (v), %eax
17967+ movl 4(v), %edx
17968+ subl $1, %eax
17969+ sbbl $0, %edx
17970 movl %eax, (v)
17971 movl %edx, 4(v)
17972 RET_ENDP
17973@@ -143,6 +286,13 @@ BEGIN(add_unless)
17974 adcl %edx, %edi
17975 addl (v), %eax
17976 adcl 4(v), %edx
17977+
17978+#ifdef CONFIG_PAX_REFCOUNT
17979+ into
17980+1234:
17981+ _ASM_EXTABLE(1234b, 2f)
17982+#endif
17983+
17984 cmpl %eax, %esi
17985 je 3f
17986 1:
17987@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
17988 1:
17989 addl $1, %eax
17990 adcl $0, %edx
17991+
17992+#ifdef CONFIG_PAX_REFCOUNT
17993+ into
17994+1234:
17995+ _ASM_EXTABLE(1234b, 2f)
17996+#endif
17997+
17998 movl %eax, (v)
17999 movl %edx, 4(v)
18000 movl $1, %eax
18001@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18002 movl 4(v), %edx
18003 subl $1, %eax
18004 sbbl $0, %edx
18005+
18006+#ifdef CONFIG_PAX_REFCOUNT
18007+ into
18008+1234:
18009+ _ASM_EXTABLE(1234b, 1f)
18010+#endif
18011+
18012 js 1f
18013 movl %eax, (v)
18014 movl %edx, 4(v)
18015diff -urNp linux-3.1.4/arch/x86/lib/atomic64_cx8_32.S linux-3.1.4/arch/x86/lib/atomic64_cx8_32.S
18016--- linux-3.1.4/arch/x86/lib/atomic64_cx8_32.S 2011-11-11 15:19:27.000000000 -0500
18017+++ linux-3.1.4/arch/x86/lib/atomic64_cx8_32.S 2011-11-16 18:39:07.000000000 -0500
18018@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18019 CFI_STARTPROC
18020
18021 read64 %ecx
18022+ pax_force_retaddr
18023 ret
18024 CFI_ENDPROC
18025 ENDPROC(atomic64_read_cx8)
18026
18027+ENTRY(atomic64_read_unchecked_cx8)
18028+ CFI_STARTPROC
18029+
18030+ read64 %ecx
18031+ pax_force_retaddr
18032+ ret
18033+ CFI_ENDPROC
18034+ENDPROC(atomic64_read_unchecked_cx8)
18035+
18036 ENTRY(atomic64_set_cx8)
18037 CFI_STARTPROC
18038
18039@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18040 cmpxchg8b (%esi)
18041 jne 1b
18042
18043+ pax_force_retaddr
18044 ret
18045 CFI_ENDPROC
18046 ENDPROC(atomic64_set_cx8)
18047
18048+ENTRY(atomic64_set_unchecked_cx8)
18049+ CFI_STARTPROC
18050+
18051+1:
18052+/* we don't need LOCK_PREFIX since aligned 64-bit writes
18053+ * are atomic on 586 and newer */
18054+ cmpxchg8b (%esi)
18055+ jne 1b
18056+
18057+ pax_force_retaddr
18058+ ret
18059+ CFI_ENDPROC
18060+ENDPROC(atomic64_set_unchecked_cx8)
18061+
18062 ENTRY(atomic64_xchg_cx8)
18063 CFI_STARTPROC
18064
18065@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18066 cmpxchg8b (%esi)
18067 jne 1b
18068
18069+ pax_force_retaddr
18070 ret
18071 CFI_ENDPROC
18072 ENDPROC(atomic64_xchg_cx8)
18073
18074-.macro addsub_return func ins insc
18075-ENTRY(atomic64_\func\()_return_cx8)
18076+.macro addsub_return func ins insc unchecked=""
18077+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18078 CFI_STARTPROC
18079 SAVE ebp
18080 SAVE ebx
18081@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18082 movl %edx, %ecx
18083 \ins\()l %esi, %ebx
18084 \insc\()l %edi, %ecx
18085+
18086+.ifb \unchecked
18087+#ifdef CONFIG_PAX_REFCOUNT
18088+ into
18089+2:
18090+ _ASM_EXTABLE(2b, 3f)
18091+#endif
18092+.endif
18093+
18094 LOCK_PREFIX
18095 cmpxchg8b (%ebp)
18096 jne 1b
18097-
18098-10:
18099 movl %ebx, %eax
18100 movl %ecx, %edx
18101+
18102+.ifb \unchecked
18103+#ifdef CONFIG_PAX_REFCOUNT
18104+3:
18105+#endif
18106+.endif
18107+
18108 RESTORE edi
18109 RESTORE esi
18110 RESTORE ebx
18111 RESTORE ebp
18112+ pax_force_retaddr
18113 ret
18114 CFI_ENDPROC
18115-ENDPROC(atomic64_\func\()_return_cx8)
18116+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18117 .endm
18118
18119 addsub_return add add adc
18120 addsub_return sub sub sbb
18121+addsub_return add add adc _unchecked
18122+addsub_return sub sub sbb _unchecked
18123
18124-.macro incdec_return func ins insc
18125-ENTRY(atomic64_\func\()_return_cx8)
18126+.macro incdec_return func ins insc unchecked
18127+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18128 CFI_STARTPROC
18129 SAVE ebx
18130
18131@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18132 movl %edx, %ecx
18133 \ins\()l $1, %ebx
18134 \insc\()l $0, %ecx
18135+
18136+.ifb \unchecked
18137+#ifdef CONFIG_PAX_REFCOUNT
18138+ into
18139+2:
18140+ _ASM_EXTABLE(2b, 3f)
18141+#endif
18142+.endif
18143+
18144 LOCK_PREFIX
18145 cmpxchg8b (%esi)
18146 jne 1b
18147
18148-10:
18149 movl %ebx, %eax
18150 movl %ecx, %edx
18151+
18152+.ifb \unchecked
18153+#ifdef CONFIG_PAX_REFCOUNT
18154+3:
18155+#endif
18156+.endif
18157+
18158 RESTORE ebx
18159+ pax_force_retaddr
18160 ret
18161 CFI_ENDPROC
18162-ENDPROC(atomic64_\func\()_return_cx8)
18163+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18164 .endm
18165
18166 incdec_return inc add adc
18167 incdec_return dec sub sbb
18168+incdec_return inc add adc _unchecked
18169+incdec_return dec sub sbb _unchecked
18170
18171 ENTRY(atomic64_dec_if_positive_cx8)
18172 CFI_STARTPROC
18173@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18174 movl %edx, %ecx
18175 subl $1, %ebx
18176 sbb $0, %ecx
18177+
18178+#ifdef CONFIG_PAX_REFCOUNT
18179+ into
18180+1234:
18181+ _ASM_EXTABLE(1234b, 2f)
18182+#endif
18183+
18184 js 2f
18185 LOCK_PREFIX
18186 cmpxchg8b (%esi)
18187@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18188 movl %ebx, %eax
18189 movl %ecx, %edx
18190 RESTORE ebx
18191+ pax_force_retaddr
18192 ret
18193 CFI_ENDPROC
18194 ENDPROC(atomic64_dec_if_positive_cx8)
18195@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18196 movl %edx, %ecx
18197 addl %esi, %ebx
18198 adcl %edi, %ecx
18199+
18200+#ifdef CONFIG_PAX_REFCOUNT
18201+ into
18202+1234:
18203+ _ASM_EXTABLE(1234b, 3f)
18204+#endif
18205+
18206 LOCK_PREFIX
18207 cmpxchg8b (%ebp)
18208 jne 1b
18209@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
18210 CFI_ADJUST_CFA_OFFSET -8
18211 RESTORE ebx
18212 RESTORE ebp
18213+ pax_force_retaddr
18214 ret
18215 4:
18216 cmpl %edx, 4(%esp)
18217@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
18218 movl %edx, %ecx
18219 addl $1, %ebx
18220 adcl $0, %ecx
18221+
18222+#ifdef CONFIG_PAX_REFCOUNT
18223+ into
18224+1234:
18225+ _ASM_EXTABLE(1234b, 3f)
18226+#endif
18227+
18228 LOCK_PREFIX
18229 cmpxchg8b (%esi)
18230 jne 1b
18231@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
18232 movl $1, %eax
18233 3:
18234 RESTORE ebx
18235+ pax_force_retaddr
18236 ret
18237 4:
18238 testl %edx, %edx
18239diff -urNp linux-3.1.4/arch/x86/lib/checksum_32.S linux-3.1.4/arch/x86/lib/checksum_32.S
18240--- linux-3.1.4/arch/x86/lib/checksum_32.S 2011-11-11 15:19:27.000000000 -0500
18241+++ linux-3.1.4/arch/x86/lib/checksum_32.S 2011-11-16 18:39:07.000000000 -0500
18242@@ -28,7 +28,8 @@
18243 #include <linux/linkage.h>
18244 #include <asm/dwarf2.h>
18245 #include <asm/errno.h>
18246-
18247+#include <asm/segment.h>
18248+
18249 /*
18250 * computes a partial checksum, e.g. for TCP/UDP fragments
18251 */
18252@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
18253
18254 #define ARGBASE 16
18255 #define FP 12
18256-
18257-ENTRY(csum_partial_copy_generic)
18258+
18259+ENTRY(csum_partial_copy_generic_to_user)
18260 CFI_STARTPROC
18261+
18262+#ifdef CONFIG_PAX_MEMORY_UDEREF
18263+ pushl_cfi %gs
18264+ popl_cfi %es
18265+ jmp csum_partial_copy_generic
18266+#endif
18267+
18268+ENTRY(csum_partial_copy_generic_from_user)
18269+
18270+#ifdef CONFIG_PAX_MEMORY_UDEREF
18271+ pushl_cfi %gs
18272+ popl_cfi %ds
18273+#endif
18274+
18275+ENTRY(csum_partial_copy_generic)
18276 subl $4,%esp
18277 CFI_ADJUST_CFA_OFFSET 4
18278 pushl_cfi %edi
18279@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
18280 jmp 4f
18281 SRC(1: movw (%esi), %bx )
18282 addl $2, %esi
18283-DST( movw %bx, (%edi) )
18284+DST( movw %bx, %es:(%edi) )
18285 addl $2, %edi
18286 addw %bx, %ax
18287 adcl $0, %eax
18288@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
18289 SRC(1: movl (%esi), %ebx )
18290 SRC( movl 4(%esi), %edx )
18291 adcl %ebx, %eax
18292-DST( movl %ebx, (%edi) )
18293+DST( movl %ebx, %es:(%edi) )
18294 adcl %edx, %eax
18295-DST( movl %edx, 4(%edi) )
18296+DST( movl %edx, %es:4(%edi) )
18297
18298 SRC( movl 8(%esi), %ebx )
18299 SRC( movl 12(%esi), %edx )
18300 adcl %ebx, %eax
18301-DST( movl %ebx, 8(%edi) )
18302+DST( movl %ebx, %es:8(%edi) )
18303 adcl %edx, %eax
18304-DST( movl %edx, 12(%edi) )
18305+DST( movl %edx, %es:12(%edi) )
18306
18307 SRC( movl 16(%esi), %ebx )
18308 SRC( movl 20(%esi), %edx )
18309 adcl %ebx, %eax
18310-DST( movl %ebx, 16(%edi) )
18311+DST( movl %ebx, %es:16(%edi) )
18312 adcl %edx, %eax
18313-DST( movl %edx, 20(%edi) )
18314+DST( movl %edx, %es:20(%edi) )
18315
18316 SRC( movl 24(%esi), %ebx )
18317 SRC( movl 28(%esi), %edx )
18318 adcl %ebx, %eax
18319-DST( movl %ebx, 24(%edi) )
18320+DST( movl %ebx, %es:24(%edi) )
18321 adcl %edx, %eax
18322-DST( movl %edx, 28(%edi) )
18323+DST( movl %edx, %es:28(%edi) )
18324
18325 lea 32(%esi), %esi
18326 lea 32(%edi), %edi
18327@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
18328 shrl $2, %edx # This clears CF
18329 SRC(3: movl (%esi), %ebx )
18330 adcl %ebx, %eax
18331-DST( movl %ebx, (%edi) )
18332+DST( movl %ebx, %es:(%edi) )
18333 lea 4(%esi), %esi
18334 lea 4(%edi), %edi
18335 dec %edx
18336@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
18337 jb 5f
18338 SRC( movw (%esi), %cx )
18339 leal 2(%esi), %esi
18340-DST( movw %cx, (%edi) )
18341+DST( movw %cx, %es:(%edi) )
18342 leal 2(%edi), %edi
18343 je 6f
18344 shll $16,%ecx
18345 SRC(5: movb (%esi), %cl )
18346-DST( movb %cl, (%edi) )
18347+DST( movb %cl, %es:(%edi) )
18348 6: addl %ecx, %eax
18349 adcl $0, %eax
18350 7:
18351@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
18352
18353 6001:
18354 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18355- movl $-EFAULT, (%ebx)
18356+ movl $-EFAULT, %ss:(%ebx)
18357
18358 # zero the complete destination - computing the rest
18359 # is too much work
18360@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
18361
18362 6002:
18363 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18364- movl $-EFAULT,(%ebx)
18365+ movl $-EFAULT,%ss:(%ebx)
18366 jmp 5000b
18367
18368 .previous
18369
18370+ pushl_cfi %ss
18371+ popl_cfi %ds
18372+ pushl_cfi %ss
18373+ popl_cfi %es
18374 popl_cfi %ebx
18375 CFI_RESTORE ebx
18376 popl_cfi %esi
18377@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
18378 popl_cfi %ecx # equivalent to addl $4,%esp
18379 ret
18380 CFI_ENDPROC
18381-ENDPROC(csum_partial_copy_generic)
18382+ENDPROC(csum_partial_copy_generic_to_user)
18383
18384 #else
18385
18386 /* Version for PentiumII/PPro */
18387
18388 #define ROUND1(x) \
18389+ nop; nop; nop; \
18390 SRC(movl x(%esi), %ebx ) ; \
18391 addl %ebx, %eax ; \
18392- DST(movl %ebx, x(%edi) ) ;
18393+ DST(movl %ebx, %es:x(%edi)) ;
18394
18395 #define ROUND(x) \
18396+ nop; nop; nop; \
18397 SRC(movl x(%esi), %ebx ) ; \
18398 adcl %ebx, %eax ; \
18399- DST(movl %ebx, x(%edi) ) ;
18400+ DST(movl %ebx, %es:x(%edi)) ;
18401
18402 #define ARGBASE 12
18403-
18404-ENTRY(csum_partial_copy_generic)
18405+
18406+ENTRY(csum_partial_copy_generic_to_user)
18407 CFI_STARTPROC
18408+
18409+#ifdef CONFIG_PAX_MEMORY_UDEREF
18410+ pushl_cfi %gs
18411+ popl_cfi %es
18412+ jmp csum_partial_copy_generic
18413+#endif
18414+
18415+ENTRY(csum_partial_copy_generic_from_user)
18416+
18417+#ifdef CONFIG_PAX_MEMORY_UDEREF
18418+ pushl_cfi %gs
18419+ popl_cfi %ds
18420+#endif
18421+
18422+ENTRY(csum_partial_copy_generic)
18423 pushl_cfi %ebx
18424 CFI_REL_OFFSET ebx, 0
18425 pushl_cfi %edi
18426@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
18427 subl %ebx, %edi
18428 lea -1(%esi),%edx
18429 andl $-32,%edx
18430- lea 3f(%ebx,%ebx), %ebx
18431+ lea 3f(%ebx,%ebx,2), %ebx
18432 testl %esi, %esi
18433 jmp *%ebx
18434 1: addl $64,%esi
18435@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
18436 jb 5f
18437 SRC( movw (%esi), %dx )
18438 leal 2(%esi), %esi
18439-DST( movw %dx, (%edi) )
18440+DST( movw %dx, %es:(%edi) )
18441 leal 2(%edi), %edi
18442 je 6f
18443 shll $16,%edx
18444 5:
18445 SRC( movb (%esi), %dl )
18446-DST( movb %dl, (%edi) )
18447+DST( movb %dl, %es:(%edi) )
18448 6: addl %edx, %eax
18449 adcl $0, %eax
18450 7:
18451 .section .fixup, "ax"
18452 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18453- movl $-EFAULT, (%ebx)
18454+ movl $-EFAULT, %ss:(%ebx)
18455 # zero the complete destination (computing the rest is too much work)
18456 movl ARGBASE+8(%esp),%edi # dst
18457 movl ARGBASE+12(%esp),%ecx # len
18458@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
18459 rep; stosb
18460 jmp 7b
18461 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18462- movl $-EFAULT, (%ebx)
18463+ movl $-EFAULT, %ss:(%ebx)
18464 jmp 7b
18465 .previous
18466
18467+#ifdef CONFIG_PAX_MEMORY_UDEREF
18468+ pushl_cfi %ss
18469+ popl_cfi %ds
18470+ pushl_cfi %ss
18471+ popl_cfi %es
18472+#endif
18473+
18474 popl_cfi %esi
18475 CFI_RESTORE esi
18476 popl_cfi %edi
18477@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
18478 CFI_RESTORE ebx
18479 ret
18480 CFI_ENDPROC
18481-ENDPROC(csum_partial_copy_generic)
18482+ENDPROC(csum_partial_copy_generic_to_user)
18483
18484 #undef ROUND
18485 #undef ROUND1
18486diff -urNp linux-3.1.4/arch/x86/lib/clear_page_64.S linux-3.1.4/arch/x86/lib/clear_page_64.S
18487--- linux-3.1.4/arch/x86/lib/clear_page_64.S 2011-11-11 15:19:27.000000000 -0500
18488+++ linux-3.1.4/arch/x86/lib/clear_page_64.S 2011-11-16 18:39:07.000000000 -0500
18489@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
18490 movl $4096/8,%ecx
18491 xorl %eax,%eax
18492 rep stosq
18493+ pax_force_retaddr
18494 ret
18495 CFI_ENDPROC
18496 ENDPROC(clear_page_c)
18497@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
18498 movl $4096,%ecx
18499 xorl %eax,%eax
18500 rep stosb
18501+ pax_force_retaddr
18502 ret
18503 CFI_ENDPROC
18504 ENDPROC(clear_page_c_e)
18505@@ -43,6 +45,7 @@ ENTRY(clear_page)
18506 leaq 64(%rdi),%rdi
18507 jnz .Lloop
18508 nop
18509+ pax_force_retaddr
18510 ret
18511 CFI_ENDPROC
18512 .Lclear_page_end:
18513@@ -58,7 +61,7 @@ ENDPROC(clear_page)
18514
18515 #include <asm/cpufeature.h>
18516
18517- .section .altinstr_replacement,"ax"
18518+ .section .altinstr_replacement,"a"
18519 1: .byte 0xeb /* jmp <disp8> */
18520 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18521 2: .byte 0xeb /* jmp <disp8> */
18522diff -urNp linux-3.1.4/arch/x86/lib/cmpxchg16b_emu.S linux-3.1.4/arch/x86/lib/cmpxchg16b_emu.S
18523--- linux-3.1.4/arch/x86/lib/cmpxchg16b_emu.S 2011-11-11 15:19:27.000000000 -0500
18524+++ linux-3.1.4/arch/x86/lib/cmpxchg16b_emu.S 2011-11-16 18:39:07.000000000 -0500
18525@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
18526
18527 popf
18528 mov $1, %al
18529+ pax_force_retaddr
18530 ret
18531
18532 not_same:
18533 popf
18534 xor %al,%al
18535+ pax_force_retaddr
18536 ret
18537
18538 CFI_ENDPROC
18539diff -urNp linux-3.1.4/arch/x86/lib/copy_page_64.S linux-3.1.4/arch/x86/lib/copy_page_64.S
18540--- linux-3.1.4/arch/x86/lib/copy_page_64.S 2011-11-11 15:19:27.000000000 -0500
18541+++ linux-3.1.4/arch/x86/lib/copy_page_64.S 2011-12-02 17:38:47.000000000 -0500
18542@@ -9,6 +9,7 @@ copy_page_c:
18543 CFI_STARTPROC
18544 movl $4096/8,%ecx
18545 rep movsq
18546+ pax_force_retaddr
18547 ret
18548 CFI_ENDPROC
18549 ENDPROC(copy_page_c)
18550@@ -39,7 +40,7 @@ ENTRY(copy_page)
18551 movq 16 (%rsi), %rdx
18552 movq 24 (%rsi), %r8
18553 movq 32 (%rsi), %r9
18554- movq 40 (%rsi), %r10
18555+ movq 40 (%rsi), %r13
18556 movq 48 (%rsi), %r11
18557 movq 56 (%rsi), %r12
18558
18559@@ -50,7 +51,7 @@ ENTRY(copy_page)
18560 movq %rdx, 16 (%rdi)
18561 movq %r8, 24 (%rdi)
18562 movq %r9, 32 (%rdi)
18563- movq %r10, 40 (%rdi)
18564+ movq %r13, 40 (%rdi)
18565 movq %r11, 48 (%rdi)
18566 movq %r12, 56 (%rdi)
18567
18568@@ -69,7 +70,7 @@ ENTRY(copy_page)
18569 movq 16 (%rsi), %rdx
18570 movq 24 (%rsi), %r8
18571 movq 32 (%rsi), %r9
18572- movq 40 (%rsi), %r10
18573+ movq 40 (%rsi), %r13
18574 movq 48 (%rsi), %r11
18575 movq 56 (%rsi), %r12
18576
18577@@ -78,7 +79,7 @@ ENTRY(copy_page)
18578 movq %rdx, 16 (%rdi)
18579 movq %r8, 24 (%rdi)
18580 movq %r9, 32 (%rdi)
18581- movq %r10, 40 (%rdi)
18582+ movq %r13, 40 (%rdi)
18583 movq %r11, 48 (%rdi)
18584 movq %r12, 56 (%rdi)
18585
18586@@ -95,6 +96,7 @@ ENTRY(copy_page)
18587 CFI_RESTORE r13
18588 addq $3*8,%rsp
18589 CFI_ADJUST_CFA_OFFSET -3*8
18590+ pax_force_retaddr
18591 ret
18592 .Lcopy_page_end:
18593 CFI_ENDPROC
18594@@ -105,7 +107,7 @@ ENDPROC(copy_page)
18595
18596 #include <asm/cpufeature.h>
18597
18598- .section .altinstr_replacement,"ax"
18599+ .section .altinstr_replacement,"a"
18600 1: .byte 0xeb /* jmp <disp8> */
18601 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18602 2:
18603diff -urNp linux-3.1.4/arch/x86/lib/copy_user_64.S linux-3.1.4/arch/x86/lib/copy_user_64.S
18604--- linux-3.1.4/arch/x86/lib/copy_user_64.S 2011-11-11 15:19:27.000000000 -0500
18605+++ linux-3.1.4/arch/x86/lib/copy_user_64.S 2011-12-02 17:38:47.000000000 -0500
18606@@ -16,6 +16,7 @@
18607 #include <asm/thread_info.h>
18608 #include <asm/cpufeature.h>
18609 #include <asm/alternative-asm.h>
18610+#include <asm/pgtable.h>
18611
18612 /*
18613 * By placing feature2 after feature1 in altinstructions section, we logically
18614@@ -29,7 +30,7 @@
18615 .byte 0xe9 /* 32bit jump */
18616 .long \orig-1f /* by default jump to orig */
18617 1:
18618- .section .altinstr_replacement,"ax"
18619+ .section .altinstr_replacement,"a"
18620 2: .byte 0xe9 /* near jump with 32bit immediate */
18621 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
18622 3: .byte 0xe9 /* near jump with 32bit immediate */
18623@@ -71,47 +72,20 @@
18624 #endif
18625 .endm
18626
18627-/* Standard copy_to_user with segment limit checking */
18628-ENTRY(_copy_to_user)
18629- CFI_STARTPROC
18630- GET_THREAD_INFO(%rax)
18631- movq %rdi,%rcx
18632- addq %rdx,%rcx
18633- jc bad_to_user
18634- cmpq TI_addr_limit(%rax),%rcx
18635- ja bad_to_user
18636- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18637- copy_user_generic_unrolled,copy_user_generic_string, \
18638- copy_user_enhanced_fast_string
18639- CFI_ENDPROC
18640-ENDPROC(_copy_to_user)
18641-
18642-/* Standard copy_from_user with segment limit checking */
18643-ENTRY(_copy_from_user)
18644- CFI_STARTPROC
18645- GET_THREAD_INFO(%rax)
18646- movq %rsi,%rcx
18647- addq %rdx,%rcx
18648- jc bad_from_user
18649- cmpq TI_addr_limit(%rax),%rcx
18650- ja bad_from_user
18651- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18652- copy_user_generic_unrolled,copy_user_generic_string, \
18653- copy_user_enhanced_fast_string
18654- CFI_ENDPROC
18655-ENDPROC(_copy_from_user)
18656-
18657 .section .fixup,"ax"
18658 /* must zero dest */
18659 ENTRY(bad_from_user)
18660 bad_from_user:
18661 CFI_STARTPROC
18662+ testl %edx,%edx
18663+ js bad_to_user
18664 movl %edx,%ecx
18665 xorl %eax,%eax
18666 rep
18667 stosb
18668 bad_to_user:
18669 movl %edx,%eax
18670+ pax_force_retaddr
18671 ret
18672 CFI_ENDPROC
18673 ENDPROC(bad_from_user)
18674@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
18675 jz 17f
18676 1: movq (%rsi),%r8
18677 2: movq 1*8(%rsi),%r9
18678-3: movq 2*8(%rsi),%r10
18679+3: movq 2*8(%rsi),%rax
18680 4: movq 3*8(%rsi),%r11
18681 5: movq %r8,(%rdi)
18682 6: movq %r9,1*8(%rdi)
18683-7: movq %r10,2*8(%rdi)
18684+7: movq %rax,2*8(%rdi)
18685 8: movq %r11,3*8(%rdi)
18686 9: movq 4*8(%rsi),%r8
18687 10: movq 5*8(%rsi),%r9
18688-11: movq 6*8(%rsi),%r10
18689+11: movq 6*8(%rsi),%rax
18690 12: movq 7*8(%rsi),%r11
18691 13: movq %r8,4*8(%rdi)
18692 14: movq %r9,5*8(%rdi)
18693-15: movq %r10,6*8(%rdi)
18694+15: movq %rax,6*8(%rdi)
18695 16: movq %r11,7*8(%rdi)
18696 leaq 64(%rsi),%rsi
18697 leaq 64(%rdi),%rdi
18698@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
18699 decl %ecx
18700 jnz 21b
18701 23: xor %eax,%eax
18702+ pax_force_retaddr
18703 ret
18704
18705 .section .fixup,"ax"
18706@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
18707 3: rep
18708 movsb
18709 4: xorl %eax,%eax
18710+ pax_force_retaddr
18711 ret
18712
18713 .section .fixup,"ax"
18714@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
18715 1: rep
18716 movsb
18717 2: xorl %eax,%eax
18718+ pax_force_retaddr
18719 ret
18720
18721 .section .fixup,"ax"
18722diff -urNp linux-3.1.4/arch/x86/lib/copy_user_nocache_64.S linux-3.1.4/arch/x86/lib/copy_user_nocache_64.S
18723--- linux-3.1.4/arch/x86/lib/copy_user_nocache_64.S 2011-11-11 15:19:27.000000000 -0500
18724+++ linux-3.1.4/arch/x86/lib/copy_user_nocache_64.S 2011-12-02 17:38:47.000000000 -0500
18725@@ -8,12 +8,14 @@
18726
18727 #include <linux/linkage.h>
18728 #include <asm/dwarf2.h>
18729+#include <asm/alternative-asm.h>
18730
18731 #define FIX_ALIGNMENT 1
18732
18733 #include <asm/current.h>
18734 #include <asm/asm-offsets.h>
18735 #include <asm/thread_info.h>
18736+#include <asm/pgtable.h>
18737
18738 .macro ALIGN_DESTINATION
18739 #ifdef FIX_ALIGNMENT
18740@@ -50,6 +52,15 @@
18741 */
18742 ENTRY(__copy_user_nocache)
18743 CFI_STARTPROC
18744+
18745+#ifdef CONFIG_PAX_MEMORY_UDEREF
18746+ mov $PAX_USER_SHADOW_BASE,%rcx
18747+ cmp %rcx,%rsi
18748+ jae 1f
18749+ add %rcx,%rsi
18750+1:
18751+#endif
18752+
18753 cmpl $8,%edx
18754 jb 20f /* less then 8 bytes, go to byte copy loop */
18755 ALIGN_DESTINATION
18756@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
18757 jz 17f
18758 1: movq (%rsi),%r8
18759 2: movq 1*8(%rsi),%r9
18760-3: movq 2*8(%rsi),%r10
18761+3: movq 2*8(%rsi),%rax
18762 4: movq 3*8(%rsi),%r11
18763 5: movnti %r8,(%rdi)
18764 6: movnti %r9,1*8(%rdi)
18765-7: movnti %r10,2*8(%rdi)
18766+7: movnti %rax,2*8(%rdi)
18767 8: movnti %r11,3*8(%rdi)
18768 9: movq 4*8(%rsi),%r8
18769 10: movq 5*8(%rsi),%r9
18770-11: movq 6*8(%rsi),%r10
18771+11: movq 6*8(%rsi),%rax
18772 12: movq 7*8(%rsi),%r11
18773 13: movnti %r8,4*8(%rdi)
18774 14: movnti %r9,5*8(%rdi)
18775-15: movnti %r10,6*8(%rdi)
18776+15: movnti %rax,6*8(%rdi)
18777 16: movnti %r11,7*8(%rdi)
18778 leaq 64(%rsi),%rsi
18779 leaq 64(%rdi),%rdi
18780@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
18781 jnz 21b
18782 23: xorl %eax,%eax
18783 sfence
18784+ pax_force_retaddr
18785 ret
18786
18787 .section .fixup,"ax"
18788diff -urNp linux-3.1.4/arch/x86/lib/csum-copy_64.S linux-3.1.4/arch/x86/lib/csum-copy_64.S
18789--- linux-3.1.4/arch/x86/lib/csum-copy_64.S 2011-11-11 15:19:27.000000000 -0500
18790+++ linux-3.1.4/arch/x86/lib/csum-copy_64.S 2011-12-02 17:38:47.000000000 -0500
18791@@ -8,6 +8,7 @@
18792 #include <linux/linkage.h>
18793 #include <asm/dwarf2.h>
18794 #include <asm/errno.h>
18795+#include <asm/alternative-asm.h>
18796
18797 /*
18798 * Checksum copy with exception handling.
18799@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
18800 CFI_RESTORE rbp
18801 addq $7*8, %rsp
18802 CFI_ADJUST_CFA_OFFSET -7*8
18803+ pax_force_retaddr 0, 1
18804 ret
18805 CFI_RESTORE_STATE
18806
18807diff -urNp linux-3.1.4/arch/x86/lib/csum-wrappers_64.c linux-3.1.4/arch/x86/lib/csum-wrappers_64.c
18808--- linux-3.1.4/arch/x86/lib/csum-wrappers_64.c 2011-11-11 15:19:27.000000000 -0500
18809+++ linux-3.1.4/arch/x86/lib/csum-wrappers_64.c 2011-11-16 18:39:07.000000000 -0500
18810@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
18811 len -= 2;
18812 }
18813 }
18814- isum = csum_partial_copy_generic((__force const void *)src,
18815+
18816+#ifdef CONFIG_PAX_MEMORY_UDEREF
18817+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18818+ src += PAX_USER_SHADOW_BASE;
18819+#endif
18820+
18821+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
18822 dst, len, isum, errp, NULL);
18823 if (unlikely(*errp))
18824 goto out_err;
18825@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
18826 }
18827
18828 *errp = 0;
18829- return csum_partial_copy_generic(src, (void __force *)dst,
18830+
18831+#ifdef CONFIG_PAX_MEMORY_UDEREF
18832+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18833+ dst += PAX_USER_SHADOW_BASE;
18834+#endif
18835+
18836+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
18837 len, isum, NULL, errp);
18838 }
18839 EXPORT_SYMBOL(csum_partial_copy_to_user);
18840diff -urNp linux-3.1.4/arch/x86/lib/getuser.S linux-3.1.4/arch/x86/lib/getuser.S
18841--- linux-3.1.4/arch/x86/lib/getuser.S 2011-11-11 15:19:27.000000000 -0500
18842+++ linux-3.1.4/arch/x86/lib/getuser.S 2011-11-16 18:39:07.000000000 -0500
18843@@ -33,15 +33,38 @@
18844 #include <asm/asm-offsets.h>
18845 #include <asm/thread_info.h>
18846 #include <asm/asm.h>
18847+#include <asm/segment.h>
18848+#include <asm/pgtable.h>
18849+#include <asm/alternative-asm.h>
18850+
18851+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18852+#define __copyuser_seg gs;
18853+#else
18854+#define __copyuser_seg
18855+#endif
18856
18857 .text
18858 ENTRY(__get_user_1)
18859 CFI_STARTPROC
18860+
18861+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18862 GET_THREAD_INFO(%_ASM_DX)
18863 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18864 jae bad_get_user
18865-1: movzb (%_ASM_AX),%edx
18866+
18867+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18868+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18869+ cmp %_ASM_DX,%_ASM_AX
18870+ jae 1234f
18871+ add %_ASM_DX,%_ASM_AX
18872+1234:
18873+#endif
18874+
18875+#endif
18876+
18877+1: __copyuser_seg movzb (%_ASM_AX),%edx
18878 xor %eax,%eax
18879+ pax_force_retaddr
18880 ret
18881 CFI_ENDPROC
18882 ENDPROC(__get_user_1)
18883@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
18884 ENTRY(__get_user_2)
18885 CFI_STARTPROC
18886 add $1,%_ASM_AX
18887+
18888+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18889 jc bad_get_user
18890 GET_THREAD_INFO(%_ASM_DX)
18891 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18892 jae bad_get_user
18893-2: movzwl -1(%_ASM_AX),%edx
18894+
18895+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18896+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18897+ cmp %_ASM_DX,%_ASM_AX
18898+ jae 1234f
18899+ add %_ASM_DX,%_ASM_AX
18900+1234:
18901+#endif
18902+
18903+#endif
18904+
18905+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18906 xor %eax,%eax
18907+ pax_force_retaddr
18908 ret
18909 CFI_ENDPROC
18910 ENDPROC(__get_user_2)
18911@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
18912 ENTRY(__get_user_4)
18913 CFI_STARTPROC
18914 add $3,%_ASM_AX
18915+
18916+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18917 jc bad_get_user
18918 GET_THREAD_INFO(%_ASM_DX)
18919 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18920 jae bad_get_user
18921-3: mov -3(%_ASM_AX),%edx
18922+
18923+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18924+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18925+ cmp %_ASM_DX,%_ASM_AX
18926+ jae 1234f
18927+ add %_ASM_DX,%_ASM_AX
18928+1234:
18929+#endif
18930+
18931+#endif
18932+
18933+3: __copyuser_seg mov -3(%_ASM_AX),%edx
18934 xor %eax,%eax
18935+ pax_force_retaddr
18936 ret
18937 CFI_ENDPROC
18938 ENDPROC(__get_user_4)
18939@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
18940 GET_THREAD_INFO(%_ASM_DX)
18941 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18942 jae bad_get_user
18943+
18944+#ifdef CONFIG_PAX_MEMORY_UDEREF
18945+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18946+ cmp %_ASM_DX,%_ASM_AX
18947+ jae 1234f
18948+ add %_ASM_DX,%_ASM_AX
18949+1234:
18950+#endif
18951+
18952 4: movq -7(%_ASM_AX),%_ASM_DX
18953 xor %eax,%eax
18954+ pax_force_retaddr
18955 ret
18956 CFI_ENDPROC
18957 ENDPROC(__get_user_8)
18958@@ -91,6 +152,7 @@ bad_get_user:
18959 CFI_STARTPROC
18960 xor %edx,%edx
18961 mov $(-EFAULT),%_ASM_AX
18962+ pax_force_retaddr
18963 ret
18964 CFI_ENDPROC
18965 END(bad_get_user)
18966diff -urNp linux-3.1.4/arch/x86/lib/insn.c linux-3.1.4/arch/x86/lib/insn.c
18967--- linux-3.1.4/arch/x86/lib/insn.c 2011-11-11 15:19:27.000000000 -0500
18968+++ linux-3.1.4/arch/x86/lib/insn.c 2011-11-16 18:39:07.000000000 -0500
18969@@ -21,6 +21,11 @@
18970 #include <linux/string.h>
18971 #include <asm/inat.h>
18972 #include <asm/insn.h>
18973+#ifdef __KERNEL__
18974+#include <asm/pgtable_types.h>
18975+#else
18976+#define ktla_ktva(addr) addr
18977+#endif
18978
18979 #define get_next(t, insn) \
18980 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
18981@@ -40,8 +45,8 @@
18982 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
18983 {
18984 memset(insn, 0, sizeof(*insn));
18985- insn->kaddr = kaddr;
18986- insn->next_byte = kaddr;
18987+ insn->kaddr = ktla_ktva(kaddr);
18988+ insn->next_byte = ktla_ktva(kaddr);
18989 insn->x86_64 = x86_64 ? 1 : 0;
18990 insn->opnd_bytes = 4;
18991 if (x86_64)
18992diff -urNp linux-3.1.4/arch/x86/lib/iomap_copy_64.S linux-3.1.4/arch/x86/lib/iomap_copy_64.S
18993--- linux-3.1.4/arch/x86/lib/iomap_copy_64.S 2011-11-11 15:19:27.000000000 -0500
18994+++ linux-3.1.4/arch/x86/lib/iomap_copy_64.S 2011-11-16 18:39:07.000000000 -0500
18995@@ -17,6 +17,7 @@
18996
18997 #include <linux/linkage.h>
18998 #include <asm/dwarf2.h>
18999+#include <asm/alternative-asm.h>
19000
19001 /*
19002 * override generic version in lib/iomap_copy.c
19003@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19004 CFI_STARTPROC
19005 movl %edx,%ecx
19006 rep movsd
19007+ pax_force_retaddr
19008 ret
19009 CFI_ENDPROC
19010 ENDPROC(__iowrite32_copy)
19011diff -urNp linux-3.1.4/arch/x86/lib/memcpy_64.S linux-3.1.4/arch/x86/lib/memcpy_64.S
19012--- linux-3.1.4/arch/x86/lib/memcpy_64.S 2011-11-11 15:19:27.000000000 -0500
19013+++ linux-3.1.4/arch/x86/lib/memcpy_64.S 2011-12-02 17:38:47.000000000 -0500
19014@@ -34,6 +34,7 @@
19015 rep movsq
19016 movl %edx, %ecx
19017 rep movsb
19018+ pax_force_retaddr
19019 ret
19020 .Lmemcpy_e:
19021 .previous
19022@@ -51,6 +52,7 @@
19023
19024 movl %edx, %ecx
19025 rep movsb
19026+ pax_force_retaddr
19027 ret
19028 .Lmemcpy_e_e:
19029 .previous
19030@@ -81,13 +83,13 @@ ENTRY(memcpy)
19031 */
19032 movq 0*8(%rsi), %r8
19033 movq 1*8(%rsi), %r9
19034- movq 2*8(%rsi), %r10
19035+ movq 2*8(%rsi), %rcx
19036 movq 3*8(%rsi), %r11
19037 leaq 4*8(%rsi), %rsi
19038
19039 movq %r8, 0*8(%rdi)
19040 movq %r9, 1*8(%rdi)
19041- movq %r10, 2*8(%rdi)
19042+ movq %rcx, 2*8(%rdi)
19043 movq %r11, 3*8(%rdi)
19044 leaq 4*8(%rdi), %rdi
19045 jae .Lcopy_forward_loop
19046@@ -110,12 +112,12 @@ ENTRY(memcpy)
19047 subq $0x20, %rdx
19048 movq -1*8(%rsi), %r8
19049 movq -2*8(%rsi), %r9
19050- movq -3*8(%rsi), %r10
19051+ movq -3*8(%rsi), %rcx
19052 movq -4*8(%rsi), %r11
19053 leaq -4*8(%rsi), %rsi
19054 movq %r8, -1*8(%rdi)
19055 movq %r9, -2*8(%rdi)
19056- movq %r10, -3*8(%rdi)
19057+ movq %rcx, -3*8(%rdi)
19058 movq %r11, -4*8(%rdi)
19059 leaq -4*8(%rdi), %rdi
19060 jae .Lcopy_backward_loop
19061@@ -135,12 +137,13 @@ ENTRY(memcpy)
19062 */
19063 movq 0*8(%rsi), %r8
19064 movq 1*8(%rsi), %r9
19065- movq -2*8(%rsi, %rdx), %r10
19066+ movq -2*8(%rsi, %rdx), %rcx
19067 movq -1*8(%rsi, %rdx), %r11
19068 movq %r8, 0*8(%rdi)
19069 movq %r9, 1*8(%rdi)
19070- movq %r10, -2*8(%rdi, %rdx)
19071+ movq %rcx, -2*8(%rdi, %rdx)
19072 movq %r11, -1*8(%rdi, %rdx)
19073+ pax_force_retaddr
19074 retq
19075 .p2align 4
19076 .Lless_16bytes:
19077@@ -153,6 +156,7 @@ ENTRY(memcpy)
19078 movq -1*8(%rsi, %rdx), %r9
19079 movq %r8, 0*8(%rdi)
19080 movq %r9, -1*8(%rdi, %rdx)
19081+ pax_force_retaddr
19082 retq
19083 .p2align 4
19084 .Lless_8bytes:
19085@@ -166,6 +170,7 @@ ENTRY(memcpy)
19086 movl -4(%rsi, %rdx), %r8d
19087 movl %ecx, (%rdi)
19088 movl %r8d, -4(%rdi, %rdx)
19089+ pax_force_retaddr
19090 retq
19091 .p2align 4
19092 .Lless_3bytes:
19093@@ -183,6 +188,7 @@ ENTRY(memcpy)
19094 jnz .Lloop_1
19095
19096 .Lend:
19097+ pax_force_retaddr
19098 retq
19099 CFI_ENDPROC
19100 ENDPROC(memcpy)
19101diff -urNp linux-3.1.4/arch/x86/lib/memmove_64.S linux-3.1.4/arch/x86/lib/memmove_64.S
19102--- linux-3.1.4/arch/x86/lib/memmove_64.S 2011-11-11 15:19:27.000000000 -0500
19103+++ linux-3.1.4/arch/x86/lib/memmove_64.S 2011-12-02 17:38:47.000000000 -0500
19104@@ -61,13 +61,13 @@ ENTRY(memmove)
19105 5:
19106 sub $0x20, %rdx
19107 movq 0*8(%rsi), %r11
19108- movq 1*8(%rsi), %r10
19109+ movq 1*8(%rsi), %rcx
19110 movq 2*8(%rsi), %r9
19111 movq 3*8(%rsi), %r8
19112 leaq 4*8(%rsi), %rsi
19113
19114 movq %r11, 0*8(%rdi)
19115- movq %r10, 1*8(%rdi)
19116+ movq %rcx, 1*8(%rdi)
19117 movq %r9, 2*8(%rdi)
19118 movq %r8, 3*8(%rdi)
19119 leaq 4*8(%rdi), %rdi
19120@@ -81,10 +81,10 @@ ENTRY(memmove)
19121 4:
19122 movq %rdx, %rcx
19123 movq -8(%rsi, %rdx), %r11
19124- lea -8(%rdi, %rdx), %r10
19125+ lea -8(%rdi, %rdx), %r9
19126 shrq $3, %rcx
19127 rep movsq
19128- movq %r11, (%r10)
19129+ movq %r11, (%r9)
19130 jmp 13f
19131 .Lmemmove_end_forward:
19132
19133@@ -95,14 +95,14 @@ ENTRY(memmove)
19134 7:
19135 movq %rdx, %rcx
19136 movq (%rsi), %r11
19137- movq %rdi, %r10
19138+ movq %rdi, %r9
19139 leaq -8(%rsi, %rdx), %rsi
19140 leaq -8(%rdi, %rdx), %rdi
19141 shrq $3, %rcx
19142 std
19143 rep movsq
19144 cld
19145- movq %r11, (%r10)
19146+ movq %r11, (%r9)
19147 jmp 13f
19148
19149 /*
19150@@ -127,13 +127,13 @@ ENTRY(memmove)
19151 8:
19152 subq $0x20, %rdx
19153 movq -1*8(%rsi), %r11
19154- movq -2*8(%rsi), %r10
19155+ movq -2*8(%rsi), %rcx
19156 movq -3*8(%rsi), %r9
19157 movq -4*8(%rsi), %r8
19158 leaq -4*8(%rsi), %rsi
19159
19160 movq %r11, -1*8(%rdi)
19161- movq %r10, -2*8(%rdi)
19162+ movq %rcx, -2*8(%rdi)
19163 movq %r9, -3*8(%rdi)
19164 movq %r8, -4*8(%rdi)
19165 leaq -4*8(%rdi), %rdi
19166@@ -151,11 +151,11 @@ ENTRY(memmove)
19167 * Move data from 16 bytes to 31 bytes.
19168 */
19169 movq 0*8(%rsi), %r11
19170- movq 1*8(%rsi), %r10
19171+ movq 1*8(%rsi), %rcx
19172 movq -2*8(%rsi, %rdx), %r9
19173 movq -1*8(%rsi, %rdx), %r8
19174 movq %r11, 0*8(%rdi)
19175- movq %r10, 1*8(%rdi)
19176+ movq %rcx, 1*8(%rdi)
19177 movq %r9, -2*8(%rdi, %rdx)
19178 movq %r8, -1*8(%rdi, %rdx)
19179 jmp 13f
19180@@ -167,9 +167,9 @@ ENTRY(memmove)
19181 * Move data from 8 bytes to 15 bytes.
19182 */
19183 movq 0*8(%rsi), %r11
19184- movq -1*8(%rsi, %rdx), %r10
19185+ movq -1*8(%rsi, %rdx), %r9
19186 movq %r11, 0*8(%rdi)
19187- movq %r10, -1*8(%rdi, %rdx)
19188+ movq %r9, -1*8(%rdi, %rdx)
19189 jmp 13f
19190 10:
19191 cmpq $4, %rdx
19192@@ -178,9 +178,9 @@ ENTRY(memmove)
19193 * Move data from 4 bytes to 7 bytes.
19194 */
19195 movl (%rsi), %r11d
19196- movl -4(%rsi, %rdx), %r10d
19197+ movl -4(%rsi, %rdx), %r9d
19198 movl %r11d, (%rdi)
19199- movl %r10d, -4(%rdi, %rdx)
19200+ movl %r9d, -4(%rdi, %rdx)
19201 jmp 13f
19202 11:
19203 cmp $2, %rdx
19204@@ -189,9 +189,9 @@ ENTRY(memmove)
19205 * Move data from 2 bytes to 3 bytes.
19206 */
19207 movw (%rsi), %r11w
19208- movw -2(%rsi, %rdx), %r10w
19209+ movw -2(%rsi, %rdx), %r9w
19210 movw %r11w, (%rdi)
19211- movw %r10w, -2(%rdi, %rdx)
19212+ movw %r9w, -2(%rdi, %rdx)
19213 jmp 13f
19214 12:
19215 cmp $1, %rdx
19216@@ -202,6 +202,7 @@ ENTRY(memmove)
19217 movb (%rsi), %r11b
19218 movb %r11b, (%rdi)
19219 13:
19220+ pax_force_retaddr
19221 retq
19222 CFI_ENDPROC
19223
19224@@ -210,6 +211,7 @@ ENTRY(memmove)
19225 /* Forward moving data. */
19226 movq %rdx, %rcx
19227 rep movsb
19228+ pax_force_retaddr
19229 retq
19230 .Lmemmove_end_forward_efs:
19231 .previous
19232diff -urNp linux-3.1.4/arch/x86/lib/memset_64.S linux-3.1.4/arch/x86/lib/memset_64.S
19233--- linux-3.1.4/arch/x86/lib/memset_64.S 2011-11-11 15:19:27.000000000 -0500
19234+++ linux-3.1.4/arch/x86/lib/memset_64.S 2011-12-02 17:38:47.000000000 -0500
19235@@ -31,6 +31,7 @@
19236 movl %r8d,%ecx
19237 rep stosb
19238 movq %r9,%rax
19239+ pax_force_retaddr
19240 ret
19241 .Lmemset_e:
19242 .previous
19243@@ -53,6 +54,7 @@
19244 movl %edx,%ecx
19245 rep stosb
19246 movq %r9,%rax
19247+ pax_force_retaddr
19248 ret
19249 .Lmemset_e_e:
19250 .previous
19251@@ -60,13 +62,13 @@
19252 ENTRY(memset)
19253 ENTRY(__memset)
19254 CFI_STARTPROC
19255- movq %rdi,%r10
19256 movq %rdx,%r11
19257
19258 /* expand byte value */
19259 movzbl %sil,%ecx
19260 movabs $0x0101010101010101,%rax
19261 mul %rcx /* with rax, clobbers rdx */
19262+ movq %rdi,%rdx
19263
19264 /* align dst */
19265 movl %edi,%r9d
19266@@ -120,7 +122,8 @@ ENTRY(__memset)
19267 jnz .Lloop_1
19268
19269 .Lende:
19270- movq %r10,%rax
19271+ movq %rdx,%rax
19272+ pax_force_retaddr
19273 ret
19274
19275 CFI_RESTORE_STATE
19276diff -urNp linux-3.1.4/arch/x86/lib/mmx_32.c linux-3.1.4/arch/x86/lib/mmx_32.c
19277--- linux-3.1.4/arch/x86/lib/mmx_32.c 2011-11-11 15:19:27.000000000 -0500
19278+++ linux-3.1.4/arch/x86/lib/mmx_32.c 2011-11-16 18:39:07.000000000 -0500
19279@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19280 {
19281 void *p;
19282 int i;
19283+ unsigned long cr0;
19284
19285 if (unlikely(in_interrupt()))
19286 return __memcpy(to, from, len);
19287@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19288 kernel_fpu_begin();
19289
19290 __asm__ __volatile__ (
19291- "1: prefetch (%0)\n" /* This set is 28 bytes */
19292- " prefetch 64(%0)\n"
19293- " prefetch 128(%0)\n"
19294- " prefetch 192(%0)\n"
19295- " prefetch 256(%0)\n"
19296+ "1: prefetch (%1)\n" /* This set is 28 bytes */
19297+ " prefetch 64(%1)\n"
19298+ " prefetch 128(%1)\n"
19299+ " prefetch 192(%1)\n"
19300+ " prefetch 256(%1)\n"
19301 "2: \n"
19302 ".section .fixup, \"ax\"\n"
19303- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19304+ "3: \n"
19305+
19306+#ifdef CONFIG_PAX_KERNEXEC
19307+ " movl %%cr0, %0\n"
19308+ " movl %0, %%eax\n"
19309+ " andl $0xFFFEFFFF, %%eax\n"
19310+ " movl %%eax, %%cr0\n"
19311+#endif
19312+
19313+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19314+
19315+#ifdef CONFIG_PAX_KERNEXEC
19316+ " movl %0, %%cr0\n"
19317+#endif
19318+
19319 " jmp 2b\n"
19320 ".previous\n"
19321 _ASM_EXTABLE(1b, 3b)
19322- : : "r" (from));
19323+ : "=&r" (cr0) : "r" (from) : "ax");
19324
19325 for ( ; i > 5; i--) {
19326 __asm__ __volatile__ (
19327- "1: prefetch 320(%0)\n"
19328- "2: movq (%0), %%mm0\n"
19329- " movq 8(%0), %%mm1\n"
19330- " movq 16(%0), %%mm2\n"
19331- " movq 24(%0), %%mm3\n"
19332- " movq %%mm0, (%1)\n"
19333- " movq %%mm1, 8(%1)\n"
19334- " movq %%mm2, 16(%1)\n"
19335- " movq %%mm3, 24(%1)\n"
19336- " movq 32(%0), %%mm0\n"
19337- " movq 40(%0), %%mm1\n"
19338- " movq 48(%0), %%mm2\n"
19339- " movq 56(%0), %%mm3\n"
19340- " movq %%mm0, 32(%1)\n"
19341- " movq %%mm1, 40(%1)\n"
19342- " movq %%mm2, 48(%1)\n"
19343- " movq %%mm3, 56(%1)\n"
19344+ "1: prefetch 320(%1)\n"
19345+ "2: movq (%1), %%mm0\n"
19346+ " movq 8(%1), %%mm1\n"
19347+ " movq 16(%1), %%mm2\n"
19348+ " movq 24(%1), %%mm3\n"
19349+ " movq %%mm0, (%2)\n"
19350+ " movq %%mm1, 8(%2)\n"
19351+ " movq %%mm2, 16(%2)\n"
19352+ " movq %%mm3, 24(%2)\n"
19353+ " movq 32(%1), %%mm0\n"
19354+ " movq 40(%1), %%mm1\n"
19355+ " movq 48(%1), %%mm2\n"
19356+ " movq 56(%1), %%mm3\n"
19357+ " movq %%mm0, 32(%2)\n"
19358+ " movq %%mm1, 40(%2)\n"
19359+ " movq %%mm2, 48(%2)\n"
19360+ " movq %%mm3, 56(%2)\n"
19361 ".section .fixup, \"ax\"\n"
19362- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19363+ "3:\n"
19364+
19365+#ifdef CONFIG_PAX_KERNEXEC
19366+ " movl %%cr0, %0\n"
19367+ " movl %0, %%eax\n"
19368+ " andl $0xFFFEFFFF, %%eax\n"
19369+ " movl %%eax, %%cr0\n"
19370+#endif
19371+
19372+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19373+
19374+#ifdef CONFIG_PAX_KERNEXEC
19375+ " movl %0, %%cr0\n"
19376+#endif
19377+
19378 " jmp 2b\n"
19379 ".previous\n"
19380 _ASM_EXTABLE(1b, 3b)
19381- : : "r" (from), "r" (to) : "memory");
19382+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19383
19384 from += 64;
19385 to += 64;
19386@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19387 static void fast_copy_page(void *to, void *from)
19388 {
19389 int i;
19390+ unsigned long cr0;
19391
19392 kernel_fpu_begin();
19393
19394@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19395 * but that is for later. -AV
19396 */
19397 __asm__ __volatile__(
19398- "1: prefetch (%0)\n"
19399- " prefetch 64(%0)\n"
19400- " prefetch 128(%0)\n"
19401- " prefetch 192(%0)\n"
19402- " prefetch 256(%0)\n"
19403+ "1: prefetch (%1)\n"
19404+ " prefetch 64(%1)\n"
19405+ " prefetch 128(%1)\n"
19406+ " prefetch 192(%1)\n"
19407+ " prefetch 256(%1)\n"
19408 "2: \n"
19409 ".section .fixup, \"ax\"\n"
19410- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19411+ "3: \n"
19412+
19413+#ifdef CONFIG_PAX_KERNEXEC
19414+ " movl %%cr0, %0\n"
19415+ " movl %0, %%eax\n"
19416+ " andl $0xFFFEFFFF, %%eax\n"
19417+ " movl %%eax, %%cr0\n"
19418+#endif
19419+
19420+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19421+
19422+#ifdef CONFIG_PAX_KERNEXEC
19423+ " movl %0, %%cr0\n"
19424+#endif
19425+
19426 " jmp 2b\n"
19427 ".previous\n"
19428- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19429+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19430
19431 for (i = 0; i < (4096-320)/64; i++) {
19432 __asm__ __volatile__ (
19433- "1: prefetch 320(%0)\n"
19434- "2: movq (%0), %%mm0\n"
19435- " movntq %%mm0, (%1)\n"
19436- " movq 8(%0), %%mm1\n"
19437- " movntq %%mm1, 8(%1)\n"
19438- " movq 16(%0), %%mm2\n"
19439- " movntq %%mm2, 16(%1)\n"
19440- " movq 24(%0), %%mm3\n"
19441- " movntq %%mm3, 24(%1)\n"
19442- " movq 32(%0), %%mm4\n"
19443- " movntq %%mm4, 32(%1)\n"
19444- " movq 40(%0), %%mm5\n"
19445- " movntq %%mm5, 40(%1)\n"
19446- " movq 48(%0), %%mm6\n"
19447- " movntq %%mm6, 48(%1)\n"
19448- " movq 56(%0), %%mm7\n"
19449- " movntq %%mm7, 56(%1)\n"
19450+ "1: prefetch 320(%1)\n"
19451+ "2: movq (%1), %%mm0\n"
19452+ " movntq %%mm0, (%2)\n"
19453+ " movq 8(%1), %%mm1\n"
19454+ " movntq %%mm1, 8(%2)\n"
19455+ " movq 16(%1), %%mm2\n"
19456+ " movntq %%mm2, 16(%2)\n"
19457+ " movq 24(%1), %%mm3\n"
19458+ " movntq %%mm3, 24(%2)\n"
19459+ " movq 32(%1), %%mm4\n"
19460+ " movntq %%mm4, 32(%2)\n"
19461+ " movq 40(%1), %%mm5\n"
19462+ " movntq %%mm5, 40(%2)\n"
19463+ " movq 48(%1), %%mm6\n"
19464+ " movntq %%mm6, 48(%2)\n"
19465+ " movq 56(%1), %%mm7\n"
19466+ " movntq %%mm7, 56(%2)\n"
19467 ".section .fixup, \"ax\"\n"
19468- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19469+ "3:\n"
19470+
19471+#ifdef CONFIG_PAX_KERNEXEC
19472+ " movl %%cr0, %0\n"
19473+ " movl %0, %%eax\n"
19474+ " andl $0xFFFEFFFF, %%eax\n"
19475+ " movl %%eax, %%cr0\n"
19476+#endif
19477+
19478+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19479+
19480+#ifdef CONFIG_PAX_KERNEXEC
19481+ " movl %0, %%cr0\n"
19482+#endif
19483+
19484 " jmp 2b\n"
19485 ".previous\n"
19486- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19487+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19488
19489 from += 64;
19490 to += 64;
19491@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19492 static void fast_copy_page(void *to, void *from)
19493 {
19494 int i;
19495+ unsigned long cr0;
19496
19497 kernel_fpu_begin();
19498
19499 __asm__ __volatile__ (
19500- "1: prefetch (%0)\n"
19501- " prefetch 64(%0)\n"
19502- " prefetch 128(%0)\n"
19503- " prefetch 192(%0)\n"
19504- " prefetch 256(%0)\n"
19505+ "1: prefetch (%1)\n"
19506+ " prefetch 64(%1)\n"
19507+ " prefetch 128(%1)\n"
19508+ " prefetch 192(%1)\n"
19509+ " prefetch 256(%1)\n"
19510 "2: \n"
19511 ".section .fixup, \"ax\"\n"
19512- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19513+ "3: \n"
19514+
19515+#ifdef CONFIG_PAX_KERNEXEC
19516+ " movl %%cr0, %0\n"
19517+ " movl %0, %%eax\n"
19518+ " andl $0xFFFEFFFF, %%eax\n"
19519+ " movl %%eax, %%cr0\n"
19520+#endif
19521+
19522+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19523+
19524+#ifdef CONFIG_PAX_KERNEXEC
19525+ " movl %0, %%cr0\n"
19526+#endif
19527+
19528 " jmp 2b\n"
19529 ".previous\n"
19530- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19531+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19532
19533 for (i = 0; i < 4096/64; i++) {
19534 __asm__ __volatile__ (
19535- "1: prefetch 320(%0)\n"
19536- "2: movq (%0), %%mm0\n"
19537- " movq 8(%0), %%mm1\n"
19538- " movq 16(%0), %%mm2\n"
19539- " movq 24(%0), %%mm3\n"
19540- " movq %%mm0, (%1)\n"
19541- " movq %%mm1, 8(%1)\n"
19542- " movq %%mm2, 16(%1)\n"
19543- " movq %%mm3, 24(%1)\n"
19544- " movq 32(%0), %%mm0\n"
19545- " movq 40(%0), %%mm1\n"
19546- " movq 48(%0), %%mm2\n"
19547- " movq 56(%0), %%mm3\n"
19548- " movq %%mm0, 32(%1)\n"
19549- " movq %%mm1, 40(%1)\n"
19550- " movq %%mm2, 48(%1)\n"
19551- " movq %%mm3, 56(%1)\n"
19552+ "1: prefetch 320(%1)\n"
19553+ "2: movq (%1), %%mm0\n"
19554+ " movq 8(%1), %%mm1\n"
19555+ " movq 16(%1), %%mm2\n"
19556+ " movq 24(%1), %%mm3\n"
19557+ " movq %%mm0, (%2)\n"
19558+ " movq %%mm1, 8(%2)\n"
19559+ " movq %%mm2, 16(%2)\n"
19560+ " movq %%mm3, 24(%2)\n"
19561+ " movq 32(%1), %%mm0\n"
19562+ " movq 40(%1), %%mm1\n"
19563+ " movq 48(%1), %%mm2\n"
19564+ " movq 56(%1), %%mm3\n"
19565+ " movq %%mm0, 32(%2)\n"
19566+ " movq %%mm1, 40(%2)\n"
19567+ " movq %%mm2, 48(%2)\n"
19568+ " movq %%mm3, 56(%2)\n"
19569 ".section .fixup, \"ax\"\n"
19570- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19571+ "3:\n"
19572+
19573+#ifdef CONFIG_PAX_KERNEXEC
19574+ " movl %%cr0, %0\n"
19575+ " movl %0, %%eax\n"
19576+ " andl $0xFFFEFFFF, %%eax\n"
19577+ " movl %%eax, %%cr0\n"
19578+#endif
19579+
19580+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19581+
19582+#ifdef CONFIG_PAX_KERNEXEC
19583+ " movl %0, %%cr0\n"
19584+#endif
19585+
19586 " jmp 2b\n"
19587 ".previous\n"
19588 _ASM_EXTABLE(1b, 3b)
19589- : : "r" (from), "r" (to) : "memory");
19590+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19591
19592 from += 64;
19593 to += 64;
19594diff -urNp linux-3.1.4/arch/x86/lib/msr-reg.S linux-3.1.4/arch/x86/lib/msr-reg.S
19595--- linux-3.1.4/arch/x86/lib/msr-reg.S 2011-11-11 15:19:27.000000000 -0500
19596+++ linux-3.1.4/arch/x86/lib/msr-reg.S 2011-12-02 17:38:47.000000000 -0500
19597@@ -3,6 +3,7 @@
19598 #include <asm/dwarf2.h>
19599 #include <asm/asm.h>
19600 #include <asm/msr.h>
19601+#include <asm/alternative-asm.h>
19602
19603 #ifdef CONFIG_X86_64
19604 /*
19605@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
19606 CFI_STARTPROC
19607 pushq_cfi %rbx
19608 pushq_cfi %rbp
19609- movq %rdi, %r10 /* Save pointer */
19610+ movq %rdi, %r9 /* Save pointer */
19611 xorl %r11d, %r11d /* Return value */
19612 movl (%rdi), %eax
19613 movl 4(%rdi), %ecx
19614@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
19615 movl 28(%rdi), %edi
19616 CFI_REMEMBER_STATE
19617 1: \op
19618-2: movl %eax, (%r10)
19619+2: movl %eax, (%r9)
19620 movl %r11d, %eax /* Return value */
19621- movl %ecx, 4(%r10)
19622- movl %edx, 8(%r10)
19623- movl %ebx, 12(%r10)
19624- movl %ebp, 20(%r10)
19625- movl %esi, 24(%r10)
19626- movl %edi, 28(%r10)
19627+ movl %ecx, 4(%r9)
19628+ movl %edx, 8(%r9)
19629+ movl %ebx, 12(%r9)
19630+ movl %ebp, 20(%r9)
19631+ movl %esi, 24(%r9)
19632+ movl %edi, 28(%r9)
19633 popq_cfi %rbp
19634 popq_cfi %rbx
19635+ pax_force_retaddr
19636 ret
19637 3:
19638 CFI_RESTORE_STATE
19639diff -urNp linux-3.1.4/arch/x86/lib/putuser.S linux-3.1.4/arch/x86/lib/putuser.S
19640--- linux-3.1.4/arch/x86/lib/putuser.S 2011-11-11 15:19:27.000000000 -0500
19641+++ linux-3.1.4/arch/x86/lib/putuser.S 2011-11-16 18:39:07.000000000 -0500
19642@@ -15,7 +15,9 @@
19643 #include <asm/thread_info.h>
19644 #include <asm/errno.h>
19645 #include <asm/asm.h>
19646-
19647+#include <asm/segment.h>
19648+#include <asm/pgtable.h>
19649+#include <asm/alternative-asm.h>
19650
19651 /*
19652 * __put_user_X
19653@@ -29,52 +31,119 @@
19654 * as they get called from within inline assembly.
19655 */
19656
19657-#define ENTER CFI_STARTPROC ; \
19658- GET_THREAD_INFO(%_ASM_BX)
19659-#define EXIT ret ; \
19660+#define ENTER CFI_STARTPROC
19661+#define EXIT pax_force_retaddr; ret ; \
19662 CFI_ENDPROC
19663
19664+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19665+#define _DEST %_ASM_CX,%_ASM_BX
19666+#else
19667+#define _DEST %_ASM_CX
19668+#endif
19669+
19670+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19671+#define __copyuser_seg gs;
19672+#else
19673+#define __copyuser_seg
19674+#endif
19675+
19676 .text
19677 ENTRY(__put_user_1)
19678 ENTER
19679+
19680+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19681+ GET_THREAD_INFO(%_ASM_BX)
19682 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19683 jae bad_put_user
19684-1: movb %al,(%_ASM_CX)
19685+
19686+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19687+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19688+ cmp %_ASM_BX,%_ASM_CX
19689+ jb 1234f
19690+ xor %ebx,%ebx
19691+1234:
19692+#endif
19693+
19694+#endif
19695+
19696+1: __copyuser_seg movb %al,(_DEST)
19697 xor %eax,%eax
19698 EXIT
19699 ENDPROC(__put_user_1)
19700
19701 ENTRY(__put_user_2)
19702 ENTER
19703+
19704+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19705+ GET_THREAD_INFO(%_ASM_BX)
19706 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19707 sub $1,%_ASM_BX
19708 cmp %_ASM_BX,%_ASM_CX
19709 jae bad_put_user
19710-2: movw %ax,(%_ASM_CX)
19711+
19712+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19713+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19714+ cmp %_ASM_BX,%_ASM_CX
19715+ jb 1234f
19716+ xor %ebx,%ebx
19717+1234:
19718+#endif
19719+
19720+#endif
19721+
19722+2: __copyuser_seg movw %ax,(_DEST)
19723 xor %eax,%eax
19724 EXIT
19725 ENDPROC(__put_user_2)
19726
19727 ENTRY(__put_user_4)
19728 ENTER
19729+
19730+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19731+ GET_THREAD_INFO(%_ASM_BX)
19732 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19733 sub $3,%_ASM_BX
19734 cmp %_ASM_BX,%_ASM_CX
19735 jae bad_put_user
19736-3: movl %eax,(%_ASM_CX)
19737+
19738+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19739+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19740+ cmp %_ASM_BX,%_ASM_CX
19741+ jb 1234f
19742+ xor %ebx,%ebx
19743+1234:
19744+#endif
19745+
19746+#endif
19747+
19748+3: __copyuser_seg movl %eax,(_DEST)
19749 xor %eax,%eax
19750 EXIT
19751 ENDPROC(__put_user_4)
19752
19753 ENTRY(__put_user_8)
19754 ENTER
19755+
19756+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19757+ GET_THREAD_INFO(%_ASM_BX)
19758 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19759 sub $7,%_ASM_BX
19760 cmp %_ASM_BX,%_ASM_CX
19761 jae bad_put_user
19762-4: mov %_ASM_AX,(%_ASM_CX)
19763+
19764+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19765+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19766+ cmp %_ASM_BX,%_ASM_CX
19767+ jb 1234f
19768+ xor %ebx,%ebx
19769+1234:
19770+#endif
19771+
19772+#endif
19773+
19774+4: __copyuser_seg mov %_ASM_AX,(_DEST)
19775 #ifdef CONFIG_X86_32
19776-5: movl %edx,4(%_ASM_CX)
19777+5: __copyuser_seg movl %edx,4(_DEST)
19778 #endif
19779 xor %eax,%eax
19780 EXIT
19781diff -urNp linux-3.1.4/arch/x86/lib/rwlock.S linux-3.1.4/arch/x86/lib/rwlock.S
19782--- linux-3.1.4/arch/x86/lib/rwlock.S 2011-11-11 15:19:27.000000000 -0500
19783+++ linux-3.1.4/arch/x86/lib/rwlock.S 2011-11-16 18:39:07.000000000 -0500
19784@@ -23,6 +23,7 @@ ENTRY(__write_lock_failed)
19785 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
19786 jnz 0b
19787 ENDFRAME
19788+ pax_force_retaddr
19789 ret
19790 CFI_ENDPROC
19791 END(__write_lock_failed)
19792@@ -39,6 +40,7 @@ ENTRY(__read_lock_failed)
19793 READ_LOCK_SIZE(dec) (%__lock_ptr)
19794 js 0b
19795 ENDFRAME
19796+ pax_force_retaddr
19797 ret
19798 CFI_ENDPROC
19799 END(__read_lock_failed)
19800diff -urNp linux-3.1.4/arch/x86/lib/rwsem.S linux-3.1.4/arch/x86/lib/rwsem.S
19801--- linux-3.1.4/arch/x86/lib/rwsem.S 2011-11-11 15:19:27.000000000 -0500
19802+++ linux-3.1.4/arch/x86/lib/rwsem.S 2011-11-16 18:39:07.000000000 -0500
19803@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
19804 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
19805 CFI_RESTORE __ASM_REG(dx)
19806 restore_common_regs
19807+ pax_force_retaddr
19808 ret
19809 CFI_ENDPROC
19810 ENDPROC(call_rwsem_down_read_failed)
19811@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
19812 movq %rax,%rdi
19813 call rwsem_down_write_failed
19814 restore_common_regs
19815+ pax_force_retaddr
19816 ret
19817 CFI_ENDPROC
19818 ENDPROC(call_rwsem_down_write_failed)
19819@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
19820 movq %rax,%rdi
19821 call rwsem_wake
19822 restore_common_regs
19823-1: ret
19824+1: pax_force_retaddr
19825+ ret
19826 CFI_ENDPROC
19827 ENDPROC(call_rwsem_wake)
19828
19829@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
19830 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
19831 CFI_RESTORE __ASM_REG(dx)
19832 restore_common_regs
19833+ pax_force_retaddr
19834 ret
19835 CFI_ENDPROC
19836 ENDPROC(call_rwsem_downgrade_wake)
19837diff -urNp linux-3.1.4/arch/x86/lib/thunk_64.S linux-3.1.4/arch/x86/lib/thunk_64.S
19838--- linux-3.1.4/arch/x86/lib/thunk_64.S 2011-11-11 15:19:27.000000000 -0500
19839+++ linux-3.1.4/arch/x86/lib/thunk_64.S 2011-11-16 18:39:07.000000000 -0500
19840@@ -8,6 +8,7 @@
19841 #include <linux/linkage.h>
19842 #include <asm/dwarf2.h>
19843 #include <asm/calling.h>
19844+#include <asm/alternative-asm.h>
19845
19846 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
19847 .macro THUNK name, func, put_ret_addr_in_rdi=0
19848@@ -41,5 +42,6 @@
19849 SAVE_ARGS
19850 restore:
19851 RESTORE_ARGS
19852+ pax_force_retaddr
19853 ret
19854 CFI_ENDPROC
19855diff -urNp linux-3.1.4/arch/x86/lib/usercopy_32.c linux-3.1.4/arch/x86/lib/usercopy_32.c
19856--- linux-3.1.4/arch/x86/lib/usercopy_32.c 2011-11-11 15:19:27.000000000 -0500
19857+++ linux-3.1.4/arch/x86/lib/usercopy_32.c 2011-11-16 18:39:07.000000000 -0500
19858@@ -43,7 +43,7 @@ do { \
19859 __asm__ __volatile__( \
19860 " testl %1,%1\n" \
19861 " jz 2f\n" \
19862- "0: lodsb\n" \
19863+ "0: "__copyuser_seg"lodsb\n" \
19864 " stosb\n" \
19865 " testb %%al,%%al\n" \
19866 " jz 1f\n" \
19867@@ -128,10 +128,12 @@ do { \
19868 int __d0; \
19869 might_fault(); \
19870 __asm__ __volatile__( \
19871+ __COPYUSER_SET_ES \
19872 "0: rep; stosl\n" \
19873 " movl %2,%0\n" \
19874 "1: rep; stosb\n" \
19875 "2:\n" \
19876+ __COPYUSER_RESTORE_ES \
19877 ".section .fixup,\"ax\"\n" \
19878 "3: lea 0(%2,%0,4),%0\n" \
19879 " jmp 2b\n" \
19880@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19881 might_fault();
19882
19883 __asm__ __volatile__(
19884+ __COPYUSER_SET_ES
19885 " testl %0, %0\n"
19886 " jz 3f\n"
19887 " andl %0,%%ecx\n"
19888@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19889 " subl %%ecx,%0\n"
19890 " addl %0,%%eax\n"
19891 "1:\n"
19892+ __COPYUSER_RESTORE_ES
19893 ".section .fixup,\"ax\"\n"
19894 "2: xorl %%eax,%%eax\n"
19895 " jmp 1b\n"
19896@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19897
19898 #ifdef CONFIG_X86_INTEL_USERCOPY
19899 static unsigned long
19900-__copy_user_intel(void __user *to, const void *from, unsigned long size)
19901+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19902 {
19903 int d0, d1;
19904 __asm__ __volatile__(
19905@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19906 " .align 2,0x90\n"
19907 "3: movl 0(%4), %%eax\n"
19908 "4: movl 4(%4), %%edx\n"
19909- "5: movl %%eax, 0(%3)\n"
19910- "6: movl %%edx, 4(%3)\n"
19911+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19912+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19913 "7: movl 8(%4), %%eax\n"
19914 "8: movl 12(%4),%%edx\n"
19915- "9: movl %%eax, 8(%3)\n"
19916- "10: movl %%edx, 12(%3)\n"
19917+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19918+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19919 "11: movl 16(%4), %%eax\n"
19920 "12: movl 20(%4), %%edx\n"
19921- "13: movl %%eax, 16(%3)\n"
19922- "14: movl %%edx, 20(%3)\n"
19923+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19924+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19925 "15: movl 24(%4), %%eax\n"
19926 "16: movl 28(%4), %%edx\n"
19927- "17: movl %%eax, 24(%3)\n"
19928- "18: movl %%edx, 28(%3)\n"
19929+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19930+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19931 "19: movl 32(%4), %%eax\n"
19932 "20: movl 36(%4), %%edx\n"
19933- "21: movl %%eax, 32(%3)\n"
19934- "22: movl %%edx, 36(%3)\n"
19935+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19936+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19937 "23: movl 40(%4), %%eax\n"
19938 "24: movl 44(%4), %%edx\n"
19939- "25: movl %%eax, 40(%3)\n"
19940- "26: movl %%edx, 44(%3)\n"
19941+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19942+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19943 "27: movl 48(%4), %%eax\n"
19944 "28: movl 52(%4), %%edx\n"
19945- "29: movl %%eax, 48(%3)\n"
19946- "30: movl %%edx, 52(%3)\n"
19947+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19948+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19949 "31: movl 56(%4), %%eax\n"
19950 "32: movl 60(%4), %%edx\n"
19951- "33: movl %%eax, 56(%3)\n"
19952- "34: movl %%edx, 60(%3)\n"
19953+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19954+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19955 " addl $-64, %0\n"
19956 " addl $64, %4\n"
19957 " addl $64, %3\n"
19958@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19959 " shrl $2, %0\n"
19960 " andl $3, %%eax\n"
19961 " cld\n"
19962+ __COPYUSER_SET_ES
19963 "99: rep; movsl\n"
19964 "36: movl %%eax, %0\n"
19965 "37: rep; movsb\n"
19966 "100:\n"
19967+ __COPYUSER_RESTORE_ES
19968+ ".section .fixup,\"ax\"\n"
19969+ "101: lea 0(%%eax,%0,4),%0\n"
19970+ " jmp 100b\n"
19971+ ".previous\n"
19972+ ".section __ex_table,\"a\"\n"
19973+ " .align 4\n"
19974+ " .long 1b,100b\n"
19975+ " .long 2b,100b\n"
19976+ " .long 3b,100b\n"
19977+ " .long 4b,100b\n"
19978+ " .long 5b,100b\n"
19979+ " .long 6b,100b\n"
19980+ " .long 7b,100b\n"
19981+ " .long 8b,100b\n"
19982+ " .long 9b,100b\n"
19983+ " .long 10b,100b\n"
19984+ " .long 11b,100b\n"
19985+ " .long 12b,100b\n"
19986+ " .long 13b,100b\n"
19987+ " .long 14b,100b\n"
19988+ " .long 15b,100b\n"
19989+ " .long 16b,100b\n"
19990+ " .long 17b,100b\n"
19991+ " .long 18b,100b\n"
19992+ " .long 19b,100b\n"
19993+ " .long 20b,100b\n"
19994+ " .long 21b,100b\n"
19995+ " .long 22b,100b\n"
19996+ " .long 23b,100b\n"
19997+ " .long 24b,100b\n"
19998+ " .long 25b,100b\n"
19999+ " .long 26b,100b\n"
20000+ " .long 27b,100b\n"
20001+ " .long 28b,100b\n"
20002+ " .long 29b,100b\n"
20003+ " .long 30b,100b\n"
20004+ " .long 31b,100b\n"
20005+ " .long 32b,100b\n"
20006+ " .long 33b,100b\n"
20007+ " .long 34b,100b\n"
20008+ " .long 35b,100b\n"
20009+ " .long 36b,100b\n"
20010+ " .long 37b,100b\n"
20011+ " .long 99b,101b\n"
20012+ ".previous"
20013+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20014+ : "1"(to), "2"(from), "0"(size)
20015+ : "eax", "edx", "memory");
20016+ return size;
20017+}
20018+
20019+static unsigned long
20020+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20021+{
20022+ int d0, d1;
20023+ __asm__ __volatile__(
20024+ " .align 2,0x90\n"
20025+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20026+ " cmpl $67, %0\n"
20027+ " jbe 3f\n"
20028+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20029+ " .align 2,0x90\n"
20030+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20031+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20032+ "5: movl %%eax, 0(%3)\n"
20033+ "6: movl %%edx, 4(%3)\n"
20034+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20035+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20036+ "9: movl %%eax, 8(%3)\n"
20037+ "10: movl %%edx, 12(%3)\n"
20038+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20039+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20040+ "13: movl %%eax, 16(%3)\n"
20041+ "14: movl %%edx, 20(%3)\n"
20042+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20043+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20044+ "17: movl %%eax, 24(%3)\n"
20045+ "18: movl %%edx, 28(%3)\n"
20046+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20047+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20048+ "21: movl %%eax, 32(%3)\n"
20049+ "22: movl %%edx, 36(%3)\n"
20050+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20051+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20052+ "25: movl %%eax, 40(%3)\n"
20053+ "26: movl %%edx, 44(%3)\n"
20054+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20055+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20056+ "29: movl %%eax, 48(%3)\n"
20057+ "30: movl %%edx, 52(%3)\n"
20058+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20059+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20060+ "33: movl %%eax, 56(%3)\n"
20061+ "34: movl %%edx, 60(%3)\n"
20062+ " addl $-64, %0\n"
20063+ " addl $64, %4\n"
20064+ " addl $64, %3\n"
20065+ " cmpl $63, %0\n"
20066+ " ja 1b\n"
20067+ "35: movl %0, %%eax\n"
20068+ " shrl $2, %0\n"
20069+ " andl $3, %%eax\n"
20070+ " cld\n"
20071+ "99: rep; "__copyuser_seg" movsl\n"
20072+ "36: movl %%eax, %0\n"
20073+ "37: rep; "__copyuser_seg" movsb\n"
20074+ "100:\n"
20075 ".section .fixup,\"ax\"\n"
20076 "101: lea 0(%%eax,%0,4),%0\n"
20077 " jmp 100b\n"
20078@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20079 int d0, d1;
20080 __asm__ __volatile__(
20081 " .align 2,0x90\n"
20082- "0: movl 32(%4), %%eax\n"
20083+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20084 " cmpl $67, %0\n"
20085 " jbe 2f\n"
20086- "1: movl 64(%4), %%eax\n"
20087+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20088 " .align 2,0x90\n"
20089- "2: movl 0(%4), %%eax\n"
20090- "21: movl 4(%4), %%edx\n"
20091+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20092+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20093 " movl %%eax, 0(%3)\n"
20094 " movl %%edx, 4(%3)\n"
20095- "3: movl 8(%4), %%eax\n"
20096- "31: movl 12(%4),%%edx\n"
20097+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20098+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20099 " movl %%eax, 8(%3)\n"
20100 " movl %%edx, 12(%3)\n"
20101- "4: movl 16(%4), %%eax\n"
20102- "41: movl 20(%4), %%edx\n"
20103+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20104+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20105 " movl %%eax, 16(%3)\n"
20106 " movl %%edx, 20(%3)\n"
20107- "10: movl 24(%4), %%eax\n"
20108- "51: movl 28(%4), %%edx\n"
20109+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20110+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20111 " movl %%eax, 24(%3)\n"
20112 " movl %%edx, 28(%3)\n"
20113- "11: movl 32(%4), %%eax\n"
20114- "61: movl 36(%4), %%edx\n"
20115+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20116+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20117 " movl %%eax, 32(%3)\n"
20118 " movl %%edx, 36(%3)\n"
20119- "12: movl 40(%4), %%eax\n"
20120- "71: movl 44(%4), %%edx\n"
20121+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20122+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20123 " movl %%eax, 40(%3)\n"
20124 " movl %%edx, 44(%3)\n"
20125- "13: movl 48(%4), %%eax\n"
20126- "81: movl 52(%4), %%edx\n"
20127+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20128+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20129 " movl %%eax, 48(%3)\n"
20130 " movl %%edx, 52(%3)\n"
20131- "14: movl 56(%4), %%eax\n"
20132- "91: movl 60(%4), %%edx\n"
20133+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20134+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20135 " movl %%eax, 56(%3)\n"
20136 " movl %%edx, 60(%3)\n"
20137 " addl $-64, %0\n"
20138@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20139 " shrl $2, %0\n"
20140 " andl $3, %%eax\n"
20141 " cld\n"
20142- "6: rep; movsl\n"
20143+ "6: rep; "__copyuser_seg" movsl\n"
20144 " movl %%eax,%0\n"
20145- "7: rep; movsb\n"
20146+ "7: rep; "__copyuser_seg" movsb\n"
20147 "8:\n"
20148 ".section .fixup,\"ax\"\n"
20149 "9: lea 0(%%eax,%0,4),%0\n"
20150@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20151
20152 __asm__ __volatile__(
20153 " .align 2,0x90\n"
20154- "0: movl 32(%4), %%eax\n"
20155+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20156 " cmpl $67, %0\n"
20157 " jbe 2f\n"
20158- "1: movl 64(%4), %%eax\n"
20159+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20160 " .align 2,0x90\n"
20161- "2: movl 0(%4), %%eax\n"
20162- "21: movl 4(%4), %%edx\n"
20163+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20164+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20165 " movnti %%eax, 0(%3)\n"
20166 " movnti %%edx, 4(%3)\n"
20167- "3: movl 8(%4), %%eax\n"
20168- "31: movl 12(%4),%%edx\n"
20169+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20170+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20171 " movnti %%eax, 8(%3)\n"
20172 " movnti %%edx, 12(%3)\n"
20173- "4: movl 16(%4), %%eax\n"
20174- "41: movl 20(%4), %%edx\n"
20175+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20176+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20177 " movnti %%eax, 16(%3)\n"
20178 " movnti %%edx, 20(%3)\n"
20179- "10: movl 24(%4), %%eax\n"
20180- "51: movl 28(%4), %%edx\n"
20181+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20182+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20183 " movnti %%eax, 24(%3)\n"
20184 " movnti %%edx, 28(%3)\n"
20185- "11: movl 32(%4), %%eax\n"
20186- "61: movl 36(%4), %%edx\n"
20187+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20188+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20189 " movnti %%eax, 32(%3)\n"
20190 " movnti %%edx, 36(%3)\n"
20191- "12: movl 40(%4), %%eax\n"
20192- "71: movl 44(%4), %%edx\n"
20193+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20194+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20195 " movnti %%eax, 40(%3)\n"
20196 " movnti %%edx, 44(%3)\n"
20197- "13: movl 48(%4), %%eax\n"
20198- "81: movl 52(%4), %%edx\n"
20199+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20200+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20201 " movnti %%eax, 48(%3)\n"
20202 " movnti %%edx, 52(%3)\n"
20203- "14: movl 56(%4), %%eax\n"
20204- "91: movl 60(%4), %%edx\n"
20205+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20206+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20207 " movnti %%eax, 56(%3)\n"
20208 " movnti %%edx, 60(%3)\n"
20209 " addl $-64, %0\n"
20210@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20211 " shrl $2, %0\n"
20212 " andl $3, %%eax\n"
20213 " cld\n"
20214- "6: rep; movsl\n"
20215+ "6: rep; "__copyuser_seg" movsl\n"
20216 " movl %%eax,%0\n"
20217- "7: rep; movsb\n"
20218+ "7: rep; "__copyuser_seg" movsb\n"
20219 "8:\n"
20220 ".section .fixup,\"ax\"\n"
20221 "9: lea 0(%%eax,%0,4),%0\n"
20222@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20223
20224 __asm__ __volatile__(
20225 " .align 2,0x90\n"
20226- "0: movl 32(%4), %%eax\n"
20227+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20228 " cmpl $67, %0\n"
20229 " jbe 2f\n"
20230- "1: movl 64(%4), %%eax\n"
20231+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20232 " .align 2,0x90\n"
20233- "2: movl 0(%4), %%eax\n"
20234- "21: movl 4(%4), %%edx\n"
20235+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20236+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20237 " movnti %%eax, 0(%3)\n"
20238 " movnti %%edx, 4(%3)\n"
20239- "3: movl 8(%4), %%eax\n"
20240- "31: movl 12(%4),%%edx\n"
20241+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20242+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20243 " movnti %%eax, 8(%3)\n"
20244 " movnti %%edx, 12(%3)\n"
20245- "4: movl 16(%4), %%eax\n"
20246- "41: movl 20(%4), %%edx\n"
20247+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20248+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20249 " movnti %%eax, 16(%3)\n"
20250 " movnti %%edx, 20(%3)\n"
20251- "10: movl 24(%4), %%eax\n"
20252- "51: movl 28(%4), %%edx\n"
20253+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20254+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20255 " movnti %%eax, 24(%3)\n"
20256 " movnti %%edx, 28(%3)\n"
20257- "11: movl 32(%4), %%eax\n"
20258- "61: movl 36(%4), %%edx\n"
20259+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20260+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20261 " movnti %%eax, 32(%3)\n"
20262 " movnti %%edx, 36(%3)\n"
20263- "12: movl 40(%4), %%eax\n"
20264- "71: movl 44(%4), %%edx\n"
20265+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20266+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20267 " movnti %%eax, 40(%3)\n"
20268 " movnti %%edx, 44(%3)\n"
20269- "13: movl 48(%4), %%eax\n"
20270- "81: movl 52(%4), %%edx\n"
20271+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20272+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20273 " movnti %%eax, 48(%3)\n"
20274 " movnti %%edx, 52(%3)\n"
20275- "14: movl 56(%4), %%eax\n"
20276- "91: movl 60(%4), %%edx\n"
20277+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20278+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20279 " movnti %%eax, 56(%3)\n"
20280 " movnti %%edx, 60(%3)\n"
20281 " addl $-64, %0\n"
20282@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20283 " shrl $2, %0\n"
20284 " andl $3, %%eax\n"
20285 " cld\n"
20286- "6: rep; movsl\n"
20287+ "6: rep; "__copyuser_seg" movsl\n"
20288 " movl %%eax,%0\n"
20289- "7: rep; movsb\n"
20290+ "7: rep; "__copyuser_seg" movsb\n"
20291 "8:\n"
20292 ".section .fixup,\"ax\"\n"
20293 "9: lea 0(%%eax,%0,4),%0\n"
20294@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20295 */
20296 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20297 unsigned long size);
20298-unsigned long __copy_user_intel(void __user *to, const void *from,
20299+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20300+ unsigned long size);
20301+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20302 unsigned long size);
20303 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20304 const void __user *from, unsigned long size);
20305 #endif /* CONFIG_X86_INTEL_USERCOPY */
20306
20307 /* Generic arbitrary sized copy. */
20308-#define __copy_user(to, from, size) \
20309+#define __copy_user(to, from, size, prefix, set, restore) \
20310 do { \
20311 int __d0, __d1, __d2; \
20312 __asm__ __volatile__( \
20313+ set \
20314 " cmp $7,%0\n" \
20315 " jbe 1f\n" \
20316 " movl %1,%0\n" \
20317 " negl %0\n" \
20318 " andl $7,%0\n" \
20319 " subl %0,%3\n" \
20320- "4: rep; movsb\n" \
20321+ "4: rep; "prefix"movsb\n" \
20322 " movl %3,%0\n" \
20323 " shrl $2,%0\n" \
20324 " andl $3,%3\n" \
20325 " .align 2,0x90\n" \
20326- "0: rep; movsl\n" \
20327+ "0: rep; "prefix"movsl\n" \
20328 " movl %3,%0\n" \
20329- "1: rep; movsb\n" \
20330+ "1: rep; "prefix"movsb\n" \
20331 "2:\n" \
20332+ restore \
20333 ".section .fixup,\"ax\"\n" \
20334 "5: addl %3,%0\n" \
20335 " jmp 2b\n" \
20336@@ -682,14 +799,14 @@ do { \
20337 " negl %0\n" \
20338 " andl $7,%0\n" \
20339 " subl %0,%3\n" \
20340- "4: rep; movsb\n" \
20341+ "4: rep; "__copyuser_seg"movsb\n" \
20342 " movl %3,%0\n" \
20343 " shrl $2,%0\n" \
20344 " andl $3,%3\n" \
20345 " .align 2,0x90\n" \
20346- "0: rep; movsl\n" \
20347+ "0: rep; "__copyuser_seg"movsl\n" \
20348 " movl %3,%0\n" \
20349- "1: rep; movsb\n" \
20350+ "1: rep; "__copyuser_seg"movsb\n" \
20351 "2:\n" \
20352 ".section .fixup,\"ax\"\n" \
20353 "5: addl %3,%0\n" \
20354@@ -775,9 +892,9 @@ survive:
20355 }
20356 #endif
20357 if (movsl_is_ok(to, from, n))
20358- __copy_user(to, from, n);
20359+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20360 else
20361- n = __copy_user_intel(to, from, n);
20362+ n = __generic_copy_to_user_intel(to, from, n);
20363 return n;
20364 }
20365 EXPORT_SYMBOL(__copy_to_user_ll);
20366@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20367 unsigned long n)
20368 {
20369 if (movsl_is_ok(to, from, n))
20370- __copy_user(to, from, n);
20371+ __copy_user(to, from, n, __copyuser_seg, "", "");
20372 else
20373- n = __copy_user_intel((void __user *)to,
20374- (const void *)from, n);
20375+ n = __generic_copy_from_user_intel(to, from, n);
20376 return n;
20377 }
20378 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20379@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
20380 if (n > 64 && cpu_has_xmm2)
20381 n = __copy_user_intel_nocache(to, from, n);
20382 else
20383- __copy_user(to, from, n);
20384+ __copy_user(to, from, n, __copyuser_seg, "", "");
20385 #else
20386- __copy_user(to, from, n);
20387+ __copy_user(to, from, n, __copyuser_seg, "", "");
20388 #endif
20389 return n;
20390 }
20391 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20392
20393-/**
20394- * copy_to_user: - Copy a block of data into user space.
20395- * @to: Destination address, in user space.
20396- * @from: Source address, in kernel space.
20397- * @n: Number of bytes to copy.
20398- *
20399- * Context: User context only. This function may sleep.
20400- *
20401- * Copy data from kernel space to user space.
20402- *
20403- * Returns number of bytes that could not be copied.
20404- * On success, this will be zero.
20405- */
20406-unsigned long
20407-copy_to_user(void __user *to, const void *from, unsigned long n)
20408+void copy_from_user_overflow(void)
20409 {
20410- if (access_ok(VERIFY_WRITE, to, n))
20411- n = __copy_to_user(to, from, n);
20412- return n;
20413+ WARN(1, "Buffer overflow detected!\n");
20414 }
20415-EXPORT_SYMBOL(copy_to_user);
20416+EXPORT_SYMBOL(copy_from_user_overflow);
20417
20418-/**
20419- * copy_from_user: - Copy a block of data from user space.
20420- * @to: Destination address, in kernel space.
20421- * @from: Source address, in user space.
20422- * @n: Number of bytes to copy.
20423- *
20424- * Context: User context only. This function may sleep.
20425- *
20426- * Copy data from user space to kernel space.
20427- *
20428- * Returns number of bytes that could not be copied.
20429- * On success, this will be zero.
20430- *
20431- * If some data could not be copied, this function will pad the copied
20432- * data to the requested size using zero bytes.
20433- */
20434-unsigned long
20435-_copy_from_user(void *to, const void __user *from, unsigned long n)
20436+void copy_to_user_overflow(void)
20437 {
20438- if (access_ok(VERIFY_READ, from, n))
20439- n = __copy_from_user(to, from, n);
20440- else
20441- memset(to, 0, n);
20442- return n;
20443+ WARN(1, "Buffer overflow detected!\n");
20444 }
20445-EXPORT_SYMBOL(_copy_from_user);
20446+EXPORT_SYMBOL(copy_to_user_overflow);
20447
20448-void copy_from_user_overflow(void)
20449+#ifdef CONFIG_PAX_MEMORY_UDEREF
20450+void __set_fs(mm_segment_t x)
20451 {
20452- WARN(1, "Buffer overflow detected!\n");
20453+ switch (x.seg) {
20454+ case 0:
20455+ loadsegment(gs, 0);
20456+ break;
20457+ case TASK_SIZE_MAX:
20458+ loadsegment(gs, __USER_DS);
20459+ break;
20460+ case -1UL:
20461+ loadsegment(gs, __KERNEL_DS);
20462+ break;
20463+ default:
20464+ BUG();
20465+ }
20466+ return;
20467 }
20468-EXPORT_SYMBOL(copy_from_user_overflow);
20469+EXPORT_SYMBOL(__set_fs);
20470+
20471+void set_fs(mm_segment_t x)
20472+{
20473+ current_thread_info()->addr_limit = x;
20474+ __set_fs(x);
20475+}
20476+EXPORT_SYMBOL(set_fs);
20477+#endif
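
The usercopy_32.c hunks above all follow one pattern: __copy_user and the Intel
variants take an instruction-prefix string ("__copyuser_seg", or "" when no
segment override is wanted) and splice it into the asm template by C
string-literal concatenation, so a single template can emit either plain or
segment-overridden string moves. A minimal sketch of that splicing trick,
assuming only x86 and gcc-style inline asm (COPY_BYTES and its operands are
illustrative, not taken from the patch):

  #include <stdio.h>

  /* The 'prefix' string literal is concatenated into the asm template, the
   * same way the patch threads __copyuser_seg through __copy_user. Only the
   * empty prefix is exercised here; a real segment override such as "%%gs:"
   * needs the segment setup the kernel side provides. */
  #define COPY_BYTES(dst, src, n, prefix)                        \
  do {                                                           \
          unsigned long __n = (n);                               \
          void *__to = (dst);                                    \
          const void *__from = (src);                            \
          __asm__ __volatile__(                                  \
                  prefix "rep; movsb\n"                          \
                  : "+c" (__n), "+D" (__to), "+S" (__from)       \
                  :                                              \
                  : "memory");                                   \
  } while (0)

  int main(void)
  {
          char src[16] = "hello, world";
          char dst[16] = { 0 };

          COPY_BYTES(dst, src, sizeof(src), "");  /* empty prefix: plain copy */
          printf("%s\n", dst);
          return 0;
  }
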
20478diff -urNp linux-3.1.4/arch/x86/lib/usercopy_64.c linux-3.1.4/arch/x86/lib/usercopy_64.c
20479--- linux-3.1.4/arch/x86/lib/usercopy_64.c 2011-11-11 15:19:27.000000000 -0500
20480+++ linux-3.1.4/arch/x86/lib/usercopy_64.c 2011-11-16 18:39:07.000000000 -0500
20481@@ -42,6 +42,12 @@ long
20482 __strncpy_from_user(char *dst, const char __user *src, long count)
20483 {
20484 long res;
20485+
20486+#ifdef CONFIG_PAX_MEMORY_UDEREF
20487+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20488+ src += PAX_USER_SHADOW_BASE;
20489+#endif
20490+
20491 __do_strncpy_from_user(dst, src, count, res);
20492 return res;
20493 }
20494@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20495 {
20496 long __d0;
20497 might_fault();
20498+
20499+#ifdef CONFIG_PAX_MEMORY_UDEREF
20500+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20501+ addr += PAX_USER_SHADOW_BASE;
20502+#endif
20503+
20504 /* no memory constraint because it doesn't change any memory gcc knows
20505 about */
20506 asm volatile(
20507@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20508
20509 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20510 {
20511- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20512- return copy_user_generic((__force void *)to, (__force void *)from, len);
20513- }
20514- return len;
20515+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20516+
20517+#ifdef CONFIG_PAX_MEMORY_UDEREF
20518+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20519+ to += PAX_USER_SHADOW_BASE;
20520+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20521+ from += PAX_USER_SHADOW_BASE;
20522+#endif
20523+
20524+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
20525+ }
20526+ return len;
20527 }
20528 EXPORT_SYMBOL(copy_in_user);
20529
20530@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
20531 * it is not necessary to optimize tail handling.
20532 */
20533 unsigned long
20534-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
20535+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
20536 {
20537 char c;
20538 unsigned zero_len;
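
The recurring conditional in the usercopy_64.c hunks above — "if the pointer is
still below PAX_USER_SHADOW_BASE, add PAX_USER_SHADOW_BASE" — rebases a raw
userland address into the shifted alias mapping that UDEREF routes kernel
accesses to userland through. A minimal sketch of that rebasing rule, with a
made-up base value (SHADOW_BASE and shadow_rebase are illustrative stand-ins,
not the in-tree definitions):

  #include <stdio.h>

  #define SHADOW_BASE 0x100000000000UL  /* stand-in for PAX_USER_SHADOW_BASE */

  /* Mirrors the "if (p < base) p += base" adjustment added above:
   * rebase once, leave already-rebased pointers alone. */
  static unsigned long shadow_rebase(unsigned long uaddr)
  {
          return uaddr < SHADOW_BASE ? uaddr + SHADOW_BASE : uaddr;
  }

  int main(void)
  {
          unsigned long raw = 0x12340000UL;          /* not yet rebased */
          unsigned long done = SHADOW_BASE + raw;    /* already rebased */

          printf("%#lx -> %#lx\n", raw, shadow_rebase(raw));
          printf("%#lx -> %#lx\n", done, shadow_rebase(done));
          return 0;
  }
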
20539diff -urNp linux-3.1.4/arch/x86/Makefile linux-3.1.4/arch/x86/Makefile
20540--- linux-3.1.4/arch/x86/Makefile 2011-11-11 15:19:27.000000000 -0500
20541+++ linux-3.1.4/arch/x86/Makefile 2011-11-17 18:30:30.000000000 -0500
20542@@ -46,6 +46,7 @@ else
20543 UTS_MACHINE := x86_64
20544 CHECKFLAGS += -D__x86_64__ -m64
20545
20546+ biarch := $(call cc-option,-m64)
20547 KBUILD_AFLAGS += -m64
20548 KBUILD_CFLAGS += -m64
20549
20550@@ -195,3 +196,12 @@ define archhelp
20551 echo ' FDARGS="..." arguments for the booted kernel'
20552 echo ' FDINITRD=file initrd for the booted kernel'
20553 endef
20554+
20555+define OLD_LD
20556+
20557+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20558+*** Please upgrade your binutils to 2.18 or newer
20559+endef
20560+
20561+archprepare:
20562+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20563diff -urNp linux-3.1.4/arch/x86/mm/extable.c linux-3.1.4/arch/x86/mm/extable.c
20564--- linux-3.1.4/arch/x86/mm/extable.c 2011-11-11 15:19:27.000000000 -0500
20565+++ linux-3.1.4/arch/x86/mm/extable.c 2011-11-16 18:39:07.000000000 -0500
20566@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
20567 const struct exception_table_entry *fixup;
20568
20569 #ifdef CONFIG_PNPBIOS
20570- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20571+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20572 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20573 extern u32 pnp_bios_is_utter_crap;
20574 pnp_bios_is_utter_crap = 1;
20575diff -urNp linux-3.1.4/arch/x86/mm/fault.c linux-3.1.4/arch/x86/mm/fault.c
20576--- linux-3.1.4/arch/x86/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
20577+++ linux-3.1.4/arch/x86/mm/fault.c 2011-11-16 20:43:50.000000000 -0500
20578@@ -13,11 +13,18 @@
20579 #include <linux/perf_event.h> /* perf_sw_event */
20580 #include <linux/hugetlb.h> /* hstate_index_to_shift */
20581 #include <linux/prefetch.h> /* prefetchw */
20582+#include <linux/unistd.h>
20583+#include <linux/compiler.h>
20584
20585 #include <asm/traps.h> /* dotraplinkage, ... */
20586 #include <asm/pgalloc.h> /* pgd_*(), ... */
20587 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20588 #include <asm/vsyscall.h>
20589+#include <asm/tlbflush.h>
20590+
20591+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20592+#include <asm/stacktrace.h>
20593+#endif
20594
20595 /*
20596 * Page fault error code bits:
20597@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_
20598 int ret = 0;
20599
20600 /* kprobe_running() needs smp_processor_id() */
20601- if (kprobes_built_in() && !user_mode_vm(regs)) {
20602+ if (kprobes_built_in() && !user_mode(regs)) {
20603 preempt_disable();
20604 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20605 ret = 1;
20606@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
20607 return !instr_lo || (instr_lo>>1) == 1;
20608 case 0x00:
20609 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20610- if (probe_kernel_address(instr, opcode))
20611+ if (user_mode(regs)) {
20612+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
20613+ return 0;
20614+ } else if (probe_kernel_address(instr, opcode))
20615 return 0;
20616
20617 *prefetch = (instr_lo == 0xF) &&
20618@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
20619 while (instr < max_instr) {
20620 unsigned char opcode;
20621
20622- if (probe_kernel_address(instr, opcode))
20623+ if (user_mode(regs)) {
20624+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
20625+ break;
20626+ } else if (probe_kernel_address(instr, opcode))
20627 break;
20628
20629 instr++;
20630@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int s
20631 force_sig_info(si_signo, &info, tsk);
20632 }
20633
20634+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20635+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
20636+#endif
20637+
20638+#ifdef CONFIG_PAX_EMUTRAMP
20639+static int pax_handle_fetch_fault(struct pt_regs *regs);
20640+#endif
20641+
20642+#ifdef CONFIG_PAX_PAGEEXEC
20643+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20644+{
20645+ pgd_t *pgd;
20646+ pud_t *pud;
20647+ pmd_t *pmd;
20648+
20649+ pgd = pgd_offset(mm, address);
20650+ if (!pgd_present(*pgd))
20651+ return NULL;
20652+ pud = pud_offset(pgd, address);
20653+ if (!pud_present(*pud))
20654+ return NULL;
20655+ pmd = pmd_offset(pud, address);
20656+ if (!pmd_present(*pmd))
20657+ return NULL;
20658+ return pmd;
20659+}
20660+#endif
20661+
20662 DEFINE_SPINLOCK(pgd_lock);
20663 LIST_HEAD(pgd_list);
20664
20665@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
20666 for (address = VMALLOC_START & PMD_MASK;
20667 address >= TASK_SIZE && address < FIXADDR_TOP;
20668 address += PMD_SIZE) {
20669+
20670+#ifdef CONFIG_PAX_PER_CPU_PGD
20671+ unsigned long cpu;
20672+#else
20673 struct page *page;
20674+#endif
20675
20676 spin_lock(&pgd_lock);
20677+
20678+#ifdef CONFIG_PAX_PER_CPU_PGD
20679+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20680+ pgd_t *pgd = get_cpu_pgd(cpu);
20681+ pmd_t *ret;
20682+#else
20683 list_for_each_entry(page, &pgd_list, lru) {
20684+ pgd_t *pgd = page_address(page);
20685 spinlock_t *pgt_lock;
20686 pmd_t *ret;
20687
20688@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
20689 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
20690
20691 spin_lock(pgt_lock);
20692- ret = vmalloc_sync_one(page_address(page), address);
20693+#endif
20694+
20695+ ret = vmalloc_sync_one(pgd, address);
20696+
20697+#ifndef CONFIG_PAX_PER_CPU_PGD
20698 spin_unlock(pgt_lock);
20699+#endif
20700
20701 if (!ret)
20702 break;
20703@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fa
20704 * an interrupt in the middle of a task switch..
20705 */
20706 pgd_paddr = read_cr3();
20707+
20708+#ifdef CONFIG_PAX_PER_CPU_PGD
20709+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20710+#endif
20711+
20712 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20713 if (!pmd_k)
20714 return -1;
20715@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fa
20716 * happen within a race in page table update. In the later
20717 * case just flush:
20718 */
20719+
20720+#ifdef CONFIG_PAX_PER_CPU_PGD
20721+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20722+ pgd = pgd_offset_cpu(smp_processor_id(), address);
20723+#else
20724 pgd = pgd_offset(current->active_mm, address);
20725+#endif
20726+
20727 pgd_ref = pgd_offset_k(address);
20728 if (pgd_none(*pgd_ref))
20729 return -1;
20730@@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *r
20731 static int is_errata100(struct pt_regs *regs, unsigned long address)
20732 {
20733 #ifdef CONFIG_X86_64
20734- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20735+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20736 return 1;
20737 #endif
20738 return 0;
20739@@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *r
20740 }
20741
20742 static const char nx_warning[] = KERN_CRIT
20743-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20744+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20745
20746 static void
20747 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20748@@ -570,14 +640,25 @@ show_fault_oops(struct pt_regs *regs, un
20749 if (!oops_may_print())
20750 return;
20751
20752- if (error_code & PF_INSTR) {
20753+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
20754 unsigned int level;
20755
20756 pte_t *pte = lookup_address(address, &level);
20757
20758 if (pte && pte_present(*pte) && !pte_exec(*pte))
20759- printk(nx_warning, current_uid());
20760+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20761+ }
20762+
20763+#ifdef CONFIG_PAX_KERNEXEC
20764+ if (init_mm.start_code <= address && address < init_mm.end_code) {
20765+ if (current->signal->curr_ip)
20766+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20767+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20768+ else
20769+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20770+ current->comm, task_pid_nr(current), current_uid(), current_euid());
20771 }
20772+#endif
20773
20774 printk(KERN_ALERT "BUG: unable to handle kernel ");
20775 if (address < PAGE_SIZE)
20776@@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *r
20777 }
20778 #endif
20779
20780+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20781+ if (pax_is_fetch_fault(regs, error_code, address)) {
20782+
20783+#ifdef CONFIG_PAX_EMUTRAMP
20784+ switch (pax_handle_fetch_fault(regs)) {
20785+ case 2:
20786+ return;
20787+ }
20788+#endif
20789+
20790+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20791+ do_group_exit(SIGKILL);
20792+ }
20793+#endif
20794+
20795 if (unlikely(show_unhandled_signals))
20796 show_signal_msg(regs, error_code, address, tsk);
20797
20798@@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned
20799 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
20800 printk(KERN_ERR
20801 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
20802- tsk->comm, tsk->pid, address);
20803+ tsk->comm, task_pid_nr(tsk), address);
20804 code = BUS_MCEERR_AR;
20805 }
20806 #endif
20807@@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned
20808 return 1;
20809 }
20810
20811+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20812+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20813+{
20814+ pte_t *pte;
20815+ pmd_t *pmd;
20816+ spinlock_t *ptl;
20817+ unsigned char pte_mask;
20818+
20819+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20820+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
20821+ return 0;
20822+
20823+ /* PaX: it's our fault, let's handle it if we can */
20824+
20825+ /* PaX: take a look at read faults before acquiring any locks */
20826+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20827+ /* instruction fetch attempt from a protected page in user mode */
20828+ up_read(&mm->mmap_sem);
20829+
20830+#ifdef CONFIG_PAX_EMUTRAMP
20831+ switch (pax_handle_fetch_fault(regs)) {
20832+ case 2:
20833+ return 1;
20834+ }
20835+#endif
20836+
20837+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20838+ do_group_exit(SIGKILL);
20839+ }
20840+
20841+ pmd = pax_get_pmd(mm, address);
20842+ if (unlikely(!pmd))
20843+ return 0;
20844+
20845+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20846+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20847+ pte_unmap_unlock(pte, ptl);
20848+ return 0;
20849+ }
20850+
20851+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20852+ /* write attempt to a protected page in user mode */
20853+ pte_unmap_unlock(pte, ptl);
20854+ return 0;
20855+ }
20856+
20857+#ifdef CONFIG_SMP
20858+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20859+#else
20860+ if (likely(address > get_limit(regs->cs)))
20861+#endif
20862+ {
20863+ set_pte(pte, pte_mkread(*pte));
20864+ __flush_tlb_one(address);
20865+ pte_unmap_unlock(pte, ptl);
20866+ up_read(&mm->mmap_sem);
20867+ return 1;
20868+ }
20869+
20870+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20871+
20872+ /*
20873+ * PaX: fill DTLB with user rights and retry
20874+ */
20875+ __asm__ __volatile__ (
20876+ "orb %2,(%1)\n"
20877+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20878+/*
20879+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20880+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20881+ * page fault when examined during a TLB load attempt. this is true not only
20882+ * for PTEs holding a non-present entry but also present entries that will
20883+ * raise a page fault (such as those set up by PaX, or the copy-on-write
20884+ * mechanism). in effect it means that we do *not* need to flush the TLBs
20885+ * for our target pages since their PTEs are simply not in the TLBs at all.
20886+
20887+ * the best thing in omitting it is that we gain around 15-20% speed in the
20888+ * fast path of the page fault handler and can get rid of tracing since we
20889+ * can no longer flush unintended entries.
20890+ */
20891+ "invlpg (%0)\n"
20892+#endif
20893+ __copyuser_seg"testb $0,(%0)\n"
20894+ "xorb %3,(%1)\n"
20895+ :
20896+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20897+ : "memory", "cc");
20898+ pte_unmap_unlock(pte, ptl);
20899+ up_read(&mm->mmap_sem);
20900+ return 1;
20901+}
20902+#endif
20903+
20904 /*
20905 * Handle a spurious fault caused by a stale TLB entry.
20906 *
20907@@ -956,6 +1145,9 @@ int show_unhandled_signals = 1;
20908 static inline int
20909 access_error(unsigned long error_code, struct vm_area_struct *vma)
20910 {
20911+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20912+ return 1;
20913+
20914 if (error_code & PF_WRITE) {
20915 /* write, present and write, not present: */
20916 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20917@@ -989,19 +1181,33 @@ do_page_fault(struct pt_regs *regs, unsi
20918 {
20919 struct vm_area_struct *vma;
20920 struct task_struct *tsk;
20921- unsigned long address;
20922 struct mm_struct *mm;
20923 int fault;
20924 int write = error_code & PF_WRITE;
20925 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
20926 (write ? FAULT_FLAG_WRITE : 0);
20927
20928+ /* Get the faulting address: */
20929+ unsigned long address = read_cr2();
20930+
20931+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20932+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20933+ if (!search_exception_tables(regs->ip)) {
20934+ bad_area_nosemaphore(regs, error_code, address);
20935+ return;
20936+ }
20937+ if (address < PAX_USER_SHADOW_BASE) {
20938+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20939+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
20940+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20941+ } else
20942+ address -= PAX_USER_SHADOW_BASE;
20943+ }
20944+#endif
20945+
20946 tsk = current;
20947 mm = tsk->mm;
20948
20949- /* Get the faulting address: */
20950- address = read_cr2();
20951-
20952 /*
20953 * Detect and handle instructions that would cause a page fault for
20954 * both a tracked kernel page and a userspace page.
20955@@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsi
20956 * User-mode registers count as a user access even for any
20957 * potential system fault or CPU buglet:
20958 */
20959- if (user_mode_vm(regs)) {
20960+ if (user_mode(regs)) {
20961 local_irq_enable();
20962 error_code |= PF_USER;
20963 } else {
20964@@ -1116,6 +1322,11 @@ retry:
20965 might_sleep();
20966 }
20967
20968+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20969+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20970+ return;
20971+#endif
20972+
20973 vma = find_vma(mm, address);
20974 if (unlikely(!vma)) {
20975 bad_area(regs, error_code, address);
20976@@ -1127,18 +1338,24 @@ retry:
20977 bad_area(regs, error_code, address);
20978 return;
20979 }
20980- if (error_code & PF_USER) {
20981- /*
20982- * Accessing the stack below %sp is always a bug.
20983- * The large cushion allows instructions like enter
20984- * and pusha to work. ("enter $65535, $31" pushes
20985- * 32 pointers and then decrements %sp by 65535.)
20986- */
20987- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20988- bad_area(regs, error_code, address);
20989- return;
20990- }
20991+ /*
20992+ * Accessing the stack below %sp is always a bug.
20993+ * The large cushion allows instructions like enter
20994+ * and pusha to work. ("enter $65535, $31" pushes
20995+ * 32 pointers and then decrements %sp by 65535.)
20996+ */
20997+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20998+ bad_area(regs, error_code, address);
20999+ return;
21000 }
21001+
21002+#ifdef CONFIG_PAX_SEGMEXEC
21003+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21004+ bad_area(regs, error_code, address);
21005+ return;
21006+ }
21007+#endif
21008+
21009 if (unlikely(expand_stack(vma, address))) {
21010 bad_area(regs, error_code, address);
21011 return;
21012@@ -1193,3 +1410,240 @@ good_area:
21013
21014 up_read(&mm->mmap_sem);
21015 }
21016+
21017+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21018+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21019+{
21020+ struct mm_struct *mm = current->mm;
21021+ unsigned long ip = regs->ip;
21022+
21023+ if (v8086_mode(regs))
21024+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21025+
21026+#ifdef CONFIG_PAX_PAGEEXEC
21027+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21028+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21029+ return true;
21030+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21031+ return true;
21032+ return false;
21033+ }
21034+#endif
21035+
21036+#ifdef CONFIG_PAX_SEGMEXEC
21037+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21038+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21039+ return true;
21040+ return false;
21041+ }
21042+#endif
21043+
21044+ return false;
21045+}
21046+#endif
21047+
21048+#ifdef CONFIG_PAX_EMUTRAMP
21049+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21050+{
21051+ int err;
21052+
21053+ do { /* PaX: gcc trampoline emulation #1 */
21054+ unsigned char mov1, mov2;
21055+ unsigned short jmp;
21056+ unsigned int addr1, addr2;
21057+
21058+#ifdef CONFIG_X86_64
21059+ if ((regs->ip + 11) >> 32)
21060+ break;
21061+#endif
21062+
21063+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21064+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21065+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21066+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21067+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21068+
21069+ if (err)
21070+ break;
21071+
21072+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21073+ regs->cx = addr1;
21074+ regs->ax = addr2;
21075+ regs->ip = addr2;
21076+ return 2;
21077+ }
21078+ } while (0);
21079+
21080+ do { /* PaX: gcc trampoline emulation #2 */
21081+ unsigned char mov, jmp;
21082+ unsigned int addr1, addr2;
21083+
21084+#ifdef CONFIG_X86_64
21085+ if ((regs->ip + 9) >> 32)
21086+ break;
21087+#endif
21088+
21089+ err = get_user(mov, (unsigned char __user *)regs->ip);
21090+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21091+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21092+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21093+
21094+ if (err)
21095+ break;
21096+
21097+ if (mov == 0xB9 && jmp == 0xE9) {
21098+ regs->cx = addr1;
21099+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21100+ return 2;
21101+ }
21102+ } while (0);
21103+
21104+ return 1; /* PaX in action */
21105+}
21106+
21107+#ifdef CONFIG_X86_64
21108+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21109+{
21110+ int err;
21111+
21112+ do { /* PaX: gcc trampoline emulation #1 */
21113+ unsigned short mov1, mov2, jmp1;
21114+ unsigned char jmp2;
21115+ unsigned int addr1;
21116+ unsigned long addr2;
21117+
21118+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21119+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21120+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21121+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21122+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21123+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21124+
21125+ if (err)
21126+ break;
21127+
21128+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21129+ regs->r11 = addr1;
21130+ regs->r10 = addr2;
21131+ regs->ip = addr1;
21132+ return 2;
21133+ }
21134+ } while (0);
21135+
21136+ do { /* PaX: gcc trampoline emulation #2 */
21137+ unsigned short mov1, mov2, jmp1;
21138+ unsigned char jmp2;
21139+ unsigned long addr1, addr2;
21140+
21141+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21142+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21143+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21144+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21145+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21146+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21147+
21148+ if (err)
21149+ break;
21150+
21151+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21152+ regs->r11 = addr1;
21153+ regs->r10 = addr2;
21154+ regs->ip = addr1;
21155+ return 2;
21156+ }
21157+ } while (0);
21158+
21159+ return 1; /* PaX in action */
21160+}
21161+#endif
21162+
21163+/*
21164+ * PaX: decide what to do with offenders (regs->ip = fault address)
21165+ *
21166+ * returns 1 when task should be killed
21167+ * 2 when gcc trampoline was detected
21168+ */
21169+static int pax_handle_fetch_fault(struct pt_regs *regs)
21170+{
21171+ if (v8086_mode(regs))
21172+ return 1;
21173+
21174+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21175+ return 1;
21176+
21177+#ifdef CONFIG_X86_32
21178+ return pax_handle_fetch_fault_32(regs);
21179+#else
21180+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21181+ return pax_handle_fetch_fault_32(regs);
21182+ else
21183+ return pax_handle_fetch_fault_64(regs);
21184+#endif
21185+}
21186+#endif
21187+
21188+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21189+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
21190+{
21191+ long i;
21192+
21193+ printk(KERN_ERR "PAX: bytes at PC: ");
21194+ for (i = 0; i < 20; i++) {
21195+ unsigned char c;
21196+ if (get_user(c, (unsigned char __force_user *)pc+i))
21197+ printk(KERN_CONT "?? ");
21198+ else
21199+ printk(KERN_CONT "%02x ", c);
21200+ }
21201+ printk("\n");
21202+
21203+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21204+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
21205+ unsigned long c;
21206+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
21207+#ifdef CONFIG_X86_32
21208+ printk(KERN_CONT "???????? ");
21209+#else
21210+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
21211+ printk(KERN_CONT "???????? ???????? ");
21212+ else
21213+ printk(KERN_CONT "???????????????? ");
21214+#endif
21215+ } else {
21216+#ifdef CONFIG_X86_64
21217+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
21218+ printk(KERN_CONT "%08x ", (unsigned int)c);
21219+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
21220+ } else
21221+#endif
21222+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21223+ }
21224+ }
21225+ printk("\n");
21226+}
21227+#endif
21228+
21229+/**
21230+ * probe_kernel_write(): safely attempt to write to a location
21231+ * @dst: address to write to
21232+ * @src: pointer to the data that shall be written
21233+ * @size: size of the data chunk
21234+ *
21235+ * Safely write to address @dst from the buffer at @src. If a kernel fault
21236+ * happens, handle that and return -EFAULT.
21237+ */
21238+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21239+{
21240+ long ret;
21241+ mm_segment_t old_fs = get_fs();
21242+
21243+ set_fs(KERNEL_DS);
21244+ pagefault_disable();
21245+ pax_open_kernel();
21246+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
21247+ pax_close_kernel();
21248+ pagefault_enable();
21249+ set_fs(old_fs);
21250+
21251+ return ret ? -EFAULT : 0;
21252+}
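
The probe_kernel_write() added above keeps the helper's usual contract — 0 on
success, -EFAULT if the destination faults — while wrapping the copy in
pax_open_kernel()/pax_close_kernel(), presumably so callers that patch
otherwise read-only kernel memory keep working under KERNEXEC. A hedged usage
sketch in the form of a throwaway module (the module scaffolding and names are
illustrative; only probe_kernel_write() itself comes from the code above):

  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/uaccess.h>

  static int scratch;   /* ordinary writable kernel data */

  static int __init probe_write_demo_init(void)
  {
          int value = 42;

          /* 0 on success, -EFAULT if the destination would fault. */
          if (probe_kernel_write(&scratch, &value, sizeof(value)))
                  pr_info("probe_kernel_write faulted\n");
          else
                  pr_info("scratch is now %d\n", scratch);

          return 0;
  }

  static void __exit probe_write_demo_exit(void)
  {
  }

  module_init(probe_write_demo_init);
  module_exit(probe_write_demo_exit);
  MODULE_LICENSE("GPL");
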
21253diff -urNp linux-3.1.4/arch/x86/mm/gup.c linux-3.1.4/arch/x86/mm/gup.c
21254--- linux-3.1.4/arch/x86/mm/gup.c 2011-11-11 15:19:27.000000000 -0500
21255+++ linux-3.1.4/arch/x86/mm/gup.c 2011-11-16 18:39:07.000000000 -0500
21256@@ -253,7 +253,7 @@ int __get_user_pages_fast(unsigned long
21257 addr = start;
21258 len = (unsigned long) nr_pages << PAGE_SHIFT;
21259 end = start + len;
21260- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21261+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21262 (void __user *)start, len)))
21263 return 0;
21264
21265diff -urNp linux-3.1.4/arch/x86/mm/highmem_32.c linux-3.1.4/arch/x86/mm/highmem_32.c
21266--- linux-3.1.4/arch/x86/mm/highmem_32.c 2011-11-11 15:19:27.000000000 -0500
21267+++ linux-3.1.4/arch/x86/mm/highmem_32.c 2011-11-16 18:39:07.000000000 -0500
21268@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
21269 idx = type + KM_TYPE_NR*smp_processor_id();
21270 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21271 BUG_ON(!pte_none(*(kmap_pte-idx)));
21272+
21273+ pax_open_kernel();
21274 set_pte(kmap_pte-idx, mk_pte(page, prot));
21275+ pax_close_kernel();
21276
21277 return (void *)vaddr;
21278 }
21279diff -urNp linux-3.1.4/arch/x86/mm/hugetlbpage.c linux-3.1.4/arch/x86/mm/hugetlbpage.c
21280--- linux-3.1.4/arch/x86/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
21281+++ linux-3.1.4/arch/x86/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
21282@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
21283 struct hstate *h = hstate_file(file);
21284 struct mm_struct *mm = current->mm;
21285 struct vm_area_struct *vma;
21286- unsigned long start_addr;
21287+ unsigned long start_addr, pax_task_size = TASK_SIZE;
21288+
21289+#ifdef CONFIG_PAX_SEGMEXEC
21290+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21291+ pax_task_size = SEGMEXEC_TASK_SIZE;
21292+#endif
21293+
21294+ pax_task_size -= PAGE_SIZE;
21295
21296 if (len > mm->cached_hole_size) {
21297- start_addr = mm->free_area_cache;
21298+ start_addr = mm->free_area_cache;
21299 } else {
21300- start_addr = TASK_UNMAPPED_BASE;
21301- mm->cached_hole_size = 0;
21302+ start_addr = mm->mmap_base;
21303+ mm->cached_hole_size = 0;
21304 }
21305
21306 full_search:
21307@@ -280,26 +287,27 @@ full_search:
21308
21309 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21310 /* At this point: (!vma || addr < vma->vm_end). */
21311- if (TASK_SIZE - len < addr) {
21312+ if (pax_task_size - len < addr) {
21313 /*
21314 * Start a new search - just in case we missed
21315 * some holes.
21316 */
21317- if (start_addr != TASK_UNMAPPED_BASE) {
21318- start_addr = TASK_UNMAPPED_BASE;
21319+ if (start_addr != mm->mmap_base) {
21320+ start_addr = mm->mmap_base;
21321 mm->cached_hole_size = 0;
21322 goto full_search;
21323 }
21324 return -ENOMEM;
21325 }
21326- if (!vma || addr + len <= vma->vm_start) {
21327- mm->free_area_cache = addr + len;
21328- return addr;
21329- }
21330+ if (check_heap_stack_gap(vma, addr, len))
21331+ break;
21332 if (addr + mm->cached_hole_size < vma->vm_start)
21333 mm->cached_hole_size = vma->vm_start - addr;
21334 addr = ALIGN(vma->vm_end, huge_page_size(h));
21335 }
21336+
21337+ mm->free_area_cache = addr + len;
21338+ return addr;
21339 }
21340
21341 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21342@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
21343 {
21344 struct hstate *h = hstate_file(file);
21345 struct mm_struct *mm = current->mm;
21346- struct vm_area_struct *vma, *prev_vma;
21347- unsigned long base = mm->mmap_base, addr = addr0;
21348+ struct vm_area_struct *vma;
21349+ unsigned long base = mm->mmap_base, addr;
21350 unsigned long largest_hole = mm->cached_hole_size;
21351- int first_time = 1;
21352
21353 /* don't allow allocations above current base */
21354 if (mm->free_area_cache > base)
21355@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
21356 largest_hole = 0;
21357 mm->free_area_cache = base;
21358 }
21359-try_again:
21360+
21361 /* make sure it can fit in the remaining address space */
21362 if (mm->free_area_cache < len)
21363 goto fail;
21364
21365 /* either no address requested or can't fit in requested address hole */
21366- addr = (mm->free_area_cache - len) & huge_page_mask(h);
21367+ addr = (mm->free_area_cache - len);
21368 do {
21369+ addr &= huge_page_mask(h);
21370+ vma = find_vma(mm, addr);
21371 /*
21372 * Lookup failure means no vma is above this address,
21373 * i.e. return with success:
21374- */
21375- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21376- return addr;
21377-
21378- /*
21379 * new region fits between prev_vma->vm_end and
21380 * vma->vm_start, use it:
21381 */
21382- if (addr + len <= vma->vm_start &&
21383- (!prev_vma || (addr >= prev_vma->vm_end))) {
21384+ if (check_heap_stack_gap(vma, addr, len)) {
21385 /* remember the address as a hint for next time */
21386- mm->cached_hole_size = largest_hole;
21387- return (mm->free_area_cache = addr);
21388- } else {
21389- /* pull free_area_cache down to the first hole */
21390- if (mm->free_area_cache == vma->vm_end) {
21391- mm->free_area_cache = vma->vm_start;
21392- mm->cached_hole_size = largest_hole;
21393- }
21394+ mm->cached_hole_size = largest_hole;
21395+ return (mm->free_area_cache = addr);
21396+ }
21397+ /* pull free_area_cache down to the first hole */
21398+ if (mm->free_area_cache == vma->vm_end) {
21399+ mm->free_area_cache = vma->vm_start;
21400+ mm->cached_hole_size = largest_hole;
21401 }
21402
21403 /* remember the largest hole we saw so far */
21404 if (addr + largest_hole < vma->vm_start)
21405- largest_hole = vma->vm_start - addr;
21406+ largest_hole = vma->vm_start - addr;
21407
21408 /* try just below the current vma->vm_start */
21409- addr = (vma->vm_start - len) & huge_page_mask(h);
21410- } while (len <= vma->vm_start);
21411+ addr = skip_heap_stack_gap(vma, len);
21412+ } while (!IS_ERR_VALUE(addr));
21413
21414 fail:
21415 /*
21416- * if hint left us with no space for the requested
21417- * mapping then try again:
21418- */
21419- if (first_time) {
21420- mm->free_area_cache = base;
21421- largest_hole = 0;
21422- first_time = 0;
21423- goto try_again;
21424- }
21425- /*
21426 * A failed mmap() very likely causes application failure,
21427 * so fall back to the bottom-up function here. This scenario
21428 * can happen with large stack limits and large mmap()
21429 * allocations.
21430 */
21431- mm->free_area_cache = TASK_UNMAPPED_BASE;
21432+
21433+#ifdef CONFIG_PAX_SEGMEXEC
21434+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21435+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21436+ else
21437+#endif
21438+
21439+ mm->mmap_base = TASK_UNMAPPED_BASE;
21440+
21441+#ifdef CONFIG_PAX_RANDMMAP
21442+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21443+ mm->mmap_base += mm->delta_mmap;
21444+#endif
21445+
21446+ mm->free_area_cache = mm->mmap_base;
21447 mm->cached_hole_size = ~0UL;
21448 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21449 len, pgoff, flags);
21450@@ -386,6 +392,7 @@ fail:
21451 /*
21452 * Restore the topdown base:
21453 */
21454+ mm->mmap_base = base;
21455 mm->free_area_cache = base;
21456 mm->cached_hole_size = ~0UL;
21457
21458@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
21459 struct hstate *h = hstate_file(file);
21460 struct mm_struct *mm = current->mm;
21461 struct vm_area_struct *vma;
21462+ unsigned long pax_task_size = TASK_SIZE;
21463
21464 if (len & ~huge_page_mask(h))
21465 return -EINVAL;
21466- if (len > TASK_SIZE)
21467+
21468+#ifdef CONFIG_PAX_SEGMEXEC
21469+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21470+ pax_task_size = SEGMEXEC_TASK_SIZE;
21471+#endif
21472+
21473+ pax_task_size -= PAGE_SIZE;
21474+
21475+ if (len > pax_task_size)
21476 return -ENOMEM;
21477
21478 if (flags & MAP_FIXED) {
21479@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
21480 if (addr) {
21481 addr = ALIGN(addr, huge_page_size(h));
21482 vma = find_vma(mm, addr);
21483- if (TASK_SIZE - len >= addr &&
21484- (!vma || addr + len <= vma->vm_start))
21485+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21486 return addr;
21487 }
21488 if (mm->get_unmapped_area == arch_get_unmapped_area)
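
Like the other get_unmapped_area changes in this patch, the hugetlbpage.c hunks
above swap the open-coded "!vma || addr + len <= vma->vm_start" overlap test
for check_heap_stack_gap(), a helper added elsewhere in the patch whose job, as
the name suggests, is to reject plain overlaps and also keep a guard gap
between the candidate region and an adjacent stack mapping. A compact sketch of
that kind of predicate, with invented names and a fixed guard size (vma_stub,
gap_ok and GUARD_GAP are illustrative, not the kernel's definitions):

  #include <stdbool.h>
  #include <stdio.h>

  #define GUARD_GAP (64UL * 1024)  /* illustrative, not the real configurable gap */

  struct vma_stub {
          unsigned long vm_start;
          unsigned long vm_end;
          bool grows_down;         /* stands in for VM_GROWSDOWN */
  };

  /* True when [addr, addr+len) fits below 'vma' and, if 'vma' is a
   * downward-growing stack, leaves GUARD_GAP of headroom under it. */
  static bool gap_ok(const struct vma_stub *vma, unsigned long addr,
                     unsigned long len)
  {
          if (!vma)
                  return true;
          if (addr + len > vma->vm_start)
                  return false;
          if (vma->grows_down)
                  return addr + len + GUARD_GAP <= vma->vm_start;
          return true;
  }

  int main(void)
  {
          struct vma_stub stack = { 0xbf000000UL, 0xbf021000UL, true };

          printf("%d\n", gap_ok(&stack, 0xbefff000UL, 0x1000));  /* too close: 0 */
          printf("%d\n", gap_ok(&stack, 0xbe000000UL, 0x1000));  /* fits: 1 */
          return 0;
  }
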
21489diff -urNp linux-3.1.4/arch/x86/mm/init_32.c linux-3.1.4/arch/x86/mm/init_32.c
21490--- linux-3.1.4/arch/x86/mm/init_32.c 2011-11-11 15:19:27.000000000 -0500
21491+++ linux-3.1.4/arch/x86/mm/init_32.c 2011-11-16 18:39:07.000000000 -0500
21492@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
21493 }
21494
21495 /*
21496- * Creates a middle page table and puts a pointer to it in the
21497- * given global directory entry. This only returns the gd entry
21498- * in non-PAE compilation mode, since the middle layer is folded.
21499- */
21500-static pmd_t * __init one_md_table_init(pgd_t *pgd)
21501-{
21502- pud_t *pud;
21503- pmd_t *pmd_table;
21504-
21505-#ifdef CONFIG_X86_PAE
21506- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21507- if (after_bootmem)
21508- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21509- else
21510- pmd_table = (pmd_t *)alloc_low_page();
21511- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21512- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21513- pud = pud_offset(pgd, 0);
21514- BUG_ON(pmd_table != pmd_offset(pud, 0));
21515-
21516- return pmd_table;
21517- }
21518-#endif
21519- pud = pud_offset(pgd, 0);
21520- pmd_table = pmd_offset(pud, 0);
21521-
21522- return pmd_table;
21523-}
21524-
21525-/*
21526 * Create a page table and place a pointer to it in a middle page
21527 * directory entry:
21528 */
21529@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
21530 page_table = (pte_t *)alloc_low_page();
21531
21532 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21533+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21534+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21535+#else
21536 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21537+#endif
21538 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21539 }
21540
21541 return pte_offset_kernel(pmd, 0);
21542 }
21543
21544+static pmd_t * __init one_md_table_init(pgd_t *pgd)
21545+{
21546+ pud_t *pud;
21547+ pmd_t *pmd_table;
21548+
21549+ pud = pud_offset(pgd, 0);
21550+ pmd_table = pmd_offset(pud, 0);
21551+
21552+ return pmd_table;
21553+}
21554+
21555 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21556 {
21557 int pgd_idx = pgd_index(vaddr);
21558@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
21559 int pgd_idx, pmd_idx;
21560 unsigned long vaddr;
21561 pgd_t *pgd;
21562+ pud_t *pud;
21563 pmd_t *pmd;
21564 pte_t *pte = NULL;
21565
21566@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
21567 pgd = pgd_base + pgd_idx;
21568
21569 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21570- pmd = one_md_table_init(pgd);
21571- pmd = pmd + pmd_index(vaddr);
21572+ pud = pud_offset(pgd, vaddr);
21573+ pmd = pmd_offset(pud, vaddr);
21574+
21575+#ifdef CONFIG_X86_PAE
21576+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21577+#endif
21578+
21579 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21580 pmd++, pmd_idx++) {
21581 pte = page_table_kmap_check(one_page_table_init(pmd),
21582@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
21583 }
21584 }
21585
21586-static inline int is_kernel_text(unsigned long addr)
21587+static inline int is_kernel_text(unsigned long start, unsigned long end)
21588 {
21589- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
21590- return 1;
21591- return 0;
21592+ if ((start > ktla_ktva((unsigned long)_etext) ||
21593+ end <= ktla_ktva((unsigned long)_stext)) &&
21594+ (start > ktla_ktva((unsigned long)_einittext) ||
21595+ end <= ktla_ktva((unsigned long)_sinittext)) &&
21596+
21597+#ifdef CONFIG_ACPI_SLEEP
21598+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21599+#endif
21600+
21601+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21602+ return 0;
21603+ return 1;
21604 }
21605
21606 /*
21607@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
21608 unsigned long last_map_addr = end;
21609 unsigned long start_pfn, end_pfn;
21610 pgd_t *pgd_base = swapper_pg_dir;
21611- int pgd_idx, pmd_idx, pte_ofs;
21612+ unsigned int pgd_idx, pmd_idx, pte_ofs;
21613 unsigned long pfn;
21614 pgd_t *pgd;
21615+ pud_t *pud;
21616 pmd_t *pmd;
21617 pte_t *pte;
21618 unsigned pages_2m, pages_4k;
21619@@ -281,8 +282,13 @@ repeat:
21620 pfn = start_pfn;
21621 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21622 pgd = pgd_base + pgd_idx;
21623- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21624- pmd = one_md_table_init(pgd);
21625+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21626+ pud = pud_offset(pgd, 0);
21627+ pmd = pmd_offset(pud, 0);
21628+
21629+#ifdef CONFIG_X86_PAE
21630+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21631+#endif
21632
21633 if (pfn >= end_pfn)
21634 continue;
21635@@ -294,14 +300,13 @@ repeat:
21636 #endif
21637 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21638 pmd++, pmd_idx++) {
21639- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21640+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21641
21642 /*
21643 * Map with big pages if possible, otherwise
21644 * create normal page tables:
21645 */
21646 if (use_pse) {
21647- unsigned int addr2;
21648 pgprot_t prot = PAGE_KERNEL_LARGE;
21649 /*
21650 * first pass will use the same initial
21651@@ -311,11 +316,7 @@ repeat:
21652 __pgprot(PTE_IDENT_ATTR |
21653 _PAGE_PSE);
21654
21655- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21656- PAGE_OFFSET + PAGE_SIZE-1;
21657-
21658- if (is_kernel_text(addr) ||
21659- is_kernel_text(addr2))
21660+ if (is_kernel_text(address, address + PMD_SIZE))
21661 prot = PAGE_KERNEL_LARGE_EXEC;
21662
21663 pages_2m++;
21664@@ -332,7 +333,7 @@ repeat:
21665 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21666 pte += pte_ofs;
21667 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21668- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21669+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21670 pgprot_t prot = PAGE_KERNEL;
21671 /*
21672 * first pass will use the same initial
21673@@ -340,7 +341,7 @@ repeat:
21674 */
21675 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21676
21677- if (is_kernel_text(addr))
21678+ if (is_kernel_text(address, address + PAGE_SIZE))
21679 prot = PAGE_KERNEL_EXEC;
21680
21681 pages_4k++;
21682@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
21683
21684 pud = pud_offset(pgd, va);
21685 pmd = pmd_offset(pud, va);
21686- if (!pmd_present(*pmd))
21687+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
21688 break;
21689
21690 pte = pte_offset_kernel(pmd, va);
21691@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
21692
21693 static void __init pagetable_init(void)
21694 {
21695- pgd_t *pgd_base = swapper_pg_dir;
21696-
21697- permanent_kmaps_init(pgd_base);
21698+ permanent_kmaps_init(swapper_pg_dir);
21699 }
21700
21701-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21702+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21703 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21704
21705 /* user-defined highmem size */
21706@@ -757,6 +756,12 @@ void __init mem_init(void)
21707
21708 pci_iommu_alloc();
21709
21710+#ifdef CONFIG_PAX_PER_CPU_PGD
21711+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21712+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21713+ KERNEL_PGD_PTRS);
21714+#endif
21715+
21716 #ifdef CONFIG_FLATMEM
21717 BUG_ON(!mem_map);
21718 #endif
21719@@ -774,7 +779,7 @@ void __init mem_init(void)
21720 set_highmem_pages_init();
21721
21722 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21723- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21724+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21725 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21726
21727 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21728@@ -815,10 +820,10 @@ void __init mem_init(void)
21729 ((unsigned long)&__init_end -
21730 (unsigned long)&__init_begin) >> 10,
21731
21732- (unsigned long)&_etext, (unsigned long)&_edata,
21733- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21734+ (unsigned long)&_sdata, (unsigned long)&_edata,
21735+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21736
21737- (unsigned long)&_text, (unsigned long)&_etext,
21738+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21739 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21740
21741 /*
21742@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
21743 if (!kernel_set_to_readonly)
21744 return;
21745
21746+ start = ktla_ktva(start);
21747 pr_debug("Set kernel text: %lx - %lx for read write\n",
21748 start, start+size);
21749
21750@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
21751 if (!kernel_set_to_readonly)
21752 return;
21753
21754+ start = ktla_ktva(start);
21755 pr_debug("Set kernel text: %lx - %lx for read only\n",
21756 start, start+size);
21757
21758@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
21759 unsigned long start = PFN_ALIGN(_text);
21760 unsigned long size = PFN_ALIGN(_etext) - start;
21761
21762+ start = ktla_ktva(start);
21763 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21764 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21765 size >> 10);
21766diff -urNp linux-3.1.4/arch/x86/mm/init_64.c linux-3.1.4/arch/x86/mm/init_64.c
21767--- linux-3.1.4/arch/x86/mm/init_64.c 2011-11-11 15:19:27.000000000 -0500
21768+++ linux-3.1.4/arch/x86/mm/init_64.c 2011-11-16 18:39:07.000000000 -0500
21769@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
21770 * around without checking the pgd every time.
21771 */
21772
21773-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
21774+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
21775 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21776
21777 int force_personality32;
21778@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
21779
21780 for (address = start; address <= end; address += PGDIR_SIZE) {
21781 const pgd_t *pgd_ref = pgd_offset_k(address);
21782+
21783+#ifdef CONFIG_PAX_PER_CPU_PGD
21784+ unsigned long cpu;
21785+#else
21786 struct page *page;
21787+#endif
21788
21789 if (pgd_none(*pgd_ref))
21790 continue;
21791
21792 spin_lock(&pgd_lock);
21793+
21794+#ifdef CONFIG_PAX_PER_CPU_PGD
21795+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21796+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
21797+#else
21798 list_for_each_entry(page, &pgd_list, lru) {
21799 pgd_t *pgd;
21800 spinlock_t *pgt_lock;
21801@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
21802 /* the pgt_lock only for Xen */
21803 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21804 spin_lock(pgt_lock);
21805+#endif
21806
21807 if (pgd_none(*pgd))
21808 set_pgd(pgd, *pgd_ref);
21809@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
21810 BUG_ON(pgd_page_vaddr(*pgd)
21811 != pgd_page_vaddr(*pgd_ref));
21812
21813+#ifndef CONFIG_PAX_PER_CPU_PGD
21814 spin_unlock(pgt_lock);
21815+#endif
21816+
21817 }
21818 spin_unlock(&pgd_lock);
21819 }
21820@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21821 pmd = fill_pmd(pud, vaddr);
21822 pte = fill_pte(pmd, vaddr);
21823
21824+ pax_open_kernel();
21825 set_pte(pte, new_pte);
21826+ pax_close_kernel();
21827
21828 /*
21829 * It's enough to flush this one mapping.
21830@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
21831 pgd = pgd_offset_k((unsigned long)__va(phys));
21832 if (pgd_none(*pgd)) {
21833 pud = (pud_t *) spp_getpage();
21834- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21835- _PAGE_USER));
21836+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21837 }
21838 pud = pud_offset(pgd, (unsigned long)__va(phys));
21839 if (pud_none(*pud)) {
21840 pmd = (pmd_t *) spp_getpage();
21841- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21842- _PAGE_USER));
21843+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21844 }
21845 pmd = pmd_offset(pud, phys);
21846 BUG_ON(!pmd_none(*pmd));
21847@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
21848 if (pfn >= pgt_buf_top)
21849 panic("alloc_low_page: ran out of memory");
21850
21851- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21852+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21853 clear_page(adr);
21854 *phys = pfn * PAGE_SIZE;
21855 return adr;
21856@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
21857
21858 phys = __pa(virt);
21859 left = phys & (PAGE_SIZE - 1);
21860- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21861+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21862 adr = (void *)(((unsigned long)adr) | left);
21863
21864 return adr;
21865@@ -693,6 +707,12 @@ void __init mem_init(void)
21866
21867 pci_iommu_alloc();
21868
21869+#ifdef CONFIG_PAX_PER_CPU_PGD
21870+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21871+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21872+ KERNEL_PGD_PTRS);
21873+#endif
21874+
21875 /* clear_bss() already clear the empty_zero_page */
21876
21877 reservedpages = 0;
21878@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
21879 static struct vm_area_struct gate_vma = {
21880 .vm_start = VSYSCALL_START,
21881 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21882- .vm_page_prot = PAGE_READONLY_EXEC,
21883- .vm_flags = VM_READ | VM_EXEC
21884+ .vm_page_prot = PAGE_READONLY,
21885+ .vm_flags = VM_READ
21886 };
21887
21888 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
21889@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
21890
21891 const char *arch_vma_name(struct vm_area_struct *vma)
21892 {
21893- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21894+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21895 return "[vdso]";
21896 if (vma == &gate_vma)
21897 return "[vsyscall]";
21898diff -urNp linux-3.1.4/arch/x86/mm/init.c linux-3.1.4/arch/x86/mm/init.c
21899--- linux-3.1.4/arch/x86/mm/init.c 2011-11-11 15:19:27.000000000 -0500
21900+++ linux-3.1.4/arch/x86/mm/init.c 2011-11-17 18:31:28.000000000 -0500
21901@@ -31,7 +31,7 @@ int direct_gbpages
21902 static void __init find_early_table_space(unsigned long end, int use_pse,
21903 int use_gbpages)
21904 {
21905- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
21906+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
21907 phys_addr_t base;
21908
21909 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
21910@@ -312,8 +312,29 @@ unsigned long __init_refok init_memory_m
21911 */
21912 int devmem_is_allowed(unsigned long pagenr)
21913 {
21914- if (pagenr <= 256)
21915+#ifdef CONFIG_GRKERNSEC_KMEM
21916+ /* allow BDA */
21917+ if (!pagenr)
21918+ return 1;
21919+ /* allow EBDA */
21920+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21921+ return 1;
21922+#else
21923+ if (!pagenr)
21924+ return 1;
21925+#ifdef CONFIG_VM86
21926+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
21927+ return 1;
21928+#endif
21929+#endif
21930+
21931+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21932 return 1;
21933+#ifdef CONFIG_GRKERNSEC_KMEM
21934+ /* throw out everything else below 1MB */
21935+ if (pagenr <= 256)
21936+ return 0;
21937+#endif
21938 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21939 return 0;
21940 if (!page_is_ram(pagenr))
21941@@ -372,6 +393,86 @@ void free_init_pages(char *what, unsigne
21942
21943 void free_initmem(void)
21944 {
21945+
21946+#ifdef CONFIG_PAX_KERNEXEC
21947+#ifdef CONFIG_X86_32
21948+ /* PaX: limit KERNEL_CS to actual size */
21949+ unsigned long addr, limit;
21950+ struct desc_struct d;
21951+ int cpu;
21952+
21953+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21954+ limit = (limit - 1UL) >> PAGE_SHIFT;
21955+
21956+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21957+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21958+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21959+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21960+ }
21961+
21962+ /* PaX: make KERNEL_CS read-only */
21963+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21964+ if (!paravirt_enabled())
21965+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21966+/*
21967+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21968+ pgd = pgd_offset_k(addr);
21969+ pud = pud_offset(pgd, addr);
21970+ pmd = pmd_offset(pud, addr);
21971+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21972+ }
21973+*/
21974+#ifdef CONFIG_X86_PAE
21975+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21976+/*
21977+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21978+ pgd = pgd_offset_k(addr);
21979+ pud = pud_offset(pgd, addr);
21980+ pmd = pmd_offset(pud, addr);
21981+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21982+ }
21983+*/
21984+#endif
21985+
21986+#ifdef CONFIG_MODULES
21987+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21988+#endif
21989+
21990+#else
21991+ pgd_t *pgd;
21992+ pud_t *pud;
21993+ pmd_t *pmd;
21994+ unsigned long addr, end;
21995+
21996+ /* PaX: make kernel code/rodata read-only, rest non-executable */
21997+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21998+ pgd = pgd_offset_k(addr);
21999+ pud = pud_offset(pgd, addr);
22000+ pmd = pmd_offset(pud, addr);
22001+ if (!pmd_present(*pmd))
22002+ continue;
22003+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22004+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22005+ else
22006+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22007+ }
22008+
22009+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22010+ end = addr + KERNEL_IMAGE_SIZE;
22011+ for (; addr < end; addr += PMD_SIZE) {
22012+ pgd = pgd_offset_k(addr);
22013+ pud = pud_offset(pgd, addr);
22014+ pmd = pmd_offset(pud, addr);
22015+ if (!pmd_present(*pmd))
22016+ continue;
22017+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22018+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22019+ }
22020+#endif
22021+
22022+ flush_tlb_all();
22023+#endif
22024+
22025 free_init_pages("unused kernel memory",
22026 (unsigned long)(&__init_begin),
22027 (unsigned long)(&__init_end));
22028diff -urNp linux-3.1.4/arch/x86/mm/iomap_32.c linux-3.1.4/arch/x86/mm/iomap_32.c
22029--- linux-3.1.4/arch/x86/mm/iomap_32.c 2011-11-11 15:19:27.000000000 -0500
22030+++ linux-3.1.4/arch/x86/mm/iomap_32.c 2011-11-16 18:39:07.000000000 -0500
22031@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22032 type = kmap_atomic_idx_push();
22033 idx = type + KM_TYPE_NR * smp_processor_id();
22034 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22035+
22036+ pax_open_kernel();
22037 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22038+ pax_close_kernel();
22039+
22040 arch_flush_lazy_mmu_mode();
22041
22042 return (void *)vaddr;
22043diff -urNp linux-3.1.4/arch/x86/mm/ioremap.c linux-3.1.4/arch/x86/mm/ioremap.c
22044--- linux-3.1.4/arch/x86/mm/ioremap.c 2011-11-11 15:19:27.000000000 -0500
22045+++ linux-3.1.4/arch/x86/mm/ioremap.c 2011-11-16 18:39:07.000000000 -0500
22046@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
22047 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22048 int is_ram = page_is_ram(pfn);
22049
22050- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22051+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22052 return NULL;
22053 WARN_ON_ONCE(is_ram);
22054 }
22055@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
22056 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22057
22058 static __initdata int after_paging_init;
22059-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22060+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22061
22062 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22063 {
22064@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
22065 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22066
22067 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22068- memset(bm_pte, 0, sizeof(bm_pte));
22069- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22070+ pmd_populate_user(&init_mm, pmd, bm_pte);
22071
22072 /*
22073 * The boot-ioremap range spans multiple pmds, for which
22074diff -urNp linux-3.1.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.1.4/arch/x86/mm/kmemcheck/kmemcheck.c
22075--- linux-3.1.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-11 15:19:27.000000000 -0500
22076+++ linux-3.1.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-16 18:39:07.000000000 -0500
22077@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22078 * memory (e.g. tracked pages)? For now, we need this to avoid
22079 * invoking kmemcheck for PnP BIOS calls.
22080 */
22081- if (regs->flags & X86_VM_MASK)
22082+ if (v8086_mode(regs))
22083 return false;
22084- if (regs->cs != __KERNEL_CS)
22085+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22086 return false;
22087
22088 pte = kmemcheck_pte_lookup(address);
22089diff -urNp linux-3.1.4/arch/x86/mm/mmap.c linux-3.1.4/arch/x86/mm/mmap.c
22090--- linux-3.1.4/arch/x86/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
22091+++ linux-3.1.4/arch/x86/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
22092@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22093 * Leave an at least ~128 MB hole with possible stack randomization.
22094 */
22095 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22096-#define MAX_GAP (TASK_SIZE/6*5)
22097+#define MAX_GAP (pax_task_size/6*5)
22098
22099 /*
22100 * True on X86_32 or when emulating IA32 on X86_64
22101@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22102 return rnd << PAGE_SHIFT;
22103 }
22104
22105-static unsigned long mmap_base(void)
22106+static unsigned long mmap_base(struct mm_struct *mm)
22107 {
22108 unsigned long gap = rlimit(RLIMIT_STACK);
22109+ unsigned long pax_task_size = TASK_SIZE;
22110+
22111+#ifdef CONFIG_PAX_SEGMEXEC
22112+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22113+ pax_task_size = SEGMEXEC_TASK_SIZE;
22114+#endif
22115
22116 if (gap < MIN_GAP)
22117 gap = MIN_GAP;
22118 else if (gap > MAX_GAP)
22119 gap = MAX_GAP;
22120
22121- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22122+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22123 }
22124
22125 /*
22126 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22127 * does, but not when emulating X86_32
22128 */
22129-static unsigned long mmap_legacy_base(void)
22130+static unsigned long mmap_legacy_base(struct mm_struct *mm)
22131 {
22132- if (mmap_is_ia32())
22133+ if (mmap_is_ia32()) {
22134+
22135+#ifdef CONFIG_PAX_SEGMEXEC
22136+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22137+ return SEGMEXEC_TASK_UNMAPPED_BASE;
22138+ else
22139+#endif
22140+
22141 return TASK_UNMAPPED_BASE;
22142- else
22143+ } else
22144 return TASK_UNMAPPED_BASE + mmap_rnd();
22145 }
22146
22147@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22148 void arch_pick_mmap_layout(struct mm_struct *mm)
22149 {
22150 if (mmap_is_legacy()) {
22151- mm->mmap_base = mmap_legacy_base();
22152+ mm->mmap_base = mmap_legacy_base(mm);
22153+
22154+#ifdef CONFIG_PAX_RANDMMAP
22155+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22156+ mm->mmap_base += mm->delta_mmap;
22157+#endif
22158+
22159 mm->get_unmapped_area = arch_get_unmapped_area;
22160 mm->unmap_area = arch_unmap_area;
22161 } else {
22162- mm->mmap_base = mmap_base();
22163+ mm->mmap_base = mmap_base(mm);
22164+
22165+#ifdef CONFIG_PAX_RANDMMAP
22166+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22167+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22168+#endif
22169+
22170 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22171 mm->unmap_area = arch_unmap_area_topdown;
22172 }
22173diff -urNp linux-3.1.4/arch/x86/mm/mmio-mod.c linux-3.1.4/arch/x86/mm/mmio-mod.c
22174--- linux-3.1.4/arch/x86/mm/mmio-mod.c 2011-11-11 15:19:27.000000000 -0500
22175+++ linux-3.1.4/arch/x86/mm/mmio-mod.c 2011-11-16 18:39:07.000000000 -0500
22176@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
22177 break;
22178 default:
22179 {
22180- unsigned char *ip = (unsigned char *)instptr;
22181+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22182 my_trace->opcode = MMIO_UNKNOWN_OP;
22183 my_trace->width = 0;
22184 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22185@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
22186 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22187 void __iomem *addr)
22188 {
22189- static atomic_t next_id;
22190+ static atomic_unchecked_t next_id;
22191 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22192 /* These are page-unaligned. */
22193 struct mmiotrace_map map = {
22194@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
22195 .private = trace
22196 },
22197 .phys = offset,
22198- .id = atomic_inc_return(&next_id)
22199+ .id = atomic_inc_return_unchecked(&next_id)
22200 };
22201 map.map_id = trace->id;
22202
22203diff -urNp linux-3.1.4/arch/x86/mm/pageattr.c linux-3.1.4/arch/x86/mm/pageattr.c
22204--- linux-3.1.4/arch/x86/mm/pageattr.c 2011-11-11 15:19:27.000000000 -0500
22205+++ linux-3.1.4/arch/x86/mm/pageattr.c 2011-11-16 18:39:07.000000000 -0500
22206@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
22207 */
22208 #ifdef CONFIG_PCI_BIOS
22209 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22210- pgprot_val(forbidden) |= _PAGE_NX;
22211+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22212 #endif
22213
22214 /*
22215@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
22216 * Does not cover __inittext since that is gone later on. On
22217 * 64bit we do not enforce !NX on the low mapping
22218 */
22219- if (within(address, (unsigned long)_text, (unsigned long)_etext))
22220- pgprot_val(forbidden) |= _PAGE_NX;
22221+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22222+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22223
22224+#ifdef CONFIG_DEBUG_RODATA
22225 /*
22226 * The .rodata section needs to be read-only. Using the pfn
22227 * catches all aliases.
22228@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
22229 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22230 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22231 pgprot_val(forbidden) |= _PAGE_RW;
22232+#endif
22233
22234 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
22235 /*
22236@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
22237 }
22238 #endif
22239
22240+#ifdef CONFIG_PAX_KERNEXEC
22241+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22242+ pgprot_val(forbidden) |= _PAGE_RW;
22243+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22244+ }
22245+#endif
22246+
22247 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22248
22249 return prot;
22250@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22251 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22252 {
22253 /* change init_mm */
22254+ pax_open_kernel();
22255 set_pte_atomic(kpte, pte);
22256+
22257 #ifdef CONFIG_X86_32
22258 if (!SHARED_KERNEL_PMD) {
22259+
22260+#ifdef CONFIG_PAX_PER_CPU_PGD
22261+ unsigned long cpu;
22262+#else
22263 struct page *page;
22264+#endif
22265
22266+#ifdef CONFIG_PAX_PER_CPU_PGD
22267+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22268+ pgd_t *pgd = get_cpu_pgd(cpu);
22269+#else
22270 list_for_each_entry(page, &pgd_list, lru) {
22271- pgd_t *pgd;
22272+ pgd_t *pgd = (pgd_t *)page_address(page);
22273+#endif
22274+
22275 pud_t *pud;
22276 pmd_t *pmd;
22277
22278- pgd = (pgd_t *)page_address(page) + pgd_index(address);
22279+ pgd += pgd_index(address);
22280 pud = pud_offset(pgd, address);
22281 pmd = pmd_offset(pud, address);
22282 set_pte_atomic((pte_t *)pmd, pte);
22283 }
22284 }
22285 #endif
22286+ pax_close_kernel();
22287 }
22288
22289 static int
22290diff -urNp linux-3.1.4/arch/x86/mm/pageattr-test.c linux-3.1.4/arch/x86/mm/pageattr-test.c
22291--- linux-3.1.4/arch/x86/mm/pageattr-test.c 2011-11-11 15:19:27.000000000 -0500
22292+++ linux-3.1.4/arch/x86/mm/pageattr-test.c 2011-11-16 18:39:07.000000000 -0500
22293@@ -36,7 +36,7 @@ enum {
22294
22295 static int pte_testbit(pte_t pte)
22296 {
22297- return pte_flags(pte) & _PAGE_UNUSED1;
22298+ return pte_flags(pte) & _PAGE_CPA_TEST;
22299 }
22300
22301 struct split_state {
22302diff -urNp linux-3.1.4/arch/x86/mm/pat.c linux-3.1.4/arch/x86/mm/pat.c
22303--- linux-3.1.4/arch/x86/mm/pat.c 2011-11-11 15:19:27.000000000 -0500
22304+++ linux-3.1.4/arch/x86/mm/pat.c 2011-11-16 18:39:07.000000000 -0500
22305@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
22306
22307 if (!entry) {
22308 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22309- current->comm, current->pid, start, end);
22310+ current->comm, task_pid_nr(current), start, end);
22311 return -EINVAL;
22312 }
22313
22314@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
22315 while (cursor < to) {
22316 if (!devmem_is_allowed(pfn)) {
22317 printk(KERN_INFO
22318- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22319- current->comm, from, to);
22320+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22321+ current->comm, from, to, cursor);
22322 return 0;
22323 }
22324 cursor += PAGE_SIZE;
22325@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
22326 printk(KERN_INFO
22327 "%s:%d ioremap_change_attr failed %s "
22328 "for %Lx-%Lx\n",
22329- current->comm, current->pid,
22330+ current->comm, task_pid_nr(current),
22331 cattr_name(flags),
22332 base, (unsigned long long)(base + size));
22333 return -EINVAL;
22334@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
22335 if (want_flags != flags) {
22336 printk(KERN_WARNING
22337 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
22338- current->comm, current->pid,
22339+ current->comm, task_pid_nr(current),
22340 cattr_name(want_flags),
22341 (unsigned long long)paddr,
22342 (unsigned long long)(paddr + size),
22343@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
22344 free_memtype(paddr, paddr + size);
22345 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22346 " for %Lx-%Lx, got %s\n",
22347- current->comm, current->pid,
22348+ current->comm, task_pid_nr(current),
22349 cattr_name(want_flags),
22350 (unsigned long long)paddr,
22351 (unsigned long long)(paddr + size),
22352diff -urNp linux-3.1.4/arch/x86/mm/pf_in.c linux-3.1.4/arch/x86/mm/pf_in.c
22353--- linux-3.1.4/arch/x86/mm/pf_in.c 2011-11-11 15:19:27.000000000 -0500
22354+++ linux-3.1.4/arch/x86/mm/pf_in.c 2011-11-16 18:39:07.000000000 -0500
22355@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22356 int i;
22357 enum reason_type rv = OTHERS;
22358
22359- p = (unsigned char *)ins_addr;
22360+ p = (unsigned char *)ktla_ktva(ins_addr);
22361 p += skip_prefix(p, &prf);
22362 p += get_opcode(p, &opcode);
22363
22364@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22365 struct prefix_bits prf;
22366 int i;
22367
22368- p = (unsigned char *)ins_addr;
22369+ p = (unsigned char *)ktla_ktva(ins_addr);
22370 p += skip_prefix(p, &prf);
22371 p += get_opcode(p, &opcode);
22372
22373@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22374 struct prefix_bits prf;
22375 int i;
22376
22377- p = (unsigned char *)ins_addr;
22378+ p = (unsigned char *)ktla_ktva(ins_addr);
22379 p += skip_prefix(p, &prf);
22380 p += get_opcode(p, &opcode);
22381
22382@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
22383 struct prefix_bits prf;
22384 int i;
22385
22386- p = (unsigned char *)ins_addr;
22387+ p = (unsigned char *)ktla_ktva(ins_addr);
22388 p += skip_prefix(p, &prf);
22389 p += get_opcode(p, &opcode);
22390 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22391@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
22392 struct prefix_bits prf;
22393 int i;
22394
22395- p = (unsigned char *)ins_addr;
22396+ p = (unsigned char *)ktla_ktva(ins_addr);
22397 p += skip_prefix(p, &prf);
22398 p += get_opcode(p, &opcode);
22399 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22400diff -urNp linux-3.1.4/arch/x86/mm/pgtable_32.c linux-3.1.4/arch/x86/mm/pgtable_32.c
22401--- linux-3.1.4/arch/x86/mm/pgtable_32.c 2011-11-11 15:19:27.000000000 -0500
22402+++ linux-3.1.4/arch/x86/mm/pgtable_32.c 2011-11-16 18:39:07.000000000 -0500
22403@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
22404 return;
22405 }
22406 pte = pte_offset_kernel(pmd, vaddr);
22407+
22408+ pax_open_kernel();
22409 if (pte_val(pteval))
22410 set_pte_at(&init_mm, vaddr, pte, pteval);
22411 else
22412 pte_clear(&init_mm, vaddr, pte);
22413+ pax_close_kernel();
22414
22415 /*
22416 * It's enough to flush this one mapping.
22417diff -urNp linux-3.1.4/arch/x86/mm/pgtable.c linux-3.1.4/arch/x86/mm/pgtable.c
22418--- linux-3.1.4/arch/x86/mm/pgtable.c 2011-11-11 15:19:27.000000000 -0500
22419+++ linux-3.1.4/arch/x86/mm/pgtable.c 2011-11-16 18:39:07.000000000 -0500
22420@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
22421 list_del(&page->lru);
22422 }
22423
22424-#define UNSHARED_PTRS_PER_PGD \
22425- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22426+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22427+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22428
22429+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22430+{
22431+ while (count--)
22432+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22433+}
22434+#endif
22435+
22436+#ifdef CONFIG_PAX_PER_CPU_PGD
22437+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22438+{
22439+ while (count--)
22440+
22441+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22442+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22443+#else
22444+ *dst++ = *src++;
22445+#endif
22446
22447+}
22448+#endif
22449+
22450+#ifdef CONFIG_X86_64
22451+#define pxd_t pud_t
22452+#define pyd_t pgd_t
22453+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22454+#define pxd_free(mm, pud) pud_free((mm), (pud))
22455+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22456+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22457+#define PYD_SIZE PGDIR_SIZE
22458+#else
22459+#define pxd_t pmd_t
22460+#define pyd_t pud_t
22461+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22462+#define pxd_free(mm, pud) pmd_free((mm), (pud))
22463+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22464+#define pyd_offset(mm ,address) pud_offset((mm), (address))
22465+#define PYD_SIZE PUD_SIZE
22466+#endif
22467+
22468+#ifdef CONFIG_PAX_PER_CPU_PGD
22469+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
22470+static inline void pgd_dtor(pgd_t *pgd) {}
22471+#else
22472 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
22473 {
22474 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
22475@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
22476 pgd_list_del(pgd);
22477 spin_unlock(&pgd_lock);
22478 }
22479+#endif
22480
22481 /*
22482 * List of all pgd's needed for non-PAE so it can invalidate entries
22483@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
22484 * -- wli
22485 */
22486
22487-#ifdef CONFIG_X86_PAE
22488+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22489 /*
22490 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22491 * updating the top-level pagetable entries to guarantee the
22492@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
22493 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22494 * and initialize the kernel pmds here.
22495 */
22496-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22497+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22498
22499 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22500 {
22501@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
22502 */
22503 flush_tlb_mm(mm);
22504 }
22505+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22506+#define PREALLOCATED_PXDS USER_PGD_PTRS
22507 #else /* !CONFIG_X86_PAE */
22508
22509 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22510-#define PREALLOCATED_PMDS 0
22511+#define PREALLOCATED_PXDS 0
22512
22513 #endif /* CONFIG_X86_PAE */
22514
22515-static void free_pmds(pmd_t *pmds[])
22516+static void free_pxds(pxd_t *pxds[])
22517 {
22518 int i;
22519
22520- for(i = 0; i < PREALLOCATED_PMDS; i++)
22521- if (pmds[i])
22522- free_page((unsigned long)pmds[i]);
22523+ for(i = 0; i < PREALLOCATED_PXDS; i++)
22524+ if (pxds[i])
22525+ free_page((unsigned long)pxds[i]);
22526 }
22527
22528-static int preallocate_pmds(pmd_t *pmds[])
22529+static int preallocate_pxds(pxd_t *pxds[])
22530 {
22531 int i;
22532 bool failed = false;
22533
22534- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22535- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22536- if (pmd == NULL)
22537+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22538+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22539+ if (pxd == NULL)
22540 failed = true;
22541- pmds[i] = pmd;
22542+ pxds[i] = pxd;
22543 }
22544
22545 if (failed) {
22546- free_pmds(pmds);
22547+ free_pxds(pxds);
22548 return -ENOMEM;
22549 }
22550
22551@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
22552 * preallocate which never got a corresponding vma will need to be
22553 * freed manually.
22554 */
22555-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22556+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22557 {
22558 int i;
22559
22560- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22561+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22562 pgd_t pgd = pgdp[i];
22563
22564 if (pgd_val(pgd) != 0) {
22565- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22566+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22567
22568- pgdp[i] = native_make_pgd(0);
22569+ set_pgd(pgdp + i, native_make_pgd(0));
22570
22571- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22572- pmd_free(mm, pmd);
22573+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22574+ pxd_free(mm, pxd);
22575 }
22576 }
22577 }
22578
22579-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22580+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22581 {
22582- pud_t *pud;
22583+ pyd_t *pyd;
22584 unsigned long addr;
22585 int i;
22586
22587- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22588+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22589 return;
22590
22591- pud = pud_offset(pgd, 0);
22592+#ifdef CONFIG_X86_64
22593+ pyd = pyd_offset(mm, 0L);
22594+#else
22595+ pyd = pyd_offset(pgd, 0L);
22596+#endif
22597
22598- for (addr = i = 0; i < PREALLOCATED_PMDS;
22599- i++, pud++, addr += PUD_SIZE) {
22600- pmd_t *pmd = pmds[i];
22601+ for (addr = i = 0; i < PREALLOCATED_PXDS;
22602+ i++, pyd++, addr += PYD_SIZE) {
22603+ pxd_t *pxd = pxds[i];
22604
22605 if (i >= KERNEL_PGD_BOUNDARY)
22606- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22607- sizeof(pmd_t) * PTRS_PER_PMD);
22608+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22609+ sizeof(pxd_t) * PTRS_PER_PMD);
22610
22611- pud_populate(mm, pud, pmd);
22612+ pyd_populate(mm, pyd, pxd);
22613 }
22614 }
22615
22616 pgd_t *pgd_alloc(struct mm_struct *mm)
22617 {
22618 pgd_t *pgd;
22619- pmd_t *pmds[PREALLOCATED_PMDS];
22620+ pxd_t *pxds[PREALLOCATED_PXDS];
22621
22622 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22623
22624@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22625
22626 mm->pgd = pgd;
22627
22628- if (preallocate_pmds(pmds) != 0)
22629+ if (preallocate_pxds(pxds) != 0)
22630 goto out_free_pgd;
22631
22632 if (paravirt_pgd_alloc(mm) != 0)
22633- goto out_free_pmds;
22634+ goto out_free_pxds;
22635
22636 /*
22637 * Make sure that pre-populating the pmds is atomic with
22638@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22639 spin_lock(&pgd_lock);
22640
22641 pgd_ctor(mm, pgd);
22642- pgd_prepopulate_pmd(mm, pgd, pmds);
22643+ pgd_prepopulate_pxd(mm, pgd, pxds);
22644
22645 spin_unlock(&pgd_lock);
22646
22647 return pgd;
22648
22649-out_free_pmds:
22650- free_pmds(pmds);
22651+out_free_pxds:
22652+ free_pxds(pxds);
22653 out_free_pgd:
22654 free_page((unsigned long)pgd);
22655 out:
22656@@ -295,7 +344,7 @@ out:
22657
22658 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22659 {
22660- pgd_mop_up_pmds(mm, pgd);
22661+ pgd_mop_up_pxds(mm, pgd);
22662 pgd_dtor(pgd);
22663 paravirt_pgd_free(mm, pgd);
22664 free_page((unsigned long)pgd);
22665diff -urNp linux-3.1.4/arch/x86/mm/setup_nx.c linux-3.1.4/arch/x86/mm/setup_nx.c
22666--- linux-3.1.4/arch/x86/mm/setup_nx.c 2011-11-11 15:19:27.000000000 -0500
22667+++ linux-3.1.4/arch/x86/mm/setup_nx.c 2011-11-16 18:39:07.000000000 -0500
22668@@ -5,8 +5,10 @@
22669 #include <asm/pgtable.h>
22670 #include <asm/proto.h>
22671
22672+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22673 static int disable_nx __cpuinitdata;
22674
22675+#ifndef CONFIG_PAX_PAGEEXEC
22676 /*
22677 * noexec = on|off
22678 *
22679@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
22680 return 0;
22681 }
22682 early_param("noexec", noexec_setup);
22683+#endif
22684+
22685+#endif
22686
22687 void __cpuinit x86_configure_nx(void)
22688 {
22689+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22690 if (cpu_has_nx && !disable_nx)
22691 __supported_pte_mask |= _PAGE_NX;
22692 else
22693+#endif
22694 __supported_pte_mask &= ~_PAGE_NX;
22695 }
22696
22697diff -urNp linux-3.1.4/arch/x86/mm/tlb.c linux-3.1.4/arch/x86/mm/tlb.c
22698--- linux-3.1.4/arch/x86/mm/tlb.c 2011-11-11 15:19:27.000000000 -0500
22699+++ linux-3.1.4/arch/x86/mm/tlb.c 2011-11-16 18:39:07.000000000 -0500
22700@@ -65,7 +65,11 @@ void leave_mm(int cpu)
22701 BUG();
22702 cpumask_clear_cpu(cpu,
22703 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22704+
22705+#ifndef CONFIG_PAX_PER_CPU_PGD
22706 load_cr3(swapper_pg_dir);
22707+#endif
22708+
22709 }
22710 EXPORT_SYMBOL_GPL(leave_mm);
22711
22712diff -urNp linux-3.1.4/arch/x86/net/bpf_jit_comp.c linux-3.1.4/arch/x86/net/bpf_jit_comp.c
22713--- linux-3.1.4/arch/x86/net/bpf_jit_comp.c 2011-11-11 15:19:27.000000000 -0500
22714+++ linux-3.1.4/arch/x86/net/bpf_jit_comp.c 2011-11-20 19:21:53.000000000 -0500
22715@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void
22716 set_fs(old_fs);
22717 }
22718
22719+struct bpf_jit_work {
22720+ struct work_struct work;
22721+ void *image;
22722+};
22723
22724 void bpf_jit_compile(struct sk_filter *fp)
22725 {
22726@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *f
22727 if (addrs == NULL)
22728 return;
22729
22730+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
22731+ if (!fp->work)
22732+ goto out;
22733+
22734 /* Before first pass, make a rough estimation of addrs[]
22735 * each bpf instruction is translated to less than 64 bytes
22736 */
22737@@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filt
22738 if (image) {
22739 if (unlikely(proglen + ilen > oldproglen)) {
22740 pr_err("bpb_jit_compile fatal error\n");
22741- kfree(addrs);
22742- module_free(NULL, image);
22743- return;
22744+ module_free_exec(NULL, image);
22745+ goto out;
22746 }
22747+ pax_open_kernel();
22748 memcpy(image + proglen, temp, ilen);
22749+ pax_close_kernel();
22750 }
22751 proglen += ilen;
22752 addrs[i] = proglen;
22753@@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filt
22754 break;
22755 }
22756 if (proglen == oldproglen) {
22757- image = module_alloc(max_t(unsigned int,
22758+ image = module_alloc_exec(max_t(unsigned int,
22759 proglen,
22760 sizeof(struct work_struct)));
22761 if (!image)
22762@@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filt
22763 fp->bpf_func = (void *)image;
22764 }
22765 out:
22766+ kfree(fp->work);
22767 kfree(addrs);
22768 return;
22769 }
22770
22771 static void jit_free_defer(struct work_struct *arg)
22772 {
22773- module_free(NULL, arg);
22774+ module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
22775+ kfree(arg);
22776 }
22777
22778 /* run from softirq, we must use a work_struct to call
22779- * module_free() from process context
22780+ * module_free_exec() from process context
22781 */
22782 void bpf_jit_free(struct sk_filter *fp)
22783 {
22784 if (fp->bpf_func != sk_run_filter) {
22785- struct work_struct *work = (struct work_struct *)fp->bpf_func;
22786+ struct work_struct *work = &fp->work->work;
22787
22788 INIT_WORK(work, jit_free_defer);
22789+ fp->work->image = fp->bpf_func;
22790 schedule_work(work);
22791 }
22792 }
22793diff -urNp linux-3.1.4/arch/x86/net/bpf_jit.S linux-3.1.4/arch/x86/net/bpf_jit.S
22794--- linux-3.1.4/arch/x86/net/bpf_jit.S 2011-11-11 15:19:27.000000000 -0500
22795+++ linux-3.1.4/arch/x86/net/bpf_jit.S 2011-11-16 18:39:07.000000000 -0500
22796@@ -9,6 +9,7 @@
22797 */
22798 #include <linux/linkage.h>
22799 #include <asm/dwarf2.h>
22800+#include <asm/alternative-asm.h>
22801
22802 /*
22803 * Calling convention :
22804@@ -35,6 +36,7 @@ sk_load_word:
22805 jle bpf_slow_path_word
22806 mov (SKBDATA,%rsi),%eax
22807 bswap %eax /* ntohl() */
22808+ pax_force_retaddr
22809 ret
22810
22811
22812@@ -53,6 +55,7 @@ sk_load_half:
22813 jle bpf_slow_path_half
22814 movzwl (SKBDATA,%rsi),%eax
22815 rol $8,%ax # ntohs()
22816+ pax_force_retaddr
22817 ret
22818
22819 sk_load_byte_ind:
22820@@ -66,6 +69,7 @@ sk_load_byte:
22821 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
22822 jle bpf_slow_path_byte
22823 movzbl (SKBDATA,%rsi),%eax
22824+ pax_force_retaddr
22825 ret
22826
22827 /**
22828@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
22829 movzbl (SKBDATA,%rsi),%ebx
22830 and $15,%bl
22831 shl $2,%bl
22832+ pax_force_retaddr
22833 ret
22834 CFI_ENDPROC
22835 ENDPROC(sk_load_byte_msh)
22836@@ -91,6 +96,7 @@ bpf_error:
22837 xor %eax,%eax
22838 mov -8(%rbp),%rbx
22839 leaveq
22840+ pax_force_retaddr
22841 ret
22842
22843 /* rsi contains offset and can be scratched */
22844@@ -113,6 +119,7 @@ bpf_slow_path_word:
22845 js bpf_error
22846 mov -12(%rbp),%eax
22847 bswap %eax
22848+ pax_force_retaddr
22849 ret
22850
22851 bpf_slow_path_half:
22852@@ -121,12 +128,14 @@ bpf_slow_path_half:
22853 mov -12(%rbp),%ax
22854 rol $8,%ax
22855 movzwl %ax,%eax
22856+ pax_force_retaddr
22857 ret
22858
22859 bpf_slow_path_byte:
22860 bpf_slow_path_common(1)
22861 js bpf_error
22862 movzbl -12(%rbp),%eax
22863+ pax_force_retaddr
22864 ret
22865
22866 bpf_slow_path_byte_msh:
22867@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
22868 and $15,%al
22869 shl $2,%al
22870 xchg %eax,%ebx
22871+ pax_force_retaddr
22872 ret
22873diff -urNp linux-3.1.4/arch/x86/oprofile/backtrace.c linux-3.1.4/arch/x86/oprofile/backtrace.c
22874--- linux-3.1.4/arch/x86/oprofile/backtrace.c 2011-11-11 15:19:27.000000000 -0500
22875+++ linux-3.1.4/arch/x86/oprofile/backtrace.c 2011-11-16 18:39:07.000000000 -0500
22876@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_fram
22877 struct stack_frame_ia32 *fp;
22878 unsigned long bytes;
22879
22880- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22881+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22882 if (bytes != sizeof(bufhead))
22883 return NULL;
22884
22885- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
22886+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
22887
22888 oprofile_add_trace(bufhead[0].return_address);
22889
22890@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_bac
22891 struct stack_frame bufhead[2];
22892 unsigned long bytes;
22893
22894- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22895+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22896 if (bytes != sizeof(bufhead))
22897 return NULL;
22898
22899@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const reg
22900 {
22901 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
22902
22903- if (!user_mode_vm(regs)) {
22904+ if (!user_mode(regs)) {
22905 unsigned long stack = kernel_stack_pointer(regs);
22906 if (depth)
22907 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22908diff -urNp linux-3.1.4/arch/x86/pci/mrst.c linux-3.1.4/arch/x86/pci/mrst.c
22909--- linux-3.1.4/arch/x86/pci/mrst.c 2011-11-11 15:19:27.000000000 -0500
22910+++ linux-3.1.4/arch/x86/pci/mrst.c 2011-11-16 18:39:07.000000000 -0500
22911@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
22912 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
22913 pci_mmcfg_late_init();
22914 pcibios_enable_irq = mrst_pci_irq_enable;
22915- pci_root_ops = pci_mrst_ops;
22916+ pax_open_kernel();
22917+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
22918+ pax_close_kernel();
22919 /* Continue with standard init */
22920 return 1;
22921 }
22922diff -urNp linux-3.1.4/arch/x86/pci/pcbios.c linux-3.1.4/arch/x86/pci/pcbios.c
22923--- linux-3.1.4/arch/x86/pci/pcbios.c 2011-11-11 15:19:27.000000000 -0500
22924+++ linux-3.1.4/arch/x86/pci/pcbios.c 2011-11-16 18:39:07.000000000 -0500
22925@@ -79,50 +79,93 @@ union bios32 {
22926 static struct {
22927 unsigned long address;
22928 unsigned short segment;
22929-} bios32_indirect = { 0, __KERNEL_CS };
22930+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22931
22932 /*
22933 * Returns the entry point for the given service, NULL on error
22934 */
22935
22936-static unsigned long bios32_service(unsigned long service)
22937+static unsigned long __devinit bios32_service(unsigned long service)
22938 {
22939 unsigned char return_code; /* %al */
22940 unsigned long address; /* %ebx */
22941 unsigned long length; /* %ecx */
22942 unsigned long entry; /* %edx */
22943 unsigned long flags;
22944+ struct desc_struct d, *gdt;
22945
22946 local_irq_save(flags);
22947- __asm__("lcall *(%%edi); cld"
22948+
22949+ gdt = get_cpu_gdt_table(smp_processor_id());
22950+
22951+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22952+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22953+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22954+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22955+
22956+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22957 : "=a" (return_code),
22958 "=b" (address),
22959 "=c" (length),
22960 "=d" (entry)
22961 : "0" (service),
22962 "1" (0),
22963- "D" (&bios32_indirect));
22964+ "D" (&bios32_indirect),
22965+ "r"(__PCIBIOS_DS)
22966+ : "memory");
22967+
22968+ pax_open_kernel();
22969+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22970+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22971+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22972+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22973+ pax_close_kernel();
22974+
22975 local_irq_restore(flags);
22976
22977 switch (return_code) {
22978- case 0:
22979- return address + entry;
22980- case 0x80: /* Not present */
22981- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22982- return 0;
22983- default: /* Shouldn't happen */
22984- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22985- service, return_code);
22986+ case 0: {
22987+ int cpu;
22988+ unsigned char flags;
22989+
22990+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22991+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22992+ printk(KERN_WARNING "bios32_service: not valid\n");
22993 return 0;
22994+ }
22995+ address = address + PAGE_OFFSET;
22996+ length += 16UL; /* some BIOSs underreport this... */
22997+ flags = 4;
22998+ if (length >= 64*1024*1024) {
22999+ length >>= PAGE_SHIFT;
23000+ flags |= 8;
23001+ }
23002+
23003+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23004+ gdt = get_cpu_gdt_table(cpu);
23005+ pack_descriptor(&d, address, length, 0x9b, flags);
23006+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23007+ pack_descriptor(&d, address, length, 0x93, flags);
23008+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23009+ }
23010+ return entry;
23011+ }
23012+ case 0x80: /* Not present */
23013+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23014+ return 0;
23015+ default: /* Shouldn't happen */
23016+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23017+ service, return_code);
23018+ return 0;
23019 }
23020 }
23021
23022 static struct {
23023 unsigned long address;
23024 unsigned short segment;
23025-} pci_indirect = { 0, __KERNEL_CS };
23026+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23027
23028-static int pci_bios_present;
23029+static int pci_bios_present __read_only;
23030
23031 static int __devinit check_pcibios(void)
23032 {
23033@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23034 unsigned long flags, pcibios_entry;
23035
23036 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23037- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23038+ pci_indirect.address = pcibios_entry;
23039
23040 local_irq_save(flags);
23041- __asm__(
23042- "lcall *(%%edi); cld\n\t"
23043+ __asm__("movw %w6, %%ds\n\t"
23044+ "lcall *%%ss:(%%edi); cld\n\t"
23045+ "push %%ss\n\t"
23046+ "pop %%ds\n\t"
23047 "jc 1f\n\t"
23048 "xor %%ah, %%ah\n"
23049 "1:"
23050@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23051 "=b" (ebx),
23052 "=c" (ecx)
23053 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23054- "D" (&pci_indirect)
23055+ "D" (&pci_indirect),
23056+ "r" (__PCIBIOS_DS)
23057 : "memory");
23058 local_irq_restore(flags);
23059
23060@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int se
23061
23062 switch (len) {
23063 case 1:
23064- __asm__("lcall *(%%esi); cld\n\t"
23065+ __asm__("movw %w6, %%ds\n\t"
23066+ "lcall *%%ss:(%%esi); cld\n\t"
23067+ "push %%ss\n\t"
23068+ "pop %%ds\n\t"
23069 "jc 1f\n\t"
23070 "xor %%ah, %%ah\n"
23071 "1:"
23072@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int se
23073 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23074 "b" (bx),
23075 "D" ((long)reg),
23076- "S" (&pci_indirect));
23077+ "S" (&pci_indirect),
23078+ "r" (__PCIBIOS_DS));
23079 /*
23080 * Zero-extend the result beyond 8 bits, do not trust the
23081 * BIOS having done it:
23082@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int se
23083 *value &= 0xff;
23084 break;
23085 case 2:
23086- __asm__("lcall *(%%esi); cld\n\t"
23087+ __asm__("movw %w6, %%ds\n\t"
23088+ "lcall *%%ss:(%%esi); cld\n\t"
23089+ "push %%ss\n\t"
23090+ "pop %%ds\n\t"
23091 "jc 1f\n\t"
23092 "xor %%ah, %%ah\n"
23093 "1:"
23094@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int se
23095 : "1" (PCIBIOS_READ_CONFIG_WORD),
23096 "b" (bx),
23097 "D" ((long)reg),
23098- "S" (&pci_indirect));
23099+ "S" (&pci_indirect),
23100+ "r" (__PCIBIOS_DS));
23101 /*
23102 * Zero-extend the result beyond 16 bits, do not trust the
23103 * BIOS having done it:
23104@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int se
23105 *value &= 0xffff;
23106 break;
23107 case 4:
23108- __asm__("lcall *(%%esi); cld\n\t"
23109+ __asm__("movw %w6, %%ds\n\t"
23110+ "lcall *%%ss:(%%esi); cld\n\t"
23111+ "push %%ss\n\t"
23112+ "pop %%ds\n\t"
23113 "jc 1f\n\t"
23114 "xor %%ah, %%ah\n"
23115 "1:"
23116@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int se
23117 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23118 "b" (bx),
23119 "D" ((long)reg),
23120- "S" (&pci_indirect));
23121+ "S" (&pci_indirect),
23122+ "r" (__PCIBIOS_DS));
23123 break;
23124 }
23125
23126@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int s
23127
23128 switch (len) {
23129 case 1:
23130- __asm__("lcall *(%%esi); cld\n\t"
23131+ __asm__("movw %w6, %%ds\n\t"
23132+ "lcall *%%ss:(%%esi); cld\n\t"
23133+ "push %%ss\n\t"
23134+ "pop %%ds\n\t"
23135 "jc 1f\n\t"
23136 "xor %%ah, %%ah\n"
23137 "1:"
23138@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int s
23139 "c" (value),
23140 "b" (bx),
23141 "D" ((long)reg),
23142- "S" (&pci_indirect));
23143+ "S" (&pci_indirect),
23144+ "r" (__PCIBIOS_DS));
23145 break;
23146 case 2:
23147- __asm__("lcall *(%%esi); cld\n\t"
23148+ __asm__("movw %w6, %%ds\n\t"
23149+ "lcall *%%ss:(%%esi); cld\n\t"
23150+ "push %%ss\n\t"
23151+ "pop %%ds\n\t"
23152 "jc 1f\n\t"
23153 "xor %%ah, %%ah\n"
23154 "1:"
23155@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int s
23156 "c" (value),
23157 "b" (bx),
23158 "D" ((long)reg),
23159- "S" (&pci_indirect));
23160+ "S" (&pci_indirect),
23161+ "r" (__PCIBIOS_DS));
23162 break;
23163 case 4:
23164- __asm__("lcall *(%%esi); cld\n\t"
23165+ __asm__("movw %w6, %%ds\n\t"
23166+ "lcall *%%ss:(%%esi); cld\n\t"
23167+ "push %%ss\n\t"
23168+ "pop %%ds\n\t"
23169 "jc 1f\n\t"
23170 "xor %%ah, %%ah\n"
23171 "1:"
23172@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int s
23173 "c" (value),
23174 "b" (bx),
23175 "D" ((long)reg),
23176- "S" (&pci_indirect));
23177+ "S" (&pci_indirect),
23178+ "r" (__PCIBIOS_DS));
23179 break;
23180 }
23181
23182@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_i
23183
23184 DBG("PCI: Fetching IRQ routing table... ");
23185 __asm__("push %%es\n\t"
23186+ "movw %w8, %%ds\n\t"
23187 "push %%ds\n\t"
23188 "pop %%es\n\t"
23189- "lcall *(%%esi); cld\n\t"
23190+ "lcall *%%ss:(%%esi); cld\n\t"
23191 "pop %%es\n\t"
23192+ "push %%ss\n\t"
23193+ "pop %%ds\n"
23194 "jc 1f\n\t"
23195 "xor %%ah, %%ah\n"
23196 "1:"
23197@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_i
23198 "1" (0),
23199 "D" ((long) &opt),
23200 "S" (&pci_indirect),
23201- "m" (opt)
23202+ "m" (opt),
23203+ "r" (__PCIBIOS_DS)
23204 : "memory");
23205 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23206 if (ret & 0xff00)
23207@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_d
23208 {
23209 int ret;
23210
23211- __asm__("lcall *(%%esi); cld\n\t"
23212+ __asm__("movw %w5, %%ds\n\t"
23213+ "lcall *%%ss:(%%esi); cld\n\t"
23214+ "push %%ss\n\t"
23215+ "pop %%ds\n"
23216 "jc 1f\n\t"
23217 "xor %%ah, %%ah\n"
23218 "1:"
23219@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_d
23220 : "0" (PCIBIOS_SET_PCI_HW_INT),
23221 "b" ((dev->bus->number << 8) | dev->devfn),
23222 "c" ((irq << 8) | (pin + 10)),
23223- "S" (&pci_indirect));
23224+ "S" (&pci_indirect),
23225+ "r" (__PCIBIOS_DS));
23226 return !(ret & 0xff00);
23227 }
23228 EXPORT_SYMBOL(pcibios_set_irq_routing);
23229diff -urNp linux-3.1.4/arch/x86/platform/efi/efi_32.c linux-3.1.4/arch/x86/platform/efi/efi_32.c
23230--- linux-3.1.4/arch/x86/platform/efi/efi_32.c 2011-11-11 15:19:27.000000000 -0500
23231+++ linux-3.1.4/arch/x86/platform/efi/efi_32.c 2011-11-16 18:39:07.000000000 -0500
23232@@ -38,70 +38,56 @@
23233 */
23234
23235 static unsigned long efi_rt_eflags;
23236-static pgd_t efi_bak_pg_dir_pointer[2];
23237+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
23238
23239-void efi_call_phys_prelog(void)
23240+void __init efi_call_phys_prelog(void)
23241 {
23242- unsigned long cr4;
23243- unsigned long temp;
23244 struct desc_ptr gdt_descr;
23245
23246- local_irq_save(efi_rt_eflags);
23247+#ifdef CONFIG_PAX_KERNEXEC
23248+ struct desc_struct d;
23249+#endif
23250
23251- /*
23252- * If I don't have PAE, I should just duplicate two entries in page
23253- * directory. If I have PAE, I just need to duplicate one entry in
23254- * page directory.
23255- */
23256- cr4 = read_cr4_safe();
23257+ local_irq_save(efi_rt_eflags);
23258
23259- if (cr4 & X86_CR4_PAE) {
23260- efi_bak_pg_dir_pointer[0].pgd =
23261- swapper_pg_dir[pgd_index(0)].pgd;
23262- swapper_pg_dir[0].pgd =
23263- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
23264- } else {
23265- efi_bak_pg_dir_pointer[0].pgd =
23266- swapper_pg_dir[pgd_index(0)].pgd;
23267- efi_bak_pg_dir_pointer[1].pgd =
23268- swapper_pg_dir[pgd_index(0x400000)].pgd;
23269- swapper_pg_dir[pgd_index(0)].pgd =
23270- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
23271- temp = PAGE_OFFSET + 0x400000;
23272- swapper_pg_dir[pgd_index(0x400000)].pgd =
23273- swapper_pg_dir[pgd_index(temp)].pgd;
23274- }
23275+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
23276+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23277+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
23278
23279 /*
23280 * After the lock is released, the original page table is restored.
23281 */
23282 __flush_tlb_all();
23283
23284+#ifdef CONFIG_PAX_KERNEXEC
23285+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
23286+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
23287+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
23288+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
23289+#endif
23290+
23291 gdt_descr.address = __pa(get_cpu_gdt_table(0));
23292 gdt_descr.size = GDT_SIZE - 1;
23293 load_gdt(&gdt_descr);
23294 }
23295
23296-void efi_call_phys_epilog(void)
23297+void __init efi_call_phys_epilog(void)
23298 {
23299- unsigned long cr4;
23300 struct desc_ptr gdt_descr;
23301
23302+#ifdef CONFIG_PAX_KERNEXEC
23303+ struct desc_struct d;
23304+
23305+ memset(&d, 0, sizeof d);
23306+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
23307+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
23308+#endif
23309+
23310 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
23311 gdt_descr.size = GDT_SIZE - 1;
23312 load_gdt(&gdt_descr);
23313
23314- cr4 = read_cr4_safe();
23315-
23316- if (cr4 & X86_CR4_PAE) {
23317- swapper_pg_dir[pgd_index(0)].pgd =
23318- efi_bak_pg_dir_pointer[0].pgd;
23319- } else {
23320- swapper_pg_dir[pgd_index(0)].pgd =
23321- efi_bak_pg_dir_pointer[0].pgd;
23322- swapper_pg_dir[pgd_index(0x400000)].pgd =
23323- efi_bak_pg_dir_pointer[1].pgd;
23324- }
23325+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
23326
23327 /*
23328 * After the lock is released, the original page table is restored.
23329diff -urNp linux-3.1.4/arch/x86/platform/efi/efi_stub_32.S linux-3.1.4/arch/x86/platform/efi/efi_stub_32.S
23330--- linux-3.1.4/arch/x86/platform/efi/efi_stub_32.S 2011-11-11 15:19:27.000000000 -0500
23331+++ linux-3.1.4/arch/x86/platform/efi/efi_stub_32.S 2011-11-16 18:39:07.000000000 -0500
23332@@ -6,7 +6,9 @@
23333 */
23334
23335 #include <linux/linkage.h>
23336+#include <linux/init.h>
23337 #include <asm/page_types.h>
23338+#include <asm/segment.h>
23339
23340 /*
23341 * efi_call_phys(void *, ...) is a function with variable parameters.
23342@@ -20,7 +22,7 @@
23343 * service functions will comply with gcc calling convention, too.
23344 */
23345
23346-.text
23347+__INIT
23348 ENTRY(efi_call_phys)
23349 /*
23350 * 0. The function can only be called in Linux kernel. So CS has been
23351@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
23352 * The mapping of lower virtual memory has been created in prelog and
23353 * epilog.
23354 */
23355- movl $1f, %edx
23356- subl $__PAGE_OFFSET, %edx
23357- jmp *%edx
23358+ movl $(__KERNEXEC_EFI_DS), %edx
23359+ mov %edx, %ds
23360+ mov %edx, %es
23361+ mov %edx, %ss
23362+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
23363 1:
23364
23365 /*
23366@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
23367 * parameter 2, ..., param n. To make things easy, we save the return
23368 * address of efi_call_phys in a global variable.
23369 */
23370- popl %edx
23371- movl %edx, saved_return_addr
23372- /* get the function pointer into ECX*/
23373- popl %ecx
23374- movl %ecx, efi_rt_function_ptr
23375- movl $2f, %edx
23376- subl $__PAGE_OFFSET, %edx
23377- pushl %edx
23378+ popl (saved_return_addr)
23379+ popl (efi_rt_function_ptr)
23380
23381 /*
23382 * 3. Clear PG bit in %CR0.
23383@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
23384 /*
23385 * 5. Call the physical function.
23386 */
23387- jmp *%ecx
23388+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
23389
23390-2:
23391 /*
23392 * 6. After EFI runtime service returns, control will return to
23393 * following instruction. We'd better readjust stack pointer first.
23394@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
23395 movl %cr0, %edx
23396 orl $0x80000000, %edx
23397 movl %edx, %cr0
23398- jmp 1f
23399-1:
23400+
23401 /*
23402 * 8. Now restore the virtual mode from flat mode by
23403 * adding EIP with PAGE_OFFSET.
23404 */
23405- movl $1f, %edx
23406- jmp *%edx
23407+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
23408 1:
23409+ movl $(__KERNEL_DS), %edx
23410+ mov %edx, %ds
23411+ mov %edx, %es
23412+ mov %edx, %ss
23413
23414 /*
23415 * 9. Balance the stack. And because EAX contain the return value,
23416 * we'd better not clobber it.
23417 */
23418- leal efi_rt_function_ptr, %edx
23419- movl (%edx), %ecx
23420- pushl %ecx
23421+ pushl (efi_rt_function_ptr)
23422
23423 /*
23424- * 10. Push the saved return address onto the stack and return.
23425+ * 10. Return to the saved return address.
23426 */
23427- leal saved_return_addr, %edx
23428- movl (%edx), %ecx
23429- pushl %ecx
23430- ret
23431+ jmpl *(saved_return_addr)
23432 ENDPROC(efi_call_phys)
23433 .previous
23434
23435-.data
23436+__INITDATA
23437 saved_return_addr:
23438 .long 0
23439 efi_rt_function_ptr:
23440diff -urNp linux-3.1.4/arch/x86/platform/efi/efi_stub_64.S linux-3.1.4/arch/x86/platform/efi/efi_stub_64.S
23441--- linux-3.1.4/arch/x86/platform/efi/efi_stub_64.S 2011-11-11 15:19:27.000000000 -0500
23442+++ linux-3.1.4/arch/x86/platform/efi/efi_stub_64.S 2011-12-02 17:38:47.000000000 -0500
23443@@ -7,6 +7,7 @@
23444 */
23445
23446 #include <linux/linkage.h>
23447+#include <asm/alternative-asm.h>
23448
23449 #define SAVE_XMM \
23450 mov %rsp, %rax; \
23451@@ -40,6 +41,7 @@ ENTRY(efi_call0)
23452 call *%rdi
23453 addq $32, %rsp
23454 RESTORE_XMM
23455+ pax_force_retaddr 0, 1
23456 ret
23457 ENDPROC(efi_call0)
23458
23459@@ -50,6 +52,7 @@ ENTRY(efi_call1)
23460 call *%rdi
23461 addq $32, %rsp
23462 RESTORE_XMM
23463+ pax_force_retaddr 0, 1
23464 ret
23465 ENDPROC(efi_call1)
23466
23467@@ -60,6 +63,7 @@ ENTRY(efi_call2)
23468 call *%rdi
23469 addq $32, %rsp
23470 RESTORE_XMM
23471+ pax_force_retaddr 0, 1
23472 ret
23473 ENDPROC(efi_call2)
23474
23475@@ -71,6 +75,7 @@ ENTRY(efi_call3)
23476 call *%rdi
23477 addq $32, %rsp
23478 RESTORE_XMM
23479+ pax_force_retaddr 0, 1
23480 ret
23481 ENDPROC(efi_call3)
23482
23483@@ -83,6 +88,7 @@ ENTRY(efi_call4)
23484 call *%rdi
23485 addq $32, %rsp
23486 RESTORE_XMM
23487+ pax_force_retaddr 0, 1
23488 ret
23489 ENDPROC(efi_call4)
23490
23491@@ -96,6 +102,7 @@ ENTRY(efi_call5)
23492 call *%rdi
23493 addq $48, %rsp
23494 RESTORE_XMM
23495+ pax_force_retaddr 0, 1
23496 ret
23497 ENDPROC(efi_call5)
23498
23499@@ -112,5 +119,6 @@ ENTRY(efi_call6)
23500 call *%rdi
23501 addq $48, %rsp
23502 RESTORE_XMM
23503+ pax_force_retaddr 0, 1
23504 ret
23505 ENDPROC(efi_call6)
23506diff -urNp linux-3.1.4/arch/x86/platform/mrst/mrst.c linux-3.1.4/arch/x86/platform/mrst/mrst.c
23507--- linux-3.1.4/arch/x86/platform/mrst/mrst.c 2011-11-11 15:19:27.000000000 -0500
23508+++ linux-3.1.4/arch/x86/platform/mrst/mrst.c 2011-11-16 18:39:07.000000000 -0500
23509@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
23510 }
23511
23512 /* Reboot and power off are handled by the SCU on a MID device */
23513-static void mrst_power_off(void)
23514+static __noreturn void mrst_power_off(void)
23515 {
23516 intel_scu_ipc_simple_command(0xf1, 1);
23517+ BUG();
23518 }
23519
23520-static void mrst_reboot(void)
23521+static __noreturn void mrst_reboot(void)
23522 {
23523 intel_scu_ipc_simple_command(0xf1, 0);
23524+ BUG();
23525 }
23526
23527 /*
23528diff -urNp linux-3.1.4/arch/x86/platform/uv/tlb_uv.c linux-3.1.4/arch/x86/platform/uv/tlb_uv.c
23529--- linux-3.1.4/arch/x86/platform/uv/tlb_uv.c 2011-11-11 15:19:27.000000000 -0500
23530+++ linux-3.1.4/arch/x86/platform/uv/tlb_uv.c 2011-11-16 19:39:11.000000000 -0500
23531@@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask
23532 struct bau_control *smaster = bcp->socket_master;
23533 struct reset_args reset_args;
23534
23535+ pax_track_stack();
23536+
23537 reset_args.sender = sender;
23538 cpus_clear(*mask);
23539 /* find a single cpu for each uvhub in this distribution mask */
23540diff -urNp linux-3.1.4/arch/x86/power/cpu.c linux-3.1.4/arch/x86/power/cpu.c
23541--- linux-3.1.4/arch/x86/power/cpu.c 2011-11-11 15:19:27.000000000 -0500
23542+++ linux-3.1.4/arch/x86/power/cpu.c 2011-11-16 18:39:07.000000000 -0500
23543@@ -130,7 +130,7 @@ static void do_fpu_end(void)
23544 static void fix_processor_context(void)
23545 {
23546 int cpu = smp_processor_id();
23547- struct tss_struct *t = &per_cpu(init_tss, cpu);
23548+ struct tss_struct *t = init_tss + cpu;
23549
23550 set_tss_desc(cpu, t); /*
23551 * This just modifies memory; should not be
23552@@ -140,7 +140,9 @@ static void fix_processor_context(void)
23553 */
23554
23555 #ifdef CONFIG_X86_64
23556+ pax_open_kernel();
23557 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23558+ pax_close_kernel();
23559
23560 syscall_init(); /* This sets MSR_*STAR and related */
23561 #endif
23562diff -urNp linux-3.1.4/arch/x86/vdso/Makefile linux-3.1.4/arch/x86/vdso/Makefile
23563--- linux-3.1.4/arch/x86/vdso/Makefile 2011-11-11 15:19:27.000000000 -0500
23564+++ linux-3.1.4/arch/x86/vdso/Makefile 2011-11-16 18:39:07.000000000 -0500
23565@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
23566 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
23567 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
23568
23569-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23570+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23571 GCOV_PROFILE := n
23572
23573 #
23574diff -urNp linux-3.1.4/arch/x86/vdso/vdso32-setup.c linux-3.1.4/arch/x86/vdso/vdso32-setup.c
23575--- linux-3.1.4/arch/x86/vdso/vdso32-setup.c 2011-11-11 15:19:27.000000000 -0500
23576+++ linux-3.1.4/arch/x86/vdso/vdso32-setup.c 2011-11-16 18:39:07.000000000 -0500
23577@@ -25,6 +25,7 @@
23578 #include <asm/tlbflush.h>
23579 #include <asm/vdso.h>
23580 #include <asm/proto.h>
23581+#include <asm/mman.h>
23582
23583 enum {
23584 VDSO_DISABLED = 0,
23585@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23586 void enable_sep_cpu(void)
23587 {
23588 int cpu = get_cpu();
23589- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23590+ struct tss_struct *tss = init_tss + cpu;
23591
23592 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23593 put_cpu();
23594@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23595 gate_vma.vm_start = FIXADDR_USER_START;
23596 gate_vma.vm_end = FIXADDR_USER_END;
23597 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23598- gate_vma.vm_page_prot = __P101;
23599+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23600 /*
23601 * Make sure the vDSO gets into every core dump.
23602 * Dumping its contents makes post-mortem fully interpretable later
23603@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23604 if (compat)
23605 addr = VDSO_HIGH_BASE;
23606 else {
23607- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23608+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23609 if (IS_ERR_VALUE(addr)) {
23610 ret = addr;
23611 goto up_fail;
23612 }
23613 }
23614
23615- current->mm->context.vdso = (void *)addr;
23616+ current->mm->context.vdso = addr;
23617
23618 if (compat_uses_vma || !compat) {
23619 /*
23620@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23621 }
23622
23623 current_thread_info()->sysenter_return =
23624- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23625+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23626
23627 up_fail:
23628 if (ret)
23629- current->mm->context.vdso = NULL;
23630+ current->mm->context.vdso = 0;
23631
23632 up_write(&mm->mmap_sem);
23633
23634@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
23635
23636 const char *arch_vma_name(struct vm_area_struct *vma)
23637 {
23638- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23639+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23640 return "[vdso]";
23641+
23642+#ifdef CONFIG_PAX_SEGMEXEC
23643+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23644+ return "[vdso]";
23645+#endif
23646+
23647 return NULL;
23648 }
23649
23650@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23651 * Check to see if the corresponding task was created in compat vdso
23652 * mode.
23653 */
23654- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23655+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23656 return &gate_vma;
23657 return NULL;
23658 }
23659diff -urNp linux-3.1.4/arch/x86/vdso/vma.c linux-3.1.4/arch/x86/vdso/vma.c
23660--- linux-3.1.4/arch/x86/vdso/vma.c 2011-11-11 15:19:27.000000000 -0500
23661+++ linux-3.1.4/arch/x86/vdso/vma.c 2011-11-16 18:39:07.000000000 -0500
23662@@ -16,8 +16,6 @@
23663 #include <asm/vdso.h>
23664 #include <asm/page.h>
23665
23666-unsigned int __read_mostly vdso_enabled = 1;
23667-
23668 extern char vdso_start[], vdso_end[];
23669 extern unsigned short vdso_sync_cpuid;
23670
23671@@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned
23672 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
23673 {
23674 struct mm_struct *mm = current->mm;
23675- unsigned long addr;
23676+ unsigned long addr = 0;
23677 int ret;
23678
23679- if (!vdso_enabled)
23680- return 0;
23681-
23682 down_write(&mm->mmap_sem);
23683+
23684+#ifdef CONFIG_PAX_RANDMMAP
23685+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23686+#endif
23687+
23688 addr = vdso_addr(mm->start_stack, vdso_size);
23689 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
23690 if (IS_ERR_VALUE(addr)) {
23691@@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct l
23692 goto up_fail;
23693 }
23694
23695- current->mm->context.vdso = (void *)addr;
23696+ mm->context.vdso = addr;
23697
23698 ret = install_special_mapping(mm, addr, vdso_size,
23699 VM_READ|VM_EXEC|
23700 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
23701 VM_ALWAYSDUMP,
23702 vdso_pages);
23703- if (ret) {
23704- current->mm->context.vdso = NULL;
23705- goto up_fail;
23706- }
23707+
23708+ if (ret)
23709+ mm->context.vdso = 0;
23710
23711 up_fail:
23712 up_write(&mm->mmap_sem);
23713 return ret;
23714 }
23715-
23716-static __init int vdso_setup(char *s)
23717-{
23718- vdso_enabled = simple_strtoul(s, NULL, 0);
23719- return 0;
23720-}
23721-__setup("vdso=", vdso_setup);
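In the vma.c hunk, the preferred-address hint from vdso_addr() is only computed when the task is not under PaX RANDMMAP; with RANDMMAP active the hint stays 0, so get_unmapped_area() places the vDSO inside the already randomized mmap range instead of relative to the stack, and the vdso= boot knob is dropped entirely. The difference between hinted and allocator-chosen placement can be seen with plain mmap(); the hint value below is arbitrary and only illustrative:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;

        /* Hint-based placement: suggest an address, which the kernel may or
         * may not honour (analogous to the vdso_addr() hint). */
        void *hinted = mmap((void *)0x70000000UL, len, PROT_READ,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* No hint: the address comes entirely from the kernel's (possibly
         * randomized) mmap layout, which is what the RANDMMAP path relies on. */
        void *chosen = mmap(NULL, len, PROT_READ,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        printf("hinted mapping at %p, kernel-chosen mapping at %p\n",
               hinted, chosen);
        return 0;
}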
23722diff -urNp linux-3.1.4/arch/x86/xen/enlighten.c linux-3.1.4/arch/x86/xen/enlighten.c
23723--- linux-3.1.4/arch/x86/xen/enlighten.c 2011-11-26 19:57:27.000000000 -0500
23724+++ linux-3.1.4/arch/x86/xen/enlighten.c 2011-11-26 19:58:40.000000000 -0500
23725@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23726
23727 struct shared_info xen_dummy_shared_info;
23728
23729-void *xen_initial_gdt;
23730-
23731 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
23732 __read_mostly int xen_have_vector_callback;
23733 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
23734@@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic
23735 #endif
23736 };
23737
23738-static void xen_reboot(int reason)
23739+static __noreturn void xen_reboot(int reason)
23740 {
23741 struct sched_shutdown r = { .reason = reason };
23742
23743@@ -1036,17 +1034,17 @@ static void xen_reboot(int reason)
23744 BUG();
23745 }
23746
23747-static void xen_restart(char *msg)
23748+static __noreturn void xen_restart(char *msg)
23749 {
23750 xen_reboot(SHUTDOWN_reboot);
23751 }
23752
23753-static void xen_emergency_restart(void)
23754+static __noreturn void xen_emergency_restart(void)
23755 {
23756 xen_reboot(SHUTDOWN_reboot);
23757 }
23758
23759-static void xen_machine_halt(void)
23760+static __noreturn void xen_machine_halt(void)
23761 {
23762 xen_reboot(SHUTDOWN_poweroff);
23763 }
23764@@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(
23765 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23766
23767 /* Work out if we support NX */
23768- x86_configure_nx();
23769+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23770+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23771+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23772+ unsigned l, h;
23773+
23774+ __supported_pte_mask |= _PAGE_NX;
23775+ rdmsr(MSR_EFER, l, h);
23776+ l |= EFER_NX;
23777+ wrmsr(MSR_EFER, l, h);
23778+ }
23779+#endif
23780
23781 xen_setup_features();
23782
23783@@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(
23784
23785 machine_ops = xen_machine_ops;
23786
23787- /*
23788- * The only reliable way to retain the initial address of the
23789- * percpu gdt_page is to remember it here, so we can go and
23790- * mark it RW later, when the initial percpu area is freed.
23791- */
23792- xen_initial_gdt = &per_cpu(gdt_page, 0);
23793-
23794 xen_smp_init();
23795
23796 #ifdef CONFIG_ACPI_NUMA
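The NX portion of the enlighten.c hunk replaces the x86_configure_nx() call with an open-coded probe: check extended CPUID leaf 0x80000001 for the NX bit, and if present set _PAGE_NX in __supported_pte_mask and enable EFER.NX via rdmsr/wrmsr. Only ring 0 can touch EFER, but the detection half can be sketched in userspace with the compiler's cpuid helper (GCC/Clang on x86; illustrative only):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Extended leaf 0x80000001: EDX bit 20 advertises NX/XD support,
         * the same feature bit the hunk tests before enabling EFER.NX. */
        if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) {
                puts("extended CPUID leaf not available");
                return 1;
        }

        printf("NX supported: %s\n", (edx & (1u << 20)) ? "yes" : "no");
        return 0;
}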
23797diff -urNp linux-3.1.4/arch/x86/xen/mmu.c linux-3.1.4/arch/x86/xen/mmu.c
23798--- linux-3.1.4/arch/x86/xen/mmu.c 2011-11-11 15:19:27.000000000 -0500
23799+++ linux-3.1.4/arch/x86/xen/mmu.c 2011-11-16 18:39:07.000000000 -0500
23800@@ -1768,6 +1768,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
23801 convert_pfn_mfn(init_level4_pgt);
23802 convert_pfn_mfn(level3_ident_pgt);
23803 convert_pfn_mfn(level3_kernel_pgt);
23804+ convert_pfn_mfn(level3_vmalloc_pgt);
23805+ convert_pfn_mfn(level3_vmemmap_pgt);
23806
23807 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23808 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23809@@ -1786,7 +1788,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
23810 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23811 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23812 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23813+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23814+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23815 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23816+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23817 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23818 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23819
23820@@ -2000,6 +2005,7 @@ static void __init xen_post_allocator_in
23821 pv_mmu_ops.set_pud = xen_set_pud;
23822 #if PAGETABLE_LEVELS == 4
23823 pv_mmu_ops.set_pgd = xen_set_pgd;
23824+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
23825 #endif
23826
23827 /* This will work as long as patching hasn't happened yet
23828@@ -2081,6 +2087,7 @@ static const struct pv_mmu_ops xen_mmu_o
23829 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
23830 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
23831 .set_pgd = xen_set_pgd_hyper,
23832+ .set_pgd_batched = xen_set_pgd_hyper,
23833
23834 .alloc_pud = xen_alloc_pmd_init,
23835 .release_pud = xen_release_pmd_init,
23836diff -urNp linux-3.1.4/arch/x86/xen/smp.c linux-3.1.4/arch/x86/xen/smp.c
23837--- linux-3.1.4/arch/x86/xen/smp.c 2011-11-11 15:19:27.000000000 -0500
23838+++ linux-3.1.4/arch/x86/xen/smp.c 2011-11-16 18:39:07.000000000 -0500
23839@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
23840 {
23841 BUG_ON(smp_processor_id() != 0);
23842 native_smp_prepare_boot_cpu();
23843-
23844- /* We've switched to the "real" per-cpu gdt, so make sure the
23845- old memory can be recycled */
23846- make_lowmem_page_readwrite(xen_initial_gdt);
23847-
23848 xen_filter_cpu_maps();
23849 xen_setup_vcpu_info_placement();
23850 }
23851@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu,
23852 gdt = get_cpu_gdt_table(cpu);
23853
23854 ctxt->flags = VGCF_IN_KERNEL;
23855- ctxt->user_regs.ds = __USER_DS;
23856- ctxt->user_regs.es = __USER_DS;
23857+ ctxt->user_regs.ds = __KERNEL_DS;
23858+ ctxt->user_regs.es = __KERNEL_DS;
23859 ctxt->user_regs.ss = __KERNEL_DS;
23860 #ifdef CONFIG_X86_32
23861 ctxt->user_regs.fs = __KERNEL_PERCPU;
23862- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23863+ savesegment(gs, ctxt->user_regs.gs);
23864 #else
23865 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23866 #endif
23867@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned
23868 int rc;
23869
23870 per_cpu(current_task, cpu) = idle;
23871+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23872 #ifdef CONFIG_X86_32
23873 irq_ctx_init(cpu);
23874 #else
23875 clear_tsk_thread_flag(idle, TIF_FORK);
23876- per_cpu(kernel_stack, cpu) =
23877- (unsigned long)task_stack_page(idle) -
23878- KERNEL_STACK_OFFSET + THREAD_SIZE;
23879+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23880 #endif
23881 xen_setup_runstate_info(cpu);
23882 xen_setup_timer(cpu);
23883diff -urNp linux-3.1.4/arch/x86/xen/xen-asm_32.S linux-3.1.4/arch/x86/xen/xen-asm_32.S
23884--- linux-3.1.4/arch/x86/xen/xen-asm_32.S 2011-11-11 15:19:27.000000000 -0500
23885+++ linux-3.1.4/arch/x86/xen/xen-asm_32.S 2011-11-16 18:39:07.000000000 -0500
23886@@ -83,14 +83,14 @@ ENTRY(xen_iret)
23887 ESP_OFFSET=4 # bytes pushed onto stack
23888
23889 /*
23890- * Store vcpu_info pointer for easy access. Do it this way to
23891- * avoid having to reload %fs
23892+ * Store vcpu_info pointer for easy access.
23893 */
23894 #ifdef CONFIG_SMP
23895- GET_THREAD_INFO(%eax)
23896- movl TI_cpu(%eax), %eax
23897- movl __per_cpu_offset(,%eax,4), %eax
23898- mov xen_vcpu(%eax), %eax
23899+ push %fs
23900+ mov $(__KERNEL_PERCPU), %eax
23901+ mov %eax, %fs
23902+ mov PER_CPU_VAR(xen_vcpu), %eax
23903+ pop %fs
23904 #else
23905 movl xen_vcpu, %eax
23906 #endif
23907diff -urNp linux-3.1.4/arch/x86/xen/xen-head.S linux-3.1.4/arch/x86/xen/xen-head.S
23908--- linux-3.1.4/arch/x86/xen/xen-head.S 2011-11-11 15:19:27.000000000 -0500
23909+++ linux-3.1.4/arch/x86/xen/xen-head.S 2011-11-16 18:39:07.000000000 -0500
23910@@ -19,6 +19,17 @@ ENTRY(startup_xen)
23911 #ifdef CONFIG_X86_32
23912 mov %esi,xen_start_info
23913 mov $init_thread_union+THREAD_SIZE,%esp
23914+#ifdef CONFIG_SMP
23915+ movl $cpu_gdt_table,%edi
23916+ movl $__per_cpu_load,%eax
23917+ movw %ax,__KERNEL_PERCPU + 2(%edi)
23918+ rorl $16,%eax
23919+ movb %al,__KERNEL_PERCPU + 4(%edi)
23920+ movb %ah,__KERNEL_PERCPU + 7(%edi)
23921+ movl $__per_cpu_end - 1,%eax
23922+ subl $__per_cpu_start,%eax
23923+ movw %ax,__KERNEL_PERCPU + 0(%edi)
23924+#endif
23925 #else
23926 mov %rsi,xen_start_info
23927 mov $init_thread_union+THREAD_SIZE,%rsp
23928diff -urNp linux-3.1.4/arch/x86/xen/xen-ops.h linux-3.1.4/arch/x86/xen/xen-ops.h
23929--- linux-3.1.4/arch/x86/xen/xen-ops.h 2011-11-11 15:19:27.000000000 -0500
23930+++ linux-3.1.4/arch/x86/xen/xen-ops.h 2011-11-16 18:39:07.000000000 -0500
23931@@ -10,8 +10,6 @@
23932 extern const char xen_hypervisor_callback[];
23933 extern const char xen_failsafe_callback[];
23934
23935-extern void *xen_initial_gdt;
23936-
23937 struct trap_info;
23938 void xen_copy_trap_info(struct trap_info *traps);
23939
23940diff -urNp linux-3.1.4/block/blk-iopoll.c linux-3.1.4/block/blk-iopoll.c
23941--- linux-3.1.4/block/blk-iopoll.c 2011-11-11 15:19:27.000000000 -0500
23942+++ linux-3.1.4/block/blk-iopoll.c 2011-11-16 18:39:07.000000000 -0500
23943@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23944 }
23945 EXPORT_SYMBOL(blk_iopoll_complete);
23946
23947-static void blk_iopoll_softirq(struct softirq_action *h)
23948+static void blk_iopoll_softirq(void)
23949 {
23950 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23951 int rearm = 0, budget = blk_iopoll_budget;
23952diff -urNp linux-3.1.4/block/blk-map.c linux-3.1.4/block/blk-map.c
23953--- linux-3.1.4/block/blk-map.c 2011-11-26 19:57:27.000000000 -0500
23954+++ linux-3.1.4/block/blk-map.c 2011-11-26 19:58:40.000000000 -0500
23955@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue
23956 if (!len || !kbuf)
23957 return -EINVAL;
23958
23959- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
23960+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
23961 if (do_copy)
23962 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23963 else
23964diff -urNp linux-3.1.4/block/blk-softirq.c linux-3.1.4/block/blk-softirq.c
23965--- linux-3.1.4/block/blk-softirq.c 2011-11-11 15:19:27.000000000 -0500
23966+++ linux-3.1.4/block/blk-softirq.c 2011-11-16 18:39:07.000000000 -0500
23967@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23968 * Softirq action handler - move entries to local list and loop over them
23969 * while passing them to the queue registered handler.
23970 */
23971-static void blk_done_softirq(struct softirq_action *h)
23972+static void blk_done_softirq(void)
23973 {
23974 struct list_head *cpu_list, local_list;
23975
23976diff -urNp linux-3.1.4/block/bsg.c linux-3.1.4/block/bsg.c
23977--- linux-3.1.4/block/bsg.c 2011-11-11 15:19:27.000000000 -0500
23978+++ linux-3.1.4/block/bsg.c 2011-11-16 18:39:07.000000000 -0500
23979@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23980 struct sg_io_v4 *hdr, struct bsg_device *bd,
23981 fmode_t has_write_perm)
23982 {
23983+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23984+ unsigned char *cmdptr;
23985+
23986 if (hdr->request_len > BLK_MAX_CDB) {
23987 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23988 if (!rq->cmd)
23989 return -ENOMEM;
23990- }
23991+ cmdptr = rq->cmd;
23992+ } else
23993+ cmdptr = tmpcmd;
23994
23995- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
23996+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
23997 hdr->request_len))
23998 return -EFAULT;
23999
24000+ if (cmdptr != rq->cmd)
24001+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24002+
24003 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24004 if (blk_verify_command(rq->cmd, has_write_perm))
24005 return -EPERM;
24006diff -urNp linux-3.1.4/block/compat_ioctl.c linux-3.1.4/block/compat_ioctl.c
24007--- linux-3.1.4/block/compat_ioctl.c 2011-11-11 15:19:27.000000000 -0500
24008+++ linux-3.1.4/block/compat_ioctl.c 2011-11-16 18:39:07.000000000 -0500
24009@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_
24010 err |= __get_user(f->spec1, &uf->spec1);
24011 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24012 err |= __get_user(name, &uf->name);
24013- f->name = compat_ptr(name);
24014+ f->name = (void __force_kernel *)compat_ptr(name);
24015 if (err) {
24016 err = -EFAULT;
24017 goto out;
24018diff -urNp linux-3.1.4/block/scsi_ioctl.c linux-3.1.4/block/scsi_ioctl.c
24019--- linux-3.1.4/block/scsi_ioctl.c 2011-11-11 15:19:27.000000000 -0500
24020+++ linux-3.1.4/block/scsi_ioctl.c 2011-11-16 18:39:07.000000000 -0500
24021@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
24022 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24023 struct sg_io_hdr *hdr, fmode_t mode)
24024 {
24025- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24026+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24027+ unsigned char *cmdptr;
24028+
24029+ if (rq->cmd != rq->__cmd)
24030+ cmdptr = rq->cmd;
24031+ else
24032+ cmdptr = tmpcmd;
24033+
24034+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24035 return -EFAULT;
24036+
24037+ if (cmdptr != rq->cmd)
24038+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24039+
24040 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24041 return -EPERM;
24042
24043@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
24044 int err;
24045 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24046 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24047+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24048+ unsigned char *cmdptr;
24049
24050 if (!sic)
24051 return -EINVAL;
24052@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
24053 */
24054 err = -EFAULT;
24055 rq->cmd_len = cmdlen;
24056- if (copy_from_user(rq->cmd, sic->data, cmdlen))
24057+
24058+ if (rq->cmd != rq->__cmd)
24059+ cmdptr = rq->cmd;
24060+ else
24061+ cmdptr = tmpcmd;
24062+
24063+ if (copy_from_user(cmdptr, sic->data, cmdlen))
24064 goto error;
24065
24066+ if (rq->cmd != cmdptr)
24067+ memcpy(rq->cmd, cmdptr, cmdlen);
24068+
24069 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24070 goto error;
24071
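The bsg and sg_scsi_ioctl hunks above apply one pattern: when the user-supplied command fits in the request's embedded __cmd array, it is first copied from userspace into a same-sized local staging buffer and only then memcpy()'d into rq->cmd, so copy_from_user() never writes into the embedded array directly and a failed copy leaves the request untouched. A self-contained userspace sketch of that staging idea, with hypothetical names and a simulated copy failure:

#include <stdio.h>
#include <string.h>

#define CMD_MAX 16   /* stand-in for sizeof(rq->__cmd) */

/* copy_in() stands in for copy_from_user(): returns 0 on success,
 * non-zero on a (simulated) fault. */
static int copy_in(unsigned char *dst, const unsigned char *src, size_t n)
{
        if (src == NULL)
                return -1;
        memcpy(dst, src, n);
        return 0;
}

/* Stage the untrusted command in a local buffer; commit to 'dst' only
 * after a full, clean copy, mirroring the tmpcmd/cmdptr logic above. */
static int fill_command(unsigned char *dst, const unsigned char *user_cmd,
                        size_t len)
{
        unsigned char tmp[CMD_MAX];

        if (len > CMD_MAX)
                return -1;
        if (copy_in(tmp, user_cmd, len))
                return -1;              /* dst keeps its old contents */
        memcpy(dst, tmp, len);
        return 0;
}

int main(void)
{
        unsigned char cmd[CMD_MAX] = "OLDCMD";
        unsigned char user[6] = { 0x12, 0, 0, 0, 36, 0 };   /* INQUIRY-like CDB */

        if (fill_command(cmd, user, sizeof(user)) == 0)
                printf("first opcode byte: %#x\n", cmd[0]);
        return 0;
}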
24072diff -urNp linux-3.1.4/crypto/cryptd.c linux-3.1.4/crypto/cryptd.c
24073--- linux-3.1.4/crypto/cryptd.c 2011-11-11 15:19:27.000000000 -0500
24074+++ linux-3.1.4/crypto/cryptd.c 2011-11-16 18:39:07.000000000 -0500
24075@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24076
24077 struct cryptd_blkcipher_request_ctx {
24078 crypto_completion_t complete;
24079-};
24080+} __no_const;
24081
24082 struct cryptd_hash_ctx {
24083 struct crypto_shash *child;
24084@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24085
24086 struct cryptd_aead_request_ctx {
24087 crypto_completion_t complete;
24088-};
24089+} __no_const;
24090
24091 static void cryptd_queue_worker(struct work_struct *work);
24092
24093diff -urNp linux-3.1.4/crypto/serpent.c linux-3.1.4/crypto/serpent.c
24094--- linux-3.1.4/crypto/serpent.c 2011-11-11 15:19:27.000000000 -0500
24095+++ linux-3.1.4/crypto/serpent.c 2011-11-16 18:40:10.000000000 -0500
24096@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
24097 u32 r0,r1,r2,r3,r4;
24098 int i;
24099
24100+ pax_track_stack();
24101+
24102 /* Copy key, add padding */
24103
24104 for (i = 0; i < keylen; ++i)
24105diff -urNp linux-3.1.4/Documentation/dontdiff linux-3.1.4/Documentation/dontdiff
24106--- linux-3.1.4/Documentation/dontdiff 2011-11-11 15:19:27.000000000 -0500
24107+++ linux-3.1.4/Documentation/dontdiff 2011-12-02 17:38:47.000000000 -0500
24108@@ -5,6 +5,7 @@
24109 *.cis
24110 *.cpio
24111 *.csp
24112+*.dbg
24113 *.dsp
24114 *.dvi
24115 *.elf
24116@@ -14,6 +15,7 @@
24117 *.gcov
24118 *.gen.S
24119 *.gif
24120+*.gmo
24121 *.grep
24122 *.grp
24123 *.gz
24124@@ -48,9 +50,11 @@
24125 *.tab.h
24126 *.tex
24127 *.ver
24128+*.vim
24129 *.xml
24130 *.xz
24131 *_MODULES
24132+*_reg_safe.h
24133 *_vga16.c
24134 *~
24135 \#*#
24136@@ -70,6 +74,7 @@ Kerntypes
24137 Module.markers
24138 Module.symvers
24139 PENDING
24140+PERF*
24141 SCCS
24142 System.map*
24143 TAGS
24144@@ -93,19 +98,24 @@ bounds.h
24145 bsetup
24146 btfixupprep
24147 build
24148+builtin-policy.h
24149 bvmlinux
24150 bzImage*
24151 capability_names.h
24152 capflags.c
24153 classlist.h*
24154+clut_vga16.c
24155+common-cmds.h
24156 comp*.log
24157 compile.h*
24158 conf
24159 config
24160 config-*
24161 config_data.h*
24162+config.c
24163 config.mak
24164 config.mak.autogen
24165+config.tmp
24166 conmakehash
24167 consolemap_deftbl.c*
24168 cpustr.h
24169@@ -119,6 +129,7 @@ dslm
24170 elf2ecoff
24171 elfconfig.h*
24172 evergreen_reg_safe.h
24173+exception_policy.conf
24174 fixdep
24175 flask.h
24176 fore200e_mkfirm
24177@@ -126,12 +137,15 @@ fore200e_pca_fw.c*
24178 gconf
24179 gconf.glade.h
24180 gen-devlist
24181+gen-kdb_cmds.c
24182 gen_crc32table
24183 gen_init_cpio
24184 generated
24185 genheaders
24186 genksyms
24187 *_gray256.c
24188+hash
24189+hid-example
24190 hpet_example
24191 hugepage-mmap
24192 hugepage-shm
24193@@ -146,7 +160,7 @@ int32.c
24194 int4.c
24195 int8.c
24196 kallsyms
24197-kconfig
24198+kern_constants.h
24199 keywords.c
24200 ksym.c*
24201 ksym.h*
24202@@ -154,7 +168,6 @@ kxgettext
24203 lkc_defs.h
24204 lex.c
24205 lex.*.c
24206-linux
24207 logo_*.c
24208 logo_*_clut224.c
24209 logo_*_mono.c
24210@@ -166,14 +179,15 @@ machtypes.h
24211 map
24212 map_hugetlb
24213 maui_boot.h
24214-media
24215 mconf
24216+mdp
24217 miboot*
24218 mk_elfconfig
24219 mkboot
24220 mkbugboot
24221 mkcpustr
24222 mkdep
24223+mkpiggy
24224 mkprep
24225 mkregtable
24226 mktables
24227@@ -209,6 +223,7 @@ r300_reg_safe.h
24228 r420_reg_safe.h
24229 r600_reg_safe.h
24230 recordmcount
24231+regdb.c
24232 relocs
24233 rlim_names.h
24234 rn50_reg_safe.h
24235@@ -219,6 +234,7 @@ setup
24236 setup.bin
24237 setup.elf
24238 sImage
24239+slabinfo
24240 sm_tbl*
24241 split-include
24242 syscalltab.h
24243@@ -229,6 +245,7 @@ tftpboot.img
24244 timeconst.h
24245 times.h*
24246 trix_boot.h
24247+user_constants.h
24248 utsrelease.h*
24249 vdso-syms.lds
24250 vdso.lds
24251@@ -246,7 +263,9 @@ vmlinux
24252 vmlinux-*
24253 vmlinux.aout
24254 vmlinux.bin.all
24255+vmlinux.bin.bz2
24256 vmlinux.lds
24257+vmlinux.relocs
24258 vmlinuz
24259 voffset.h
24260 vsyscall.lds
24261@@ -254,9 +273,11 @@ vsyscall_32.lds
24262 wanxlfw.inc
24263 uImage
24264 unifdef
24265+utsrelease.h
24266 wakeup.bin
24267 wakeup.elf
24268 wakeup.lds
24269 zImage*
24270 zconf.hash.c
24271+zconf.lex.c
24272 zoffset.h
24273diff -urNp linux-3.1.4/Documentation/kernel-parameters.txt linux-3.1.4/Documentation/kernel-parameters.txt
24274--- linux-3.1.4/Documentation/kernel-parameters.txt 2011-11-11 15:19:27.000000000 -0500
24275+++ linux-3.1.4/Documentation/kernel-parameters.txt 2011-11-16 18:39:07.000000000 -0500
24276@@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes
24277 the specified number of seconds. This is to be used if
24278 your oopses keep scrolling off the screen.
24279
24280+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
24281+ virtualization environments that don't cope well with the
24282+ expand down segment used by UDEREF on X86-32 or the frequent
24283+ page table updates on X86-64.
24284+
24285+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
24286+
24287 pcbit= [HW,ISDN]
24288
24289 pcd. [PARIDE]
24290diff -urNp linux-3.1.4/drivers/acpi/apei/cper.c linux-3.1.4/drivers/acpi/apei/cper.c
24291--- linux-3.1.4/drivers/acpi/apei/cper.c 2011-11-11 15:19:27.000000000 -0500
24292+++ linux-3.1.4/drivers/acpi/apei/cper.c 2011-11-16 18:39:07.000000000 -0500
24293@@ -38,12 +38,12 @@
24294 */
24295 u64 cper_next_record_id(void)
24296 {
24297- static atomic64_t seq;
24298+ static atomic64_unchecked_t seq;
24299
24300- if (!atomic64_read(&seq))
24301- atomic64_set(&seq, ((u64)get_seconds()) << 32);
24302+ if (!atomic64_read_unchecked(&seq))
24303+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
24304
24305- return atomic64_inc_return(&seq);
24306+ return atomic64_inc_return_unchecked(&seq);
24307 }
24308 EXPORT_SYMBOL_GPL(cper_next_record_id);
24309
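The cper hunk changes only the counter's type, from atomic64_t to atomic64_unchecked_t, so that PaX's overflow detection on atomic types (meant for reference counts) is not applied to an id generator where wrap-around is harmless; the scheme itself is unchanged: seed the high 32 bits from the current time on first use, then hand out ids by atomic increment. A userspace sketch of that id scheme with C11 atomics, hypothetical names only:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static _Atomic uint64_t seq;

/* Seconds-since-epoch in the high 32 bits on first use, then a plain
 * atomic increment -- mirroring what cper_next_record_id() does. */
static uint64_t next_record_id(void)
{
        uint64_t cur = atomic_load(&seq);

        if (cur == 0) {
                uint64_t seed = (uint64_t)time(NULL) << 32;
                /* first caller wins; later callers see the seeded value */
                atomic_compare_exchange_strong(&seq, &cur, seed);
        }
        return atomic_fetch_add(&seq, 1) + 1;   /* like atomic64_inc_return() */
}

int main(void)
{
        uint64_t a = next_record_id();
        uint64_t b = next_record_id();

        printf("id1=%" PRIu64 " id2=%" PRIu64 "\n", a, b);
        return 0;
}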
24310diff -urNp linux-3.1.4/drivers/acpi/ec_sys.c linux-3.1.4/drivers/acpi/ec_sys.c
24311--- linux-3.1.4/drivers/acpi/ec_sys.c 2011-11-11 15:19:27.000000000 -0500
24312+++ linux-3.1.4/drivers/acpi/ec_sys.c 2011-11-16 18:39:07.000000000 -0500
24313@@ -11,6 +11,7 @@
24314 #include <linux/kernel.h>
24315 #include <linux/acpi.h>
24316 #include <linux/debugfs.h>
24317+#include <asm/uaccess.h>
24318 #include "internal.h"
24319
24320 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
24321@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
24322 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
24323 */
24324 unsigned int size = EC_SPACE_SIZE;
24325- u8 *data = (u8 *) buf;
24326+ u8 data;
24327 loff_t init_off = *off;
24328 int err = 0;
24329
24330@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
24331 size = count;
24332
24333 while (size) {
24334- err = ec_read(*off, &data[*off - init_off]);
24335+ err = ec_read(*off, &data);
24336 if (err)
24337 return err;
24338+ if (put_user(data, &buf[*off - init_off]))
24339+ return -EFAULT;
24340 *off += 1;
24341 size--;
24342 }
24343@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
24344
24345 unsigned int size = count;
24346 loff_t init_off = *off;
24347- u8 *data = (u8 *) buf;
24348 int err = 0;
24349
24350 if (*off >= EC_SPACE_SIZE)
24351@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
24352 }
24353
24354 while (size) {
24355- u8 byte_write = data[*off - init_off];
24356+ u8 byte_write;
24357+ if (get_user(byte_write, &buf[*off - init_off]))
24358+ return -EFAULT;
24359 err = ec_write(*off, byte_write);
24360 if (err)
24361 return err;
24362diff -urNp linux-3.1.4/drivers/acpi/proc.c linux-3.1.4/drivers/acpi/proc.c
24363--- linux-3.1.4/drivers/acpi/proc.c 2011-11-11 15:19:27.000000000 -0500
24364+++ linux-3.1.4/drivers/acpi/proc.c 2011-11-16 18:39:07.000000000 -0500
24365@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
24366 size_t count, loff_t * ppos)
24367 {
24368 struct list_head *node, *next;
24369- char strbuf[5];
24370- char str[5] = "";
24371- unsigned int len = count;
24372-
24373- if (len > 4)
24374- len = 4;
24375- if (len < 0)
24376- return -EFAULT;
24377+ char strbuf[5] = {0};
24378
24379- if (copy_from_user(strbuf, buffer, len))
24380+ if (count > 4)
24381+ count = 4;
24382+ if (copy_from_user(strbuf, buffer, count))
24383 return -EFAULT;
24384- strbuf[len] = '\0';
24385- sscanf(strbuf, "%s", str);
24386+ strbuf[count] = '\0';
24387
24388 mutex_lock(&acpi_device_lock);
24389 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24390@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
24391 if (!dev->wakeup.flags.valid)
24392 continue;
24393
24394- if (!strncmp(dev->pnp.bus_id, str, 4)) {
24395+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24396 if (device_can_wakeup(&dev->dev)) {
24397 bool enable = !device_may_wakeup(&dev->dev);
24398 device_set_wakeup_enable(&dev->dev, enable);
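The acpi/proc.c hunk tightens the write handler: the count is clamped to 4 before copy_from_user(), the buffer is zero-initialized and explicitly NUL-terminated, and the extra sscanf() round-trip into a second buffer is dropped, so an unterminated or oversized write from userspace cannot run past the 4-character bus id. The same defensive pattern in plain C (hypothetical function name):

#include <stdio.h>
#include <string.h>

/* Copy at most 4 bytes of an untrusted, possibly unterminated source into
 * a 5-byte buffer and guarantee NUL termination, as the hunk does for the
 * wakeup-device name written through /proc. */
static void read_bus_id(char out[5], const char *src, size_t count)
{
        memset(out, 0, 5);              /* strbuf[5] = {0}       */
        if (count > 4)
                count = 4;              /* clamp before copying  */
        memcpy(out, src, count);
        out[count] = '\0';              /* always terminated     */
}

int main(void)
{
        char buf[5];
        const char name[] = { 'P', 'C', 'I', '0', 'X', 'Y' };  /* no NUL */

        read_bus_id(buf, name, sizeof(name));
        printf("bus id: \"%s\"\n", buf);
        return strncmp(buf, "PCI0", 4) != 0;
}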
24399diff -urNp linux-3.1.4/drivers/acpi/processor_driver.c linux-3.1.4/drivers/acpi/processor_driver.c
24400--- linux-3.1.4/drivers/acpi/processor_driver.c 2011-11-11 15:19:27.000000000 -0500
24401+++ linux-3.1.4/drivers/acpi/processor_driver.c 2011-11-16 18:39:07.000000000 -0500
24402@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
24403 return 0;
24404 #endif
24405
24406- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24407+ BUG_ON(pr->id >= nr_cpu_ids);
24408
24409 /*
24410 * Buggy BIOS check
24411diff -urNp linux-3.1.4/drivers/ata/libata-core.c linux-3.1.4/drivers/ata/libata-core.c
24412--- linux-3.1.4/drivers/ata/libata-core.c 2011-11-11 15:19:27.000000000 -0500
24413+++ linux-3.1.4/drivers/ata/libata-core.c 2011-11-16 18:39:07.000000000 -0500
24414@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *
24415 struct ata_port *ap;
24416 unsigned int tag;
24417
24418- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24419+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24420 ap = qc->ap;
24421
24422 qc->flags = 0;
24423@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued
24424 struct ata_port *ap;
24425 struct ata_link *link;
24426
24427- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24428+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24429 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24430 ap = qc->ap;
24431 link = qc->dev->link;
24432@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct
24433 return;
24434
24435 spin_lock(&lock);
24436+ pax_open_kernel();
24437
24438 for (cur = ops->inherits; cur; cur = cur->inherits) {
24439 void **inherit = (void **)cur;
24440@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct
24441 if (IS_ERR(*pp))
24442 *pp = NULL;
24443
24444- ops->inherits = NULL;
24445+ *(struct ata_port_operations **)&ops->inherits = NULL;
24446
24447+ pax_close_kernel();
24448 spin_unlock(&lock);
24449 }
24450
24451diff -urNp linux-3.1.4/drivers/ata/libata-eh.c linux-3.1.4/drivers/ata/libata-eh.c
24452--- linux-3.1.4/drivers/ata/libata-eh.c 2011-11-11 15:19:27.000000000 -0500
24453+++ linux-3.1.4/drivers/ata/libata-eh.c 2011-11-16 18:40:10.000000000 -0500
24454@@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap)
24455 {
24456 struct ata_link *link;
24457
24458+ pax_track_stack();
24459+
24460 ata_for_each_link(link, ap, HOST_FIRST)
24461 ata_eh_link_report(link);
24462 }
24463diff -urNp linux-3.1.4/drivers/ata/pata_arasan_cf.c linux-3.1.4/drivers/ata/pata_arasan_cf.c
24464--- linux-3.1.4/drivers/ata/pata_arasan_cf.c 2011-11-11 15:19:27.000000000 -0500
24465+++ linux-3.1.4/drivers/ata/pata_arasan_cf.c 2011-11-16 18:39:07.000000000 -0500
24466@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
24467 /* Handle platform specific quirks */
24468 if (pdata->quirk) {
24469 if (pdata->quirk & CF_BROKEN_PIO) {
24470- ap->ops->set_piomode = NULL;
24471+ pax_open_kernel();
24472+ *(void **)&ap->ops->set_piomode = NULL;
24473+ pax_close_kernel();
24474 ap->pio_mask = 0;
24475 }
24476 if (pdata->quirk & CF_BROKEN_MWDMA)
24477diff -urNp linux-3.1.4/drivers/atm/adummy.c linux-3.1.4/drivers/atm/adummy.c
24478--- linux-3.1.4/drivers/atm/adummy.c 2011-11-11 15:19:27.000000000 -0500
24479+++ linux-3.1.4/drivers/atm/adummy.c 2011-11-16 18:39:07.000000000 -0500
24480@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
24481 vcc->pop(vcc, skb);
24482 else
24483 dev_kfree_skb_any(skb);
24484- atomic_inc(&vcc->stats->tx);
24485+ atomic_inc_unchecked(&vcc->stats->tx);
24486
24487 return 0;
24488 }
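From adummy.c onward, a long run of ATM drivers receives the same mechanical change: the per-VCC tx/rx/drop/error statistics switch from atomic_inc()/atomic_add() to their *_unchecked counterparts, so that PaX's overflow check on atomic_t (aimed at reference counts, where a wrap is exploitable) does not trip on event counters whose wrap-around is harmless. A userspace sketch of such a relaxed, wrap-tolerant statistics counter with C11 atomics; the struct and field names are made up for illustration:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

/* Statistics counters: increments need no ordering and wrap-around is
 * acceptable, unlike a reference count where overflow must be trapped. */
struct vcc_stats {
        atomic_uint_fast64_t tx;
        atomic_uint_fast64_t rx_err;
};

static void count_tx(struct vcc_stats *s)
{
        atomic_fetch_add_explicit(&s->tx, 1, memory_order_relaxed);
}

int main(void)
{
        struct vcc_stats stats = { 0 };

        for (int i = 0; i < 1000; i++)
                count_tx(&stats);

        printf("tx=%" PRIuFAST64 "\n",
               atomic_load_explicit(&stats.tx, memory_order_relaxed));
        return 0;
}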
24489diff -urNp linux-3.1.4/drivers/atm/ambassador.c linux-3.1.4/drivers/atm/ambassador.c
24490--- linux-3.1.4/drivers/atm/ambassador.c 2011-11-11 15:19:27.000000000 -0500
24491+++ linux-3.1.4/drivers/atm/ambassador.c 2011-11-16 18:39:07.000000000 -0500
24492@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
24493 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
24494
24495 // VC layer stats
24496- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24497+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24498
24499 // free the descriptor
24500 kfree (tx_descr);
24501@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
24502 dump_skb ("<<<", vc, skb);
24503
24504 // VC layer stats
24505- atomic_inc(&atm_vcc->stats->rx);
24506+ atomic_inc_unchecked(&atm_vcc->stats->rx);
24507 __net_timestamp(skb);
24508 // end of our responsibility
24509 atm_vcc->push (atm_vcc, skb);
24510@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
24511 } else {
24512 PRINTK (KERN_INFO, "dropped over-size frame");
24513 // should we count this?
24514- atomic_inc(&atm_vcc->stats->rx_drop);
24515+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
24516 }
24517
24518 } else {
24519@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * at
24520 }
24521
24522 if (check_area (skb->data, skb->len)) {
24523- atomic_inc(&atm_vcc->stats->tx_err);
24524+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
24525 return -ENOMEM; // ?
24526 }
24527
24528diff -urNp linux-3.1.4/drivers/atm/atmtcp.c linux-3.1.4/drivers/atm/atmtcp.c
24529--- linux-3.1.4/drivers/atm/atmtcp.c 2011-11-11 15:19:27.000000000 -0500
24530+++ linux-3.1.4/drivers/atm/atmtcp.c 2011-11-16 18:39:07.000000000 -0500
24531@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
24532 if (vcc->pop) vcc->pop(vcc,skb);
24533 else dev_kfree_skb(skb);
24534 if (dev_data) return 0;
24535- atomic_inc(&vcc->stats->tx_err);
24536+ atomic_inc_unchecked(&vcc->stats->tx_err);
24537 return -ENOLINK;
24538 }
24539 size = skb->len+sizeof(struct atmtcp_hdr);
24540@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
24541 if (!new_skb) {
24542 if (vcc->pop) vcc->pop(vcc,skb);
24543 else dev_kfree_skb(skb);
24544- atomic_inc(&vcc->stats->tx_err);
24545+ atomic_inc_unchecked(&vcc->stats->tx_err);
24546 return -ENOBUFS;
24547 }
24548 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
24549@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
24550 if (vcc->pop) vcc->pop(vcc,skb);
24551 else dev_kfree_skb(skb);
24552 out_vcc->push(out_vcc,new_skb);
24553- atomic_inc(&vcc->stats->tx);
24554- atomic_inc(&out_vcc->stats->rx);
24555+ atomic_inc_unchecked(&vcc->stats->tx);
24556+ atomic_inc_unchecked(&out_vcc->stats->rx);
24557 return 0;
24558 }
24559
24560@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
24561 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
24562 read_unlock(&vcc_sklist_lock);
24563 if (!out_vcc) {
24564- atomic_inc(&vcc->stats->tx_err);
24565+ atomic_inc_unchecked(&vcc->stats->tx_err);
24566 goto done;
24567 }
24568 skb_pull(skb,sizeof(struct atmtcp_hdr));
24569@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
24570 __net_timestamp(new_skb);
24571 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
24572 out_vcc->push(out_vcc,new_skb);
24573- atomic_inc(&vcc->stats->tx);
24574- atomic_inc(&out_vcc->stats->rx);
24575+ atomic_inc_unchecked(&vcc->stats->tx);
24576+ atomic_inc_unchecked(&out_vcc->stats->rx);
24577 done:
24578 if (vcc->pop) vcc->pop(vcc,skb);
24579 else dev_kfree_skb(skb);
24580diff -urNp linux-3.1.4/drivers/atm/eni.c linux-3.1.4/drivers/atm/eni.c
24581--- linux-3.1.4/drivers/atm/eni.c 2011-11-11 15:19:27.000000000 -0500
24582+++ linux-3.1.4/drivers/atm/eni.c 2011-11-16 18:39:07.000000000 -0500
24583@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
24584 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
24585 vcc->dev->number);
24586 length = 0;
24587- atomic_inc(&vcc->stats->rx_err);
24588+ atomic_inc_unchecked(&vcc->stats->rx_err);
24589 }
24590 else {
24591 length = ATM_CELL_SIZE-1; /* no HEC */
24592@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
24593 size);
24594 }
24595 eff = length = 0;
24596- atomic_inc(&vcc->stats->rx_err);
24597+ atomic_inc_unchecked(&vcc->stats->rx_err);
24598 }
24599 else {
24600 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
24601@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
24602 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
24603 vcc->dev->number,vcc->vci,length,size << 2,descr);
24604 length = eff = 0;
24605- atomic_inc(&vcc->stats->rx_err);
24606+ atomic_inc_unchecked(&vcc->stats->rx_err);
24607 }
24608 }
24609 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
24610@@ -771,7 +771,7 @@ rx_dequeued++;
24611 vcc->push(vcc,skb);
24612 pushed++;
24613 }
24614- atomic_inc(&vcc->stats->rx);
24615+ atomic_inc_unchecked(&vcc->stats->rx);
24616 }
24617 wake_up(&eni_dev->rx_wait);
24618 }
24619@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
24620 PCI_DMA_TODEVICE);
24621 if (vcc->pop) vcc->pop(vcc,skb);
24622 else dev_kfree_skb_irq(skb);
24623- atomic_inc(&vcc->stats->tx);
24624+ atomic_inc_unchecked(&vcc->stats->tx);
24625 wake_up(&eni_dev->tx_wait);
24626 dma_complete++;
24627 }
24628@@ -1568,7 +1568,7 @@ tx_complete++;
24629 /*--------------------------------- entries ---------------------------------*/
24630
24631
24632-static const char *media_name[] __devinitdata = {
24633+static const char *media_name[] __devinitconst = {
24634 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
24635 "UTP", "05?", "06?", "07?", /* 4- 7 */
24636 "TAXI","09?", "10?", "11?", /* 8-11 */
24637diff -urNp linux-3.1.4/drivers/atm/firestream.c linux-3.1.4/drivers/atm/firestream.c
24638--- linux-3.1.4/drivers/atm/firestream.c 2011-11-11 15:19:27.000000000 -0500
24639+++ linux-3.1.4/drivers/atm/firestream.c 2011-11-16 18:39:07.000000000 -0500
24640@@ -750,7 +750,7 @@ static void process_txdone_queue (struct
24641 }
24642 }
24643
24644- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24645+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24646
24647 fs_dprintk (FS_DEBUG_TXMEM, "i");
24648 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
24649@@ -817,7 +817,7 @@ static void process_incoming (struct fs_
24650 #endif
24651 skb_put (skb, qe->p1 & 0xffff);
24652 ATM_SKB(skb)->vcc = atm_vcc;
24653- atomic_inc(&atm_vcc->stats->rx);
24654+ atomic_inc_unchecked(&atm_vcc->stats->rx);
24655 __net_timestamp(skb);
24656 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
24657 atm_vcc->push (atm_vcc, skb);
24658@@ -838,12 +838,12 @@ static void process_incoming (struct fs_
24659 kfree (pe);
24660 }
24661 if (atm_vcc)
24662- atomic_inc(&atm_vcc->stats->rx_drop);
24663+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
24664 break;
24665 case 0x1f: /* Reassembly abort: no buffers. */
24666 /* Silently increment error counter. */
24667 if (atm_vcc)
24668- atomic_inc(&atm_vcc->stats->rx_drop);
24669+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
24670 break;
24671 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
24672 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
24673diff -urNp linux-3.1.4/drivers/atm/fore200e.c linux-3.1.4/drivers/atm/fore200e.c
24674--- linux-3.1.4/drivers/atm/fore200e.c 2011-11-11 15:19:27.000000000 -0500
24675+++ linux-3.1.4/drivers/atm/fore200e.c 2011-11-16 18:39:07.000000000 -0500
24676@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
24677 #endif
24678 /* check error condition */
24679 if (*entry->status & STATUS_ERROR)
24680- atomic_inc(&vcc->stats->tx_err);
24681+ atomic_inc_unchecked(&vcc->stats->tx_err);
24682 else
24683- atomic_inc(&vcc->stats->tx);
24684+ atomic_inc_unchecked(&vcc->stats->tx);
24685 }
24686 }
24687
24688@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
24689 if (skb == NULL) {
24690 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
24691
24692- atomic_inc(&vcc->stats->rx_drop);
24693+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24694 return -ENOMEM;
24695 }
24696
24697@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
24698
24699 dev_kfree_skb_any(skb);
24700
24701- atomic_inc(&vcc->stats->rx_drop);
24702+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24703 return -ENOMEM;
24704 }
24705
24706 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
24707
24708 vcc->push(vcc, skb);
24709- atomic_inc(&vcc->stats->rx);
24710+ atomic_inc_unchecked(&vcc->stats->rx);
24711
24712 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
24713
24714@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
24715 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
24716 fore200e->atm_dev->number,
24717 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
24718- atomic_inc(&vcc->stats->rx_err);
24719+ atomic_inc_unchecked(&vcc->stats->rx_err);
24720 }
24721 }
24722
24723@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
24724 goto retry_here;
24725 }
24726
24727- atomic_inc(&vcc->stats->tx_err);
24728+ atomic_inc_unchecked(&vcc->stats->tx_err);
24729
24730 fore200e->tx_sat++;
24731 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
24732diff -urNp linux-3.1.4/drivers/atm/he.c linux-3.1.4/drivers/atm/he.c
24733--- linux-3.1.4/drivers/atm/he.c 2011-11-11 15:19:27.000000000 -0500
24734+++ linux-3.1.4/drivers/atm/he.c 2011-11-16 18:39:07.000000000 -0500
24735@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
24736
24737 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
24738 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
24739- atomic_inc(&vcc->stats->rx_drop);
24740+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24741 goto return_host_buffers;
24742 }
24743
24744@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
24745 RBRQ_LEN_ERR(he_dev->rbrq_head)
24746 ? "LEN_ERR" : "",
24747 vcc->vpi, vcc->vci);
24748- atomic_inc(&vcc->stats->rx_err);
24749+ atomic_inc_unchecked(&vcc->stats->rx_err);
24750 goto return_host_buffers;
24751 }
24752
24753@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
24754 vcc->push(vcc, skb);
24755 spin_lock(&he_dev->global_lock);
24756
24757- atomic_inc(&vcc->stats->rx);
24758+ atomic_inc_unchecked(&vcc->stats->rx);
24759
24760 return_host_buffers:
24761 ++pdus_assembled;
24762@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
24763 tpd->vcc->pop(tpd->vcc, tpd->skb);
24764 else
24765 dev_kfree_skb_any(tpd->skb);
24766- atomic_inc(&tpd->vcc->stats->tx_err);
24767+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
24768 }
24769 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
24770 return;
24771@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24772 vcc->pop(vcc, skb);
24773 else
24774 dev_kfree_skb_any(skb);
24775- atomic_inc(&vcc->stats->tx_err);
24776+ atomic_inc_unchecked(&vcc->stats->tx_err);
24777 return -EINVAL;
24778 }
24779
24780@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24781 vcc->pop(vcc, skb);
24782 else
24783 dev_kfree_skb_any(skb);
24784- atomic_inc(&vcc->stats->tx_err);
24785+ atomic_inc_unchecked(&vcc->stats->tx_err);
24786 return -EINVAL;
24787 }
24788 #endif
24789@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24790 vcc->pop(vcc, skb);
24791 else
24792 dev_kfree_skb_any(skb);
24793- atomic_inc(&vcc->stats->tx_err);
24794+ atomic_inc_unchecked(&vcc->stats->tx_err);
24795 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24796 return -ENOMEM;
24797 }
24798@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24799 vcc->pop(vcc, skb);
24800 else
24801 dev_kfree_skb_any(skb);
24802- atomic_inc(&vcc->stats->tx_err);
24803+ atomic_inc_unchecked(&vcc->stats->tx_err);
24804 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24805 return -ENOMEM;
24806 }
24807@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24808 __enqueue_tpd(he_dev, tpd, cid);
24809 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24810
24811- atomic_inc(&vcc->stats->tx);
24812+ atomic_inc_unchecked(&vcc->stats->tx);
24813
24814 return 0;
24815 }
24816diff -urNp linux-3.1.4/drivers/atm/horizon.c linux-3.1.4/drivers/atm/horizon.c
24817--- linux-3.1.4/drivers/atm/horizon.c 2011-11-11 15:19:27.000000000 -0500
24818+++ linux-3.1.4/drivers/atm/horizon.c 2011-11-16 18:39:07.000000000 -0500
24819@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev,
24820 {
24821 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
24822 // VC layer stats
24823- atomic_inc(&vcc->stats->rx);
24824+ atomic_inc_unchecked(&vcc->stats->rx);
24825 __net_timestamp(skb);
24826 // end of our responsibility
24827 vcc->push (vcc, skb);
24828@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const
24829 dev->tx_iovec = NULL;
24830
24831 // VC layer stats
24832- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24833+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24834
24835 // free the skb
24836 hrz_kfree_skb (skb);
24837diff -urNp linux-3.1.4/drivers/atm/idt77252.c linux-3.1.4/drivers/atm/idt77252.c
24838--- linux-3.1.4/drivers/atm/idt77252.c 2011-11-11 15:19:27.000000000 -0500
24839+++ linux-3.1.4/drivers/atm/idt77252.c 2011-11-16 18:39:07.000000000 -0500
24840@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, str
24841 else
24842 dev_kfree_skb(skb);
24843
24844- atomic_inc(&vcc->stats->tx);
24845+ atomic_inc_unchecked(&vcc->stats->tx);
24846 }
24847
24848 atomic_dec(&scq->used);
24849@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, st
24850 if ((sb = dev_alloc_skb(64)) == NULL) {
24851 printk("%s: Can't allocate buffers for aal0.\n",
24852 card->name);
24853- atomic_add(i, &vcc->stats->rx_drop);
24854+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
24855 break;
24856 }
24857 if (!atm_charge(vcc, sb->truesize)) {
24858 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
24859 card->name);
24860- atomic_add(i - 1, &vcc->stats->rx_drop);
24861+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
24862 dev_kfree_skb(sb);
24863 break;
24864 }
24865@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, st
24866 ATM_SKB(sb)->vcc = vcc;
24867 __net_timestamp(sb);
24868 vcc->push(vcc, sb);
24869- atomic_inc(&vcc->stats->rx);
24870+ atomic_inc_unchecked(&vcc->stats->rx);
24871
24872 cell += ATM_CELL_PAYLOAD;
24873 }
24874@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, st
24875 "(CDC: %08x)\n",
24876 card->name, len, rpp->len, readl(SAR_REG_CDC));
24877 recycle_rx_pool_skb(card, rpp);
24878- atomic_inc(&vcc->stats->rx_err);
24879+ atomic_inc_unchecked(&vcc->stats->rx_err);
24880 return;
24881 }
24882 if (stat & SAR_RSQE_CRC) {
24883 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
24884 recycle_rx_pool_skb(card, rpp);
24885- atomic_inc(&vcc->stats->rx_err);
24886+ atomic_inc_unchecked(&vcc->stats->rx_err);
24887 return;
24888 }
24889 if (skb_queue_len(&rpp->queue) > 1) {
24890@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, st
24891 RXPRINTK("%s: Can't alloc RX skb.\n",
24892 card->name);
24893 recycle_rx_pool_skb(card, rpp);
24894- atomic_inc(&vcc->stats->rx_err);
24895+ atomic_inc_unchecked(&vcc->stats->rx_err);
24896 return;
24897 }
24898 if (!atm_charge(vcc, skb->truesize)) {
24899@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, st
24900 __net_timestamp(skb);
24901
24902 vcc->push(vcc, skb);
24903- atomic_inc(&vcc->stats->rx);
24904+ atomic_inc_unchecked(&vcc->stats->rx);
24905
24906 return;
24907 }
24908@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, st
24909 __net_timestamp(skb);
24910
24911 vcc->push(vcc, skb);
24912- atomic_inc(&vcc->stats->rx);
24913+ atomic_inc_unchecked(&vcc->stats->rx);
24914
24915 if (skb->truesize > SAR_FB_SIZE_3)
24916 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
24917@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
24918 if (vcc->qos.aal != ATM_AAL0) {
24919 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
24920 card->name, vpi, vci);
24921- atomic_inc(&vcc->stats->rx_drop);
24922+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24923 goto drop;
24924 }
24925
24926 if ((sb = dev_alloc_skb(64)) == NULL) {
24927 printk("%s: Can't allocate buffers for AAL0.\n",
24928 card->name);
24929- atomic_inc(&vcc->stats->rx_err);
24930+ atomic_inc_unchecked(&vcc->stats->rx_err);
24931 goto drop;
24932 }
24933
24934@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
24935 ATM_SKB(sb)->vcc = vcc;
24936 __net_timestamp(sb);
24937 vcc->push(vcc, sb);
24938- atomic_inc(&vcc->stats->rx);
24939+ atomic_inc_unchecked(&vcc->stats->rx);
24940
24941 drop:
24942 skb_pull(queue, 64);
24943@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24944
24945 if (vc == NULL) {
24946 printk("%s: NULL connection in send().\n", card->name);
24947- atomic_inc(&vcc->stats->tx_err);
24948+ atomic_inc_unchecked(&vcc->stats->tx_err);
24949 dev_kfree_skb(skb);
24950 return -EINVAL;
24951 }
24952 if (!test_bit(VCF_TX, &vc->flags)) {
24953 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
24954- atomic_inc(&vcc->stats->tx_err);
24955+ atomic_inc_unchecked(&vcc->stats->tx_err);
24956 dev_kfree_skb(skb);
24957 return -EINVAL;
24958 }
24959@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24960 break;
24961 default:
24962 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
24963- atomic_inc(&vcc->stats->tx_err);
24964+ atomic_inc_unchecked(&vcc->stats->tx_err);
24965 dev_kfree_skb(skb);
24966 return -EINVAL;
24967 }
24968
24969 if (skb_shinfo(skb)->nr_frags != 0) {
24970 printk("%s: No scatter-gather yet.\n", card->name);
24971- atomic_inc(&vcc->stats->tx_err);
24972+ atomic_inc_unchecked(&vcc->stats->tx_err);
24973 dev_kfree_skb(skb);
24974 return -EINVAL;
24975 }
24976@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24977
24978 err = queue_skb(card, vc, skb, oam);
24979 if (err) {
24980- atomic_inc(&vcc->stats->tx_err);
24981+ atomic_inc_unchecked(&vcc->stats->tx_err);
24982 dev_kfree_skb(skb);
24983 return err;
24984 }
24985@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
24986 skb = dev_alloc_skb(64);
24987 if (!skb) {
24988 printk("%s: Out of memory in send_oam().\n", card->name);
24989- atomic_inc(&vcc->stats->tx_err);
24990+ atomic_inc_unchecked(&vcc->stats->tx_err);
24991 return -ENOMEM;
24992 }
24993 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
24994diff -urNp linux-3.1.4/drivers/atm/iphase.c linux-3.1.4/drivers/atm/iphase.c
24995--- linux-3.1.4/drivers/atm/iphase.c 2011-11-11 15:19:27.000000000 -0500
24996+++ linux-3.1.4/drivers/atm/iphase.c 2011-11-16 18:39:07.000000000 -0500
24997@@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev)
24998 status = (u_short) (buf_desc_ptr->desc_mode);
24999 if (status & (RX_CER | RX_PTE | RX_OFL))
25000 {
25001- atomic_inc(&vcc->stats->rx_err);
25002+ atomic_inc_unchecked(&vcc->stats->rx_err);
25003 IF_ERR(printk("IA: bad packet, dropping it");)
25004 if (status & RX_CER) {
25005 IF_ERR(printk(" cause: packet CRC error\n");)
25006@@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev)
25007 len = dma_addr - buf_addr;
25008 if (len > iadev->rx_buf_sz) {
25009 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25010- atomic_inc(&vcc->stats->rx_err);
25011+ atomic_inc_unchecked(&vcc->stats->rx_err);
25012 goto out_free_desc;
25013 }
25014
25015@@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *
25016 ia_vcc = INPH_IA_VCC(vcc);
25017 if (ia_vcc == NULL)
25018 {
25019- atomic_inc(&vcc->stats->rx_err);
25020+ atomic_inc_unchecked(&vcc->stats->rx_err);
25021 dev_kfree_skb_any(skb);
25022 atm_return(vcc, atm_guess_pdu2truesize(len));
25023 goto INCR_DLE;
25024@@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *
25025 if ((length > iadev->rx_buf_sz) || (length >
25026 (skb->len - sizeof(struct cpcs_trailer))))
25027 {
25028- atomic_inc(&vcc->stats->rx_err);
25029+ atomic_inc_unchecked(&vcc->stats->rx_err);
25030 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25031 length, skb->len);)
25032 dev_kfree_skb_any(skb);
25033@@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *
25034
25035 IF_RX(printk("rx_dle_intr: skb push");)
25036 vcc->push(vcc,skb);
25037- atomic_inc(&vcc->stats->rx);
25038+ atomic_inc_unchecked(&vcc->stats->rx);
25039 iadev->rx_pkt_cnt++;
25040 }
25041 INCR_DLE:
25042@@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev,
25043 {
25044 struct k_sonet_stats *stats;
25045 stats = &PRIV(_ia_dev[board])->sonet_stats;
25046- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25047- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25048- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25049- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25050- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25051- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25052- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25053- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25054- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25055+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25056+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25057+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25058+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25059+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25060+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25061+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25062+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25063+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25064 }
25065 ia_cmds.status = 0;
25066 break;
25067@@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
25068 if ((desc == 0) || (desc > iadev->num_tx_desc))
25069 {
25070 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25071- atomic_inc(&vcc->stats->tx);
25072+ atomic_inc_unchecked(&vcc->stats->tx);
25073 if (vcc->pop)
25074 vcc->pop(vcc, skb);
25075 else
25076@@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
25077 ATM_DESC(skb) = vcc->vci;
25078 skb_queue_tail(&iadev->tx_dma_q, skb);
25079
25080- atomic_inc(&vcc->stats->tx);
25081+ atomic_inc_unchecked(&vcc->stats->tx);
25082 iadev->tx_pkt_cnt++;
25083 /* Increment transaction counter */
25084 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25085
25086 #if 0
25087 /* add flow control logic */
25088- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25089+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25090 if (iavcc->vc_desc_cnt > 10) {
25091 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25092 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25093diff -urNp linux-3.1.4/drivers/atm/lanai.c linux-3.1.4/drivers/atm/lanai.c
25094--- linux-3.1.4/drivers/atm/lanai.c 2011-11-11 15:19:27.000000000 -0500
25095+++ linux-3.1.4/drivers/atm/lanai.c 2011-11-16 18:39:07.000000000 -0500
25096@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
25097 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25098 lanai_endtx(lanai, lvcc);
25099 lanai_free_skb(lvcc->tx.atmvcc, skb);
25100- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25101+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25102 }
25103
25104 /* Try to fill the buffer - don't call unless there is backlog */
25105@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
25106 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25107 __net_timestamp(skb);
25108 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25109- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25110+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25111 out:
25112 lvcc->rx.buf.ptr = end;
25113 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25114@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
25115 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25116 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25117 lanai->stats.service_rxnotaal5++;
25118- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25119+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25120 return 0;
25121 }
25122 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25123@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
25124 int bytes;
25125 read_unlock(&vcc_sklist_lock);
25126 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25127- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25128+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25129 lvcc->stats.x.aal5.service_trash++;
25130 bytes = (SERVICE_GET_END(s) * 16) -
25131 (((unsigned long) lvcc->rx.buf.ptr) -
25132@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
25133 }
25134 if (s & SERVICE_STREAM) {
25135 read_unlock(&vcc_sklist_lock);
25136- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25137+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25138 lvcc->stats.x.aal5.service_stream++;
25139 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25140 "PDU on VCI %d!\n", lanai->number, vci);
25141@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
25142 return 0;
25143 }
25144 DPRINTK("got rx crc error on vci %d\n", vci);
25145- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25146+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25147 lvcc->stats.x.aal5.service_rxcrc++;
25148 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25149 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25150diff -urNp linux-3.1.4/drivers/atm/nicstar.c linux-3.1.4/drivers/atm/nicstar.c
25151--- linux-3.1.4/drivers/atm/nicstar.c 2011-11-11 15:19:27.000000000 -0500
25152+++ linux-3.1.4/drivers/atm/nicstar.c 2011-11-16 18:39:07.000000000 -0500
25153@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
25154 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25155 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25156 card->index);
25157- atomic_inc(&vcc->stats->tx_err);
25158+ atomic_inc_unchecked(&vcc->stats->tx_err);
25159 dev_kfree_skb_any(skb);
25160 return -EINVAL;
25161 }
25162@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
25163 if (!vc->tx) {
25164 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25165 card->index);
25166- atomic_inc(&vcc->stats->tx_err);
25167+ atomic_inc_unchecked(&vcc->stats->tx_err);
25168 dev_kfree_skb_any(skb);
25169 return -EINVAL;
25170 }
25171@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
25172 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25173 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25174 card->index);
25175- atomic_inc(&vcc->stats->tx_err);
25176+ atomic_inc_unchecked(&vcc->stats->tx_err);
25177 dev_kfree_skb_any(skb);
25178 return -EINVAL;
25179 }
25180
25181 if (skb_shinfo(skb)->nr_frags != 0) {
25182 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25183- atomic_inc(&vcc->stats->tx_err);
25184+ atomic_inc_unchecked(&vcc->stats->tx_err);
25185 dev_kfree_skb_any(skb);
25186 return -EINVAL;
25187 }
25188@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
25189 }
25190
25191 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
25192- atomic_inc(&vcc->stats->tx_err);
25193+ atomic_inc_unchecked(&vcc->stats->tx_err);
25194 dev_kfree_skb_any(skb);
25195 return -EIO;
25196 }
25197- atomic_inc(&vcc->stats->tx);
25198+ atomic_inc_unchecked(&vcc->stats->tx);
25199
25200 return 0;
25201 }
25202@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
25203 printk
25204 ("nicstar%d: Can't allocate buffers for aal0.\n",
25205 card->index);
25206- atomic_add(i, &vcc->stats->rx_drop);
25207+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25208 break;
25209 }
25210 if (!atm_charge(vcc, sb->truesize)) {
25211 RXPRINTK
25212 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
25213 card->index);
25214- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25215+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25216 dev_kfree_skb_any(sb);
25217 break;
25218 }
25219@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
25220 ATM_SKB(sb)->vcc = vcc;
25221 __net_timestamp(sb);
25222 vcc->push(vcc, sb);
25223- atomic_inc(&vcc->stats->rx);
25224+ atomic_inc_unchecked(&vcc->stats->rx);
25225 cell += ATM_CELL_PAYLOAD;
25226 }
25227
25228@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
25229 if (iovb == NULL) {
25230 printk("nicstar%d: Out of iovec buffers.\n",
25231 card->index);
25232- atomic_inc(&vcc->stats->rx_drop);
25233+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25234 recycle_rx_buf(card, skb);
25235 return;
25236 }
25237@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
25238 small or large buffer itself. */
25239 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
25240 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25241- atomic_inc(&vcc->stats->rx_err);
25242+ atomic_inc_unchecked(&vcc->stats->rx_err);
25243 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25244 NS_MAX_IOVECS);
25245 NS_PRV_IOVCNT(iovb) = 0;
25246@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
25247 ("nicstar%d: Expected a small buffer, and this is not one.\n",
25248 card->index);
25249 which_list(card, skb);
25250- atomic_inc(&vcc->stats->rx_err);
25251+ atomic_inc_unchecked(&vcc->stats->rx_err);
25252 recycle_rx_buf(card, skb);
25253 vc->rx_iov = NULL;
25254 recycle_iov_buf(card, iovb);
25255@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
25256 ("nicstar%d: Expected a large buffer, and this is not one.\n",
25257 card->index);
25258 which_list(card, skb);
25259- atomic_inc(&vcc->stats->rx_err);
25260+ atomic_inc_unchecked(&vcc->stats->rx_err);
25261 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25262 NS_PRV_IOVCNT(iovb));
25263 vc->rx_iov = NULL;
25264@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
25265 printk(" - PDU size mismatch.\n");
25266 else
25267 printk(".\n");
25268- atomic_inc(&vcc->stats->rx_err);
25269+ atomic_inc_unchecked(&vcc->stats->rx_err);
25270 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25271 NS_PRV_IOVCNT(iovb));
25272 vc->rx_iov = NULL;
25273@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
25274 /* skb points to a small buffer */
25275 if (!atm_charge(vcc, skb->truesize)) {
25276 push_rxbufs(card, skb);
25277- atomic_inc(&vcc->stats->rx_drop);
25278+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25279 } else {
25280 skb_put(skb, len);
25281 dequeue_sm_buf(card, skb);
25282@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
25283 ATM_SKB(skb)->vcc = vcc;
25284 __net_timestamp(skb);
25285 vcc->push(vcc, skb);
25286- atomic_inc(&vcc->stats->rx);
25287+ atomic_inc_unchecked(&vcc->stats->rx);
25288 }
25289 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
25290 struct sk_buff *sb;
25291@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
25292 if (len <= NS_SMBUFSIZE) {
25293 if (!atm_charge(vcc, sb->truesize)) {
25294 push_rxbufs(card, sb);
25295- atomic_inc(&vcc->stats->rx_drop);
25296+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25297 } else {
25298 skb_put(sb, len);
25299 dequeue_sm_buf(card, sb);
25300@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
25301 ATM_SKB(sb)->vcc = vcc;
25302 __net_timestamp(sb);
25303 vcc->push(vcc, sb);
25304- atomic_inc(&vcc->stats->rx);
25305+ atomic_inc_unchecked(&vcc->stats->rx);
25306 }
25307
25308 push_rxbufs(card, skb);
25309@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
25310
25311 if (!atm_charge(vcc, skb->truesize)) {
25312 push_rxbufs(card, skb);
25313- atomic_inc(&vcc->stats->rx_drop);
25314+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25315 } else {
25316 dequeue_lg_buf(card, skb);
25317 #ifdef NS_USE_DESTRUCTORS
25318@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
25319 ATM_SKB(skb)->vcc = vcc;
25320 __net_timestamp(skb);
25321 vcc->push(vcc, skb);
25322- atomic_inc(&vcc->stats->rx);
25323+ atomic_inc_unchecked(&vcc->stats->rx);
25324 }
25325
25326 push_rxbufs(card, sb);
25327@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
25328 printk
25329 ("nicstar%d: Out of huge buffers.\n",
25330 card->index);
25331- atomic_inc(&vcc->stats->rx_drop);
25332+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25333 recycle_iovec_rx_bufs(card,
25334 (struct iovec *)
25335 iovb->data,
25336@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
25337 card->hbpool.count++;
25338 } else
25339 dev_kfree_skb_any(hb);
25340- atomic_inc(&vcc->stats->rx_drop);
25341+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25342 } else {
25343 /* Copy the small buffer to the huge buffer */
25344 sb = (struct sk_buff *)iov->iov_base;
25345@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
25346 #endif /* NS_USE_DESTRUCTORS */
25347 __net_timestamp(hb);
25348 vcc->push(vcc, hb);
25349- atomic_inc(&vcc->stats->rx);
25350+ atomic_inc_unchecked(&vcc->stats->rx);
25351 }
25352 }
25353
25354diff -urNp linux-3.1.4/drivers/atm/solos-pci.c linux-3.1.4/drivers/atm/solos-pci.c
25355--- linux-3.1.4/drivers/atm/solos-pci.c 2011-11-11 15:19:27.000000000 -0500
25356+++ linux-3.1.4/drivers/atm/solos-pci.c 2011-11-16 18:40:10.000000000 -0500
25357@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
25358 }
25359 atm_charge(vcc, skb->truesize);
25360 vcc->push(vcc, skb);
25361- atomic_inc(&vcc->stats->rx);
25362+ atomic_inc_unchecked(&vcc->stats->rx);
25363 break;
25364
25365 case PKT_STATUS:
25366@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
25367 char msg[500];
25368 char item[10];
25369
25370+ pax_track_stack();
25371+
25372 len = buf->len;
25373 for (i = 0; i < len; i++){
25374 if(i % 8 == 0)
25375@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
25376 vcc = SKB_CB(oldskb)->vcc;
25377
25378 if (vcc) {
25379- atomic_inc(&vcc->stats->tx);
25380+ atomic_inc_unchecked(&vcc->stats->tx);
25381 solos_pop(vcc, oldskb);
25382 } else
25383 dev_kfree_skb_irq(oldskb);
25384diff -urNp linux-3.1.4/drivers/atm/suni.c linux-3.1.4/drivers/atm/suni.c
25385--- linux-3.1.4/drivers/atm/suni.c 2011-11-11 15:19:27.000000000 -0500
25386+++ linux-3.1.4/drivers/atm/suni.c 2011-11-16 18:39:07.000000000 -0500
25387@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
25388
25389
25390 #define ADD_LIMITED(s,v) \
25391- atomic_add((v),&stats->s); \
25392- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
25393+ atomic_add_unchecked((v),&stats->s); \
25394+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
25395
25396
25397 static void suni_hz(unsigned long from_timer)
25398diff -urNp linux-3.1.4/drivers/atm/uPD98402.c linux-3.1.4/drivers/atm/uPD98402.c
25399--- linux-3.1.4/drivers/atm/uPD98402.c 2011-11-11 15:19:27.000000000 -0500
25400+++ linux-3.1.4/drivers/atm/uPD98402.c 2011-11-16 18:39:07.000000000 -0500
25401@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
25402 struct sonet_stats tmp;
25403 int error = 0;
25404
25405- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25406+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25407 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
25408 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
25409 if (zero && !error) {
25410@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
25411
25412
25413 #define ADD_LIMITED(s,v) \
25414- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
25415- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
25416- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25417+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
25418+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
25419+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25420
25421
25422 static void stat_event(struct atm_dev *dev)
25423@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
25424 if (reason & uPD98402_INT_PFM) stat_event(dev);
25425 if (reason & uPD98402_INT_PCO) {
25426 (void) GET(PCOCR); /* clear interrupt cause */
25427- atomic_add(GET(HECCT),
25428+ atomic_add_unchecked(GET(HECCT),
25429 &PRIV(dev)->sonet_stats.uncorr_hcs);
25430 }
25431 if ((reason & uPD98402_INT_RFO) &&
25432@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
25433 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
25434 uPD98402_INT_LOS),PIMR); /* enable them */
25435 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
25436- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25437- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
25438- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
25439+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25440+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
25441+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
25442 return 0;
25443 }
25444
25445diff -urNp linux-3.1.4/drivers/atm/zatm.c linux-3.1.4/drivers/atm/zatm.c
25446--- linux-3.1.4/drivers/atm/zatm.c 2011-11-11 15:19:27.000000000 -0500
25447+++ linux-3.1.4/drivers/atm/zatm.c 2011-11-16 18:39:07.000000000 -0500
25448@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25449 }
25450 if (!size) {
25451 dev_kfree_skb_irq(skb);
25452- if (vcc) atomic_inc(&vcc->stats->rx_err);
25453+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
25454 continue;
25455 }
25456 if (!atm_charge(vcc,skb->truesize)) {
25457@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25458 skb->len = size;
25459 ATM_SKB(skb)->vcc = vcc;
25460 vcc->push(vcc,skb);
25461- atomic_inc(&vcc->stats->rx);
25462+ atomic_inc_unchecked(&vcc->stats->rx);
25463 }
25464 zout(pos & 0xffff,MTA(mbx));
25465 #if 0 /* probably a stupid idea */
25466@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
25467 skb_queue_head(&zatm_vcc->backlog,skb);
25468 break;
25469 }
25470- atomic_inc(&vcc->stats->tx);
25471+ atomic_inc_unchecked(&vcc->stats->tx);
25472 wake_up(&zatm_vcc->tx_wait);
25473 }
25474
25475diff -urNp linux-3.1.4/drivers/base/devtmpfs.c linux-3.1.4/drivers/base/devtmpfs.c
25476--- linux-3.1.4/drivers/base/devtmpfs.c 2011-11-11 15:19:27.000000000 -0500
25477+++ linux-3.1.4/drivers/base/devtmpfs.c 2011-11-16 18:39:07.000000000 -0500
25478@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
25479 if (!thread)
25480 return 0;
25481
25482- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
25483+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
25484 if (err)
25485 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
25486 else
25487diff -urNp linux-3.1.4/drivers/base/power/wakeup.c linux-3.1.4/drivers/base/power/wakeup.c
25488--- linux-3.1.4/drivers/base/power/wakeup.c 2011-11-11 15:19:27.000000000 -0500
25489+++ linux-3.1.4/drivers/base/power/wakeup.c 2011-11-16 18:39:07.000000000 -0500
25490@@ -29,14 +29,14 @@ bool events_check_enabled;
25491 * They need to be modified together atomically, so it's better to use one
25492 * atomic variable to hold them both.
25493 */
25494-static atomic_t combined_event_count = ATOMIC_INIT(0);
25495+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
25496
25497 #define IN_PROGRESS_BITS (sizeof(int) * 4)
25498 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
25499
25500 static void split_counters(unsigned int *cnt, unsigned int *inpr)
25501 {
25502- unsigned int comb = atomic_read(&combined_event_count);
25503+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
25504
25505 *cnt = (comb >> IN_PROGRESS_BITS);
25506 *inpr = comb & MAX_IN_PROGRESS;
25507@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
25508 ws->last_time = ktime_get();
25509
25510 /* Increment the counter of events in progress. */
25511- atomic_inc(&combined_event_count);
25512+ atomic_inc_unchecked(&combined_event_count);
25513 }
25514
25515 /**
25516@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
25517 * Increment the counter of registered wakeup events and decrement the
25518 * couter of wakeup events in progress simultaneously.
25519 */
25520- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
25521+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
25522 }
25523
25524 /**
25525diff -urNp linux-3.1.4/drivers/block/cciss.c linux-3.1.4/drivers/block/cciss.c
25526--- linux-3.1.4/drivers/block/cciss.c 2011-11-11 15:19:27.000000000 -0500
25527+++ linux-3.1.4/drivers/block/cciss.c 2011-11-16 18:40:10.000000000 -0500
25528@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
25529 int err;
25530 u32 cp;
25531
25532+ memset(&arg64, 0, sizeof(arg64));
25533+
25534 err = 0;
25535 err |=
25536 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
25537@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
25538 while (!list_empty(&h->reqQ)) {
25539 c = list_entry(h->reqQ.next, CommandList_struct, list);
25540 /* can't do anything if fifo is full */
25541- if ((h->access.fifo_full(h))) {
25542+ if ((h->access->fifo_full(h))) {
25543 dev_warn(&h->pdev->dev, "fifo full\n");
25544 break;
25545 }
25546@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
25547 h->Qdepth--;
25548
25549 /* Tell the controller execute command */
25550- h->access.submit_command(h, c);
25551+ h->access->submit_command(h, c);
25552
25553 /* Put job onto the completed Q */
25554 addQ(&h->cmpQ, c);
25555@@ -3422,17 +3424,17 @@ startio:
25556
25557 static inline unsigned long get_next_completion(ctlr_info_t *h)
25558 {
25559- return h->access.command_completed(h);
25560+ return h->access->command_completed(h);
25561 }
25562
25563 static inline int interrupt_pending(ctlr_info_t *h)
25564 {
25565- return h->access.intr_pending(h);
25566+ return h->access->intr_pending(h);
25567 }
25568
25569 static inline long interrupt_not_for_us(ctlr_info_t *h)
25570 {
25571- return ((h->access.intr_pending(h) == 0) ||
25572+ return ((h->access->intr_pending(h) == 0) ||
25573 (h->interrupts_enabled == 0));
25574 }
25575
25576@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
25577 u32 a;
25578
25579 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
25580- return h->access.command_completed(h);
25581+ return h->access->command_completed(h);
25582
25583 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
25584 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
25585@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
25586 trans_support & CFGTBL_Trans_use_short_tags);
25587
25588 /* Change the access methods to the performant access methods */
25589- h->access = SA5_performant_access;
25590+ h->access = &SA5_performant_access;
25591 h->transMethod = CFGTBL_Trans_Performant;
25592
25593 return;
25594@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
25595 if (prod_index < 0)
25596 return -ENODEV;
25597 h->product_name = products[prod_index].product_name;
25598- h->access = *(products[prod_index].access);
25599+ h->access = products[prod_index].access;
25600
25601 if (cciss_board_disabled(h)) {
25602 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
25603@@ -5009,7 +5011,7 @@ reinit_after_soft_reset:
25604 }
25605
25606 /* make sure the board interrupts are off */
25607- h->access.set_intr_mask(h, CCISS_INTR_OFF);
25608+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
25609 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
25610 if (rc)
25611 goto clean2;
25612@@ -5061,7 +5063,7 @@ reinit_after_soft_reset:
25613 * fake ones to scoop up any residual completions.
25614 */
25615 spin_lock_irqsave(&h->lock, flags);
25616- h->access.set_intr_mask(h, CCISS_INTR_OFF);
25617+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
25618 spin_unlock_irqrestore(&h->lock, flags);
25619 free_irq(h->intr[PERF_MODE_INT], h);
25620 rc = cciss_request_irq(h, cciss_msix_discard_completions,
25621@@ -5081,9 +5083,9 @@ reinit_after_soft_reset:
25622 dev_info(&h->pdev->dev, "Board READY.\n");
25623 dev_info(&h->pdev->dev,
25624 "Waiting for stale completions to drain.\n");
25625- h->access.set_intr_mask(h, CCISS_INTR_ON);
25626+ h->access->set_intr_mask(h, CCISS_INTR_ON);
25627 msleep(10000);
25628- h->access.set_intr_mask(h, CCISS_INTR_OFF);
25629+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
25630
25631 rc = controller_reset_failed(h->cfgtable);
25632 if (rc)
25633@@ -5106,7 +5108,7 @@ reinit_after_soft_reset:
25634 cciss_scsi_setup(h);
25635
25636 /* Turn the interrupts on so we can service requests */
25637- h->access.set_intr_mask(h, CCISS_INTR_ON);
25638+ h->access->set_intr_mask(h, CCISS_INTR_ON);
25639
25640 /* Get the firmware version */
25641 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
25642@@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_de
25643 kfree(flush_buf);
25644 if (return_code != IO_OK)
25645 dev_warn(&h->pdev->dev, "Error flushing cache\n");
25646- h->access.set_intr_mask(h, CCISS_INTR_OFF);
25647+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
25648 free_irq(h->intr[PERF_MODE_INT], h);
25649 }
25650
25651diff -urNp linux-3.1.4/drivers/block/cciss.h linux-3.1.4/drivers/block/cciss.h
25652--- linux-3.1.4/drivers/block/cciss.h 2011-11-11 15:19:27.000000000 -0500
25653+++ linux-3.1.4/drivers/block/cciss.h 2011-11-16 18:39:07.000000000 -0500
25654@@ -100,7 +100,7 @@ struct ctlr_info
25655 /* information about each logical volume */
25656 drive_info_struct *drv[CISS_MAX_LUN];
25657
25658- struct access_method access;
25659+ struct access_method *access;
25660
25661 /* queue and queue Info */
25662 struct list_head reqQ;
25663diff -urNp linux-3.1.4/drivers/block/cpqarray.c linux-3.1.4/drivers/block/cpqarray.c
25664--- linux-3.1.4/drivers/block/cpqarray.c 2011-11-11 15:19:27.000000000 -0500
25665+++ linux-3.1.4/drivers/block/cpqarray.c 2011-11-16 18:40:10.000000000 -0500
25666@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
25667 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
25668 goto Enomem4;
25669 }
25670- hba[i]->access.set_intr_mask(hba[i], 0);
25671+ hba[i]->access->set_intr_mask(hba[i], 0);
25672 if (request_irq(hba[i]->intr, do_ida_intr,
25673 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
25674 {
25675@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
25676 add_timer(&hba[i]->timer);
25677
25678 /* Enable IRQ now that spinlock and rate limit timer are set up */
25679- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
25680+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
25681
25682 for(j=0; j<NWD; j++) {
25683 struct gendisk *disk = ida_gendisk[i][j];
25684@@ -694,7 +694,7 @@ DBGINFO(
25685 for(i=0; i<NR_PRODUCTS; i++) {
25686 if (board_id == products[i].board_id) {
25687 c->product_name = products[i].product_name;
25688- c->access = *(products[i].access);
25689+ c->access = products[i].access;
25690 break;
25691 }
25692 }
25693@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
25694 hba[ctlr]->intr = intr;
25695 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
25696 hba[ctlr]->product_name = products[j].product_name;
25697- hba[ctlr]->access = *(products[j].access);
25698+ hba[ctlr]->access = products[j].access;
25699 hba[ctlr]->ctlr = ctlr;
25700 hba[ctlr]->board_id = board_id;
25701 hba[ctlr]->pci_dev = NULL; /* not PCI */
25702@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
25703 struct scatterlist tmp_sg[SG_MAX];
25704 int i, dir, seg;
25705
25706+ pax_track_stack();
25707+
25708 queue_next:
25709 creq = blk_peek_request(q);
25710 if (!creq)
25711@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
25712
25713 while((c = h->reqQ) != NULL) {
25714 /* Can't do anything if we're busy */
25715- if (h->access.fifo_full(h) == 0)
25716+ if (h->access->fifo_full(h) == 0)
25717 return;
25718
25719 /* Get the first entry from the request Q */
25720@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
25721 h->Qdepth--;
25722
25723 /* Tell the controller to do our bidding */
25724- h->access.submit_command(h, c);
25725+ h->access->submit_command(h, c);
25726
25727 /* Get onto the completion Q */
25728 addQ(&h->cmpQ, c);
25729@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
25730 unsigned long flags;
25731 __u32 a,a1;
25732
25733- istat = h->access.intr_pending(h);
25734+ istat = h->access->intr_pending(h);
25735 /* Is this interrupt for us? */
25736 if (istat == 0)
25737 return IRQ_NONE;
25738@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
25739 */
25740 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
25741 if (istat & FIFO_NOT_EMPTY) {
25742- while((a = h->access.command_completed(h))) {
25743+ while((a = h->access->command_completed(h))) {
25744 a1 = a; a &= ~3;
25745 if ((c = h->cmpQ) == NULL)
25746 {
25747@@ -1449,11 +1451,11 @@ static int sendcmd(
25748 /*
25749 * Disable interrupt
25750 */
25751- info_p->access.set_intr_mask(info_p, 0);
25752+ info_p->access->set_intr_mask(info_p, 0);
25753 /* Make sure there is room in the command FIFO */
25754 /* Actually it should be completely empty at this time. */
25755 for (i = 200000; i > 0; i--) {
25756- temp = info_p->access.fifo_full(info_p);
25757+ temp = info_p->access->fifo_full(info_p);
25758 if (temp != 0) {
25759 break;
25760 }
25761@@ -1466,7 +1468,7 @@ DBG(
25762 /*
25763 * Send the cmd
25764 */
25765- info_p->access.submit_command(info_p, c);
25766+ info_p->access->submit_command(info_p, c);
25767 complete = pollcomplete(ctlr);
25768
25769 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
25770@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
25771 * we check the new geometry. Then turn interrupts back on when
25772 * we're done.
25773 */
25774- host->access.set_intr_mask(host, 0);
25775+ host->access->set_intr_mask(host, 0);
25776 getgeometry(ctlr);
25777- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
25778+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
25779
25780 for(i=0; i<NWD; i++) {
25781 struct gendisk *disk = ida_gendisk[ctlr][i];
25782@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
25783 /* Wait (up to 2 seconds) for a command to complete */
25784
25785 for (i = 200000; i > 0; i--) {
25786- done = hba[ctlr]->access.command_completed(hba[ctlr]);
25787+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
25788 if (done == 0) {
25789 udelay(10); /* a short fixed delay */
25790 } else
25791diff -urNp linux-3.1.4/drivers/block/cpqarray.h linux-3.1.4/drivers/block/cpqarray.h
25792--- linux-3.1.4/drivers/block/cpqarray.h 2011-11-11 15:19:27.000000000 -0500
25793+++ linux-3.1.4/drivers/block/cpqarray.h 2011-11-16 18:39:07.000000000 -0500
25794@@ -99,7 +99,7 @@ struct ctlr_info {
25795 drv_info_t drv[NWD];
25796 struct proc_dir_entry *proc;
25797
25798- struct access_method access;
25799+ struct access_method *access;
25800
25801 cmdlist_t *reqQ;
25802 cmdlist_t *cmpQ;
25803diff -urNp linux-3.1.4/drivers/block/DAC960.c linux-3.1.4/drivers/block/DAC960.c
25804--- linux-3.1.4/drivers/block/DAC960.c 2011-11-11 15:19:27.000000000 -0500
25805+++ linux-3.1.4/drivers/block/DAC960.c 2011-11-16 18:40:10.000000000 -0500
25806@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
25807 unsigned long flags;
25808 int Channel, TargetID;
25809
25810+ pax_track_stack();
25811+
25812 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
25813 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
25814 sizeof(DAC960_SCSI_Inquiry_T) +
25815diff -urNp linux-3.1.4/drivers/block/drbd/drbd_int.h linux-3.1.4/drivers/block/drbd/drbd_int.h
25816--- linux-3.1.4/drivers/block/drbd/drbd_int.h 2011-11-11 15:19:27.000000000 -0500
25817+++ linux-3.1.4/drivers/block/drbd/drbd_int.h 2011-11-16 18:39:07.000000000 -0500
25818@@ -737,7 +737,7 @@ struct drbd_request;
25819 struct drbd_epoch {
25820 struct list_head list;
25821 unsigned int barrier_nr;
25822- atomic_t epoch_size; /* increased on every request added. */
25823+ atomic_unchecked_t epoch_size; /* increased on every request added. */
25824 atomic_t active; /* increased on every req. added, and dec on every finished. */
25825 unsigned long flags;
25826 };
25827@@ -1109,7 +1109,7 @@ struct drbd_conf {
25828 void *int_dig_in;
25829 void *int_dig_vv;
25830 wait_queue_head_t seq_wait;
25831- atomic_t packet_seq;
25832+ atomic_unchecked_t packet_seq;
25833 unsigned int peer_seq;
25834 spinlock_t peer_seq_lock;
25835 unsigned int minor;
25836@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
25837
25838 static inline void drbd_tcp_cork(struct socket *sock)
25839 {
25840- int __user val = 1;
25841+ int val = 1;
25842 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25843- (char __user *)&val, sizeof(val));
25844+ (char __force_user *)&val, sizeof(val));
25845 }
25846
25847 static inline void drbd_tcp_uncork(struct socket *sock)
25848 {
25849- int __user val = 0;
25850+ int val = 0;
25851 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25852- (char __user *)&val, sizeof(val));
25853+ (char __force_user *)&val, sizeof(val));
25854 }
25855
25856 static inline void drbd_tcp_nodelay(struct socket *sock)
25857 {
25858- int __user val = 1;
25859+ int val = 1;
25860 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
25861- (char __user *)&val, sizeof(val));
25862+ (char __force_user *)&val, sizeof(val));
25863 }
25864
25865 static inline void drbd_tcp_quickack(struct socket *sock)
25866 {
25867- int __user val = 2;
25868+ int val = 2;
25869 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
25870- (char __user *)&val, sizeof(val));
25871+ (char __force_user *)&val, sizeof(val));
25872 }
25873
25874 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
25875diff -urNp linux-3.1.4/drivers/block/drbd/drbd_main.c linux-3.1.4/drivers/block/drbd/drbd_main.c
25876--- linux-3.1.4/drivers/block/drbd/drbd_main.c 2011-11-11 15:19:27.000000000 -0500
25877+++ linux-3.1.4/drivers/block/drbd/drbd_main.c 2011-11-16 18:39:07.000000000 -0500
25878@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
25879 p.sector = sector;
25880 p.block_id = block_id;
25881 p.blksize = blksize;
25882- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
25883+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
25884
25885 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
25886 return false;
25887@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
25888 p.sector = cpu_to_be64(req->sector);
25889 p.block_id = (unsigned long)req;
25890 p.seq_num = cpu_to_be32(req->seq_num =
25891- atomic_add_return(1, &mdev->packet_seq));
25892+ atomic_add_return_unchecked(1, &mdev->packet_seq));
25893
25894 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
25895
25896@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
25897 atomic_set(&mdev->unacked_cnt, 0);
25898 atomic_set(&mdev->local_cnt, 0);
25899 atomic_set(&mdev->net_cnt, 0);
25900- atomic_set(&mdev->packet_seq, 0);
25901+ atomic_set_unchecked(&mdev->packet_seq, 0);
25902 atomic_set(&mdev->pp_in_use, 0);
25903 atomic_set(&mdev->pp_in_use_by_net, 0);
25904 atomic_set(&mdev->rs_sect_in, 0);
25905@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
25906 mdev->receiver.t_state);
25907
25908 /* no need to lock it, I'm the only thread alive */
25909- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
25910- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
25911+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
25912+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
25913 mdev->al_writ_cnt =
25914 mdev->bm_writ_cnt =
25915 mdev->read_cnt =
25916diff -urNp linux-3.1.4/drivers/block/drbd/drbd_nl.c linux-3.1.4/drivers/block/drbd/drbd_nl.c
25917--- linux-3.1.4/drivers/block/drbd/drbd_nl.c 2011-11-11 15:19:27.000000000 -0500
25918+++ linux-3.1.4/drivers/block/drbd/drbd_nl.c 2011-11-16 18:39:07.000000000 -0500
25919@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
25920 module_put(THIS_MODULE);
25921 }
25922
25923-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25924+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25925
25926 static unsigned short *
25927 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
25928@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
25929 cn_reply->id.idx = CN_IDX_DRBD;
25930 cn_reply->id.val = CN_VAL_DRBD;
25931
25932- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25933+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25934 cn_reply->ack = 0; /* not used here. */
25935 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25936 (int)((char *)tl - (char *)reply->tag_list);
25937@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
25938 cn_reply->id.idx = CN_IDX_DRBD;
25939 cn_reply->id.val = CN_VAL_DRBD;
25940
25941- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25942+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25943 cn_reply->ack = 0; /* not used here. */
25944 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25945 (int)((char *)tl - (char *)reply->tag_list);
25946@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
25947 cn_reply->id.idx = CN_IDX_DRBD;
25948 cn_reply->id.val = CN_VAL_DRBD;
25949
25950- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
25951+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
25952 cn_reply->ack = 0; // not used here.
25953 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25954 (int)((char*)tl - (char*)reply->tag_list);
25955@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
25956 cn_reply->id.idx = CN_IDX_DRBD;
25957 cn_reply->id.val = CN_VAL_DRBD;
25958
25959- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25960+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25961 cn_reply->ack = 0; /* not used here. */
25962 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25963 (int)((char *)tl - (char *)reply->tag_list);
25964diff -urNp linux-3.1.4/drivers/block/drbd/drbd_receiver.c linux-3.1.4/drivers/block/drbd/drbd_receiver.c
25965--- linux-3.1.4/drivers/block/drbd/drbd_receiver.c 2011-11-11 15:19:27.000000000 -0500
25966+++ linux-3.1.4/drivers/block/drbd/drbd_receiver.c 2011-11-16 18:39:07.000000000 -0500
25967@@ -894,7 +894,7 @@ retry:
25968 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
25969 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
25970
25971- atomic_set(&mdev->packet_seq, 0);
25972+ atomic_set_unchecked(&mdev->packet_seq, 0);
25973 mdev->peer_seq = 0;
25974
25975 drbd_thread_start(&mdev->asender);
25976@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
25977 do {
25978 next_epoch = NULL;
25979
25980- epoch_size = atomic_read(&epoch->epoch_size);
25981+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
25982
25983 switch (ev & ~EV_CLEANUP) {
25984 case EV_PUT:
25985@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
25986 rv = FE_DESTROYED;
25987 } else {
25988 epoch->flags = 0;
25989- atomic_set(&epoch->epoch_size, 0);
25990+ atomic_set_unchecked(&epoch->epoch_size, 0);
25991 /* atomic_set(&epoch->active, 0); is already zero */
25992 if (rv == FE_STILL_LIVE)
25993 rv = FE_RECYCLED;
25994@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
25995 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
25996 drbd_flush(mdev);
25997
25998- if (atomic_read(&mdev->current_epoch->epoch_size)) {
25999+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26000 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26001 if (epoch)
26002 break;
26003 }
26004
26005 epoch = mdev->current_epoch;
26006- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26007+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26008
26009 D_ASSERT(atomic_read(&epoch->active) == 0);
26010 D_ASSERT(epoch->flags == 0);
26011@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
26012 }
26013
26014 epoch->flags = 0;
26015- atomic_set(&epoch->epoch_size, 0);
26016+ atomic_set_unchecked(&epoch->epoch_size, 0);
26017 atomic_set(&epoch->active, 0);
26018
26019 spin_lock(&mdev->epoch_lock);
26020- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26021+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26022 list_add(&epoch->list, &mdev->current_epoch->list);
26023 mdev->current_epoch = epoch;
26024 mdev->epochs++;
26025@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
26026 spin_unlock(&mdev->peer_seq_lock);
26027
26028 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26029- atomic_inc(&mdev->current_epoch->epoch_size);
26030+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26031 return drbd_drain_block(mdev, data_size);
26032 }
26033
26034@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
26035
26036 spin_lock(&mdev->epoch_lock);
26037 e->epoch = mdev->current_epoch;
26038- atomic_inc(&e->epoch->epoch_size);
26039+ atomic_inc_unchecked(&e->epoch->epoch_size);
26040 atomic_inc(&e->epoch->active);
26041 spin_unlock(&mdev->epoch_lock);
26042
26043@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
26044 D_ASSERT(list_empty(&mdev->done_ee));
26045
26046 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26047- atomic_set(&mdev->current_epoch->epoch_size, 0);
26048+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26049 D_ASSERT(list_empty(&mdev->current_epoch->list));
26050 }
26051
26052diff -urNp linux-3.1.4/drivers/block/loop.c linux-3.1.4/drivers/block/loop.c
26053--- linux-3.1.4/drivers/block/loop.c 2011-11-11 15:19:27.000000000 -0500
26054+++ linux-3.1.4/drivers/block/loop.c 2011-11-16 18:39:07.000000000 -0500
26055@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
26056 mm_segment_t old_fs = get_fs();
26057
26058 set_fs(get_ds());
26059- bw = file->f_op->write(file, buf, len, &pos);
26060+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26061 set_fs(old_fs);
26062 if (likely(bw == len))
26063 return 0;
26064diff -urNp linux-3.1.4/drivers/block/nbd.c linux-3.1.4/drivers/block/nbd.c
26065--- linux-3.1.4/drivers/block/nbd.c 2011-11-11 15:19:27.000000000 -0500
26066+++ linux-3.1.4/drivers/block/nbd.c 2011-11-16 18:40:10.000000000 -0500
26067@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
26068 struct kvec iov;
26069 sigset_t blocked, oldset;
26070
26071+ pax_track_stack();
26072+
26073 if (unlikely(!sock)) {
26074 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26075 lo->disk->disk_name, (send ? "send" : "recv"));
26076@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
26077 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26078 unsigned int cmd, unsigned long arg)
26079 {
26080+ pax_track_stack();
26081+
26082 switch (cmd) {
26083 case NBD_DISCONNECT: {
26084 struct request sreq;
26085diff -urNp linux-3.1.4/drivers/char/agp/frontend.c linux-3.1.4/drivers/char/agp/frontend.c
26086--- linux-3.1.4/drivers/char/agp/frontend.c 2011-11-11 15:19:27.000000000 -0500
26087+++ linux-3.1.4/drivers/char/agp/frontend.c 2011-11-16 18:39:07.000000000 -0500
26088@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
26089 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26090 return -EFAULT;
26091
26092- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26093+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26094 return -EFAULT;
26095
26096 client = agp_find_client_by_pid(reserve.pid);
26097diff -urNp linux-3.1.4/drivers/char/briq_panel.c linux-3.1.4/drivers/char/briq_panel.c
26098--- linux-3.1.4/drivers/char/briq_panel.c 2011-11-11 15:19:27.000000000 -0500
26099+++ linux-3.1.4/drivers/char/briq_panel.c 2011-11-16 18:40:10.000000000 -0500
26100@@ -9,6 +9,7 @@
26101 #include <linux/types.h>
26102 #include <linux/errno.h>
26103 #include <linux/tty.h>
26104+#include <linux/mutex.h>
26105 #include <linux/timer.h>
26106 #include <linux/kernel.h>
26107 #include <linux/wait.h>
26108@@ -34,6 +35,7 @@ static int vfd_is_open;
26109 static unsigned char vfd[40];
26110 static int vfd_cursor;
26111 static unsigned char ledpb, led;
26112+static DEFINE_MUTEX(vfd_mutex);
26113
26114 static void update_vfd(void)
26115 {
26116@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
26117 if (!vfd_is_open)
26118 return -EBUSY;
26119
26120+ mutex_lock(&vfd_mutex);
26121 for (;;) {
26122 char c;
26123 if (!indx)
26124 break;
26125- if (get_user(c, buf))
26126+ if (get_user(c, buf)) {
26127+ mutex_unlock(&vfd_mutex);
26128 return -EFAULT;
26129+ }
26130 if (esc) {
26131 set_led(c);
26132 esc = 0;
26133@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
26134 buf++;
26135 }
26136 update_vfd();
26137+ mutex_unlock(&vfd_mutex);
26138
26139 return len;
26140 }
26141diff -urNp linux-3.1.4/drivers/char/genrtc.c linux-3.1.4/drivers/char/genrtc.c
26142--- linux-3.1.4/drivers/char/genrtc.c 2011-11-11 15:19:27.000000000 -0500
26143+++ linux-3.1.4/drivers/char/genrtc.c 2011-11-16 18:40:10.000000000 -0500
26144@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
26145 switch (cmd) {
26146
26147 case RTC_PLL_GET:
26148+ memset(&pll, 0, sizeof(pll));
26149 if (get_rtc_pll(&pll))
26150 return -EINVAL;
26151 else
26152diff -urNp linux-3.1.4/drivers/char/hpet.c linux-3.1.4/drivers/char/hpet.c
26153--- linux-3.1.4/drivers/char/hpet.c 2011-11-11 15:19:27.000000000 -0500
26154+++ linux-3.1.4/drivers/char/hpet.c 2011-11-16 18:39:07.000000000 -0500
26155@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
26156 }
26157
26158 static int
26159-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26160+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26161 struct hpet_info *info)
26162 {
26163 struct hpet_timer __iomem *timer;
26164diff -urNp linux-3.1.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.1.4/drivers/char/ipmi/ipmi_msghandler.c
26165--- linux-3.1.4/drivers/char/ipmi/ipmi_msghandler.c 2011-11-11 15:19:27.000000000 -0500
26166+++ linux-3.1.4/drivers/char/ipmi/ipmi_msghandler.c 2011-11-16 18:40:10.000000000 -0500
26167@@ -415,7 +415,7 @@ struct ipmi_smi {
26168 struct proc_dir_entry *proc_dir;
26169 char proc_dir_name[10];
26170
26171- atomic_t stats[IPMI_NUM_STATS];
26172+ atomic_unchecked_t stats[IPMI_NUM_STATS];
26173
26174 /*
26175 * run_to_completion duplicate of smb_info, smi_info
26176@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26177
26178
26179 #define ipmi_inc_stat(intf, stat) \
26180- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26181+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26182 #define ipmi_get_stat(intf, stat) \
26183- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26184+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26185
26186 static int is_lan_addr(struct ipmi_addr *addr)
26187 {
26188@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
26189 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26190 init_waitqueue_head(&intf->waitq);
26191 for (i = 0; i < IPMI_NUM_STATS; i++)
26192- atomic_set(&intf->stats[i], 0);
26193+ atomic_set_unchecked(&intf->stats[i], 0);
26194
26195 intf->proc_dir = NULL;
26196
26197@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
26198 struct ipmi_smi_msg smi_msg;
26199 struct ipmi_recv_msg recv_msg;
26200
26201+ pax_track_stack();
26202+
26203 si = (struct ipmi_system_interface_addr *) &addr;
26204 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26205 si->channel = IPMI_BMC_CHANNEL;
26206diff -urNp linux-3.1.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.1.4/drivers/char/ipmi/ipmi_si_intf.c
26207--- linux-3.1.4/drivers/char/ipmi/ipmi_si_intf.c 2011-11-11 15:19:27.000000000 -0500
26208+++ linux-3.1.4/drivers/char/ipmi/ipmi_si_intf.c 2011-11-16 18:39:07.000000000 -0500
26209@@ -277,7 +277,7 @@ struct smi_info {
26210 unsigned char slave_addr;
26211
26212 /* Counters and things for the proc filesystem. */
26213- atomic_t stats[SI_NUM_STATS];
26214+ atomic_unchecked_t stats[SI_NUM_STATS];
26215
26216 struct task_struct *thread;
26217
26218@@ -286,9 +286,9 @@ struct smi_info {
26219 };
26220
26221 #define smi_inc_stat(smi, stat) \
26222- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26223+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26224 #define smi_get_stat(smi, stat) \
26225- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26226+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26227
26228 #define SI_MAX_PARMS 4
26229
26230@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
26231 atomic_set(&new_smi->req_events, 0);
26232 new_smi->run_to_completion = 0;
26233 for (i = 0; i < SI_NUM_STATS; i++)
26234- atomic_set(&new_smi->stats[i], 0);
26235+ atomic_set_unchecked(&new_smi->stats[i], 0);
26236
26237 new_smi->interrupt_disabled = 1;
26238 atomic_set(&new_smi->stop_operation, 0);
26239diff -urNp linux-3.1.4/drivers/char/Kconfig linux-3.1.4/drivers/char/Kconfig
26240--- linux-3.1.4/drivers/char/Kconfig 2011-11-11 15:19:27.000000000 -0500
26241+++ linux-3.1.4/drivers/char/Kconfig 2011-11-16 18:40:10.000000000 -0500
26242@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26243
26244 config DEVKMEM
26245 bool "/dev/kmem virtual device support"
26246- default y
26247+ default n
26248+ depends on !GRKERNSEC_KMEM
26249 help
26250 Say Y here if you want to support the /dev/kmem device. The
26251 /dev/kmem device is rarely used, but can be used for certain
26252@@ -596,6 +597,7 @@ config DEVPORT
26253 bool
26254 depends on !M68K
26255 depends on ISA || PCI
26256+ depends on !GRKERNSEC_KMEM
26257 default y
26258
26259 source "drivers/s390/char/Kconfig"
26260diff -urNp linux-3.1.4/drivers/char/mbcs.c linux-3.1.4/drivers/char/mbcs.c
26261--- linux-3.1.4/drivers/char/mbcs.c 2011-11-11 15:19:27.000000000 -0500
26262+++ linux-3.1.4/drivers/char/mbcs.c 2011-11-16 18:39:07.000000000 -0500
26263@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *de
26264 return 0;
26265 }
26266
26267-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
26268+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
26269 {
26270 .part_num = MBCS_PART_NUM,
26271 .mfg_num = MBCS_MFG_NUM,
26272diff -urNp linux-3.1.4/drivers/char/mem.c linux-3.1.4/drivers/char/mem.c
26273--- linux-3.1.4/drivers/char/mem.c 2011-11-11 15:19:27.000000000 -0500
26274+++ linux-3.1.4/drivers/char/mem.c 2011-11-17 18:31:56.000000000 -0500
26275@@ -18,6 +18,7 @@
26276 #include <linux/raw.h>
26277 #include <linux/tty.h>
26278 #include <linux/capability.h>
26279+#include <linux/security.h>
26280 #include <linux/ptrace.h>
26281 #include <linux/device.h>
26282 #include <linux/highmem.h>
26283@@ -34,6 +35,10 @@
26284 # include <linux/efi.h>
26285 #endif
26286
26287+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26288+extern const struct file_operations grsec_fops;
26289+#endif
26290+
26291 static inline unsigned long size_inside_page(unsigned long start,
26292 unsigned long size)
26293 {
26294@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
26295
26296 while (cursor < to) {
26297 if (!devmem_is_allowed(pfn)) {
26298+#ifdef CONFIG_GRKERNSEC_KMEM
26299+ gr_handle_mem_readwrite(from, to);
26300+#else
26301 printk(KERN_INFO
26302 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26303 current->comm, from, to);
26304+#endif
26305 return 0;
26306 }
26307 cursor += PAGE_SIZE;
26308@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
26309 }
26310 return 1;
26311 }
26312+#elif defined(CONFIG_GRKERNSEC_KMEM)
26313+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26314+{
26315+ return 0;
26316+}
26317 #else
26318 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26319 {
26320@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
26321
26322 while (count > 0) {
26323 unsigned long remaining;
26324+ char *temp;
26325
26326 sz = size_inside_page(p, count);
26327
26328@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
26329 if (!ptr)
26330 return -EFAULT;
26331
26332- remaining = copy_to_user(buf, ptr, sz);
26333+#ifdef CONFIG_PAX_USERCOPY
26334+ temp = kmalloc(sz, GFP_KERNEL);
26335+ if (!temp) {
26336+ unxlate_dev_mem_ptr(p, ptr);
26337+ return -ENOMEM;
26338+ }
26339+ memcpy(temp, ptr, sz);
26340+#else
26341+ temp = ptr;
26342+#endif
26343+
26344+ remaining = copy_to_user(buf, temp, sz);
26345+
26346+#ifdef CONFIG_PAX_USERCOPY
26347+ kfree(temp);
26348+#endif
26349+
26350 unxlate_dev_mem_ptr(p, ptr);
26351 if (remaining)
26352 return -EFAULT;
26353@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
26354 size_t count, loff_t *ppos)
26355 {
26356 unsigned long p = *ppos;
26357- ssize_t low_count, read, sz;
26358+ ssize_t low_count, read, sz, err = 0;
26359 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
26360- int err = 0;
26361
26362 read = 0;
26363 if (p < (unsigned long) high_memory) {
26364@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
26365 }
26366 #endif
26367 while (low_count > 0) {
26368+ char *temp;
26369+
26370 sz = size_inside_page(p, low_count);
26371
26372 /*
26373@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
26374 */
26375 kbuf = xlate_dev_kmem_ptr((char *)p);
26376
26377- if (copy_to_user(buf, kbuf, sz))
26378+#ifdef CONFIG_PAX_USERCOPY
26379+ temp = kmalloc(sz, GFP_KERNEL);
26380+ if (!temp)
26381+ return -ENOMEM;
26382+ memcpy(temp, kbuf, sz);
26383+#else
26384+ temp = kbuf;
26385+#endif
26386+
26387+ err = copy_to_user(buf, temp, sz);
26388+
26389+#ifdef CONFIG_PAX_USERCOPY
26390+ kfree(temp);
26391+#endif
26392+
26393+ if (err)
26394 return -EFAULT;
26395 buf += sz;
26396 p += sz;
26397@@ -866,6 +913,9 @@ static const struct memdev {
26398 #ifdef CONFIG_CRASH_DUMP
26399 [12] = { "oldmem", 0, &oldmem_fops, NULL },
26400 #endif
26401+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26402+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
26403+#endif
26404 };
26405
26406 static int memory_open(struct inode *inode, struct file *filp)
26407diff -urNp linux-3.1.4/drivers/char/nvram.c linux-3.1.4/drivers/char/nvram.c
26408--- linux-3.1.4/drivers/char/nvram.c 2011-11-11 15:19:27.000000000 -0500
26409+++ linux-3.1.4/drivers/char/nvram.c 2011-11-16 18:39:07.000000000 -0500
26410@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *f
26411
26412 spin_unlock_irq(&rtc_lock);
26413
26414- if (copy_to_user(buf, contents, tmp - contents))
26415+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
26416 return -EFAULT;
26417
26418 *ppos = i;
26419diff -urNp linux-3.1.4/drivers/char/random.c linux-3.1.4/drivers/char/random.c
26420--- linux-3.1.4/drivers/char/random.c 2011-11-11 15:19:27.000000000 -0500
26421+++ linux-3.1.4/drivers/char/random.c 2011-11-16 18:40:10.000000000 -0500
26422@@ -261,8 +261,13 @@
26423 /*
26424 * Configuration information
26425 */
26426+#ifdef CONFIG_GRKERNSEC_RANDNET
26427+#define INPUT_POOL_WORDS 512
26428+#define OUTPUT_POOL_WORDS 128
26429+#else
26430 #define INPUT_POOL_WORDS 128
26431 #define OUTPUT_POOL_WORDS 32
26432+#endif
26433 #define SEC_XFER_SIZE 512
26434 #define EXTRACT_SIZE 10
26435
26436@@ -300,10 +305,17 @@ static struct poolinfo {
26437 int poolwords;
26438 int tap1, tap2, tap3, tap4, tap5;
26439 } poolinfo_table[] = {
26440+#ifdef CONFIG_GRKERNSEC_RANDNET
26441+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
26442+ { 512, 411, 308, 208, 104, 1 },
26443+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
26444+ { 128, 103, 76, 51, 25, 1 },
26445+#else
26446 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
26447 { 128, 103, 76, 51, 25, 1 },
26448 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
26449 { 32, 26, 20, 14, 7, 1 },
26450+#endif
26451 #if 0
26452 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
26453 { 2048, 1638, 1231, 819, 411, 1 },
26454@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
26455
26456 extract_buf(r, tmp);
26457 i = min_t(int, nbytes, EXTRACT_SIZE);
26458- if (copy_to_user(buf, tmp, i)) {
26459+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
26460 ret = -EFAULT;
26461 break;
26462 }
26463@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
26464 #include <linux/sysctl.h>
26465
26466 static int min_read_thresh = 8, min_write_thresh;
26467-static int max_read_thresh = INPUT_POOL_WORDS * 32;
26468+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
26469 static int max_write_thresh = INPUT_POOL_WORDS * 32;
26470 static char sysctl_bootid[16];
26471
26472diff -urNp linux-3.1.4/drivers/char/sonypi.c linux-3.1.4/drivers/char/sonypi.c
26473--- linux-3.1.4/drivers/char/sonypi.c 2011-11-11 15:19:27.000000000 -0500
26474+++ linux-3.1.4/drivers/char/sonypi.c 2011-11-16 18:39:07.000000000 -0500
26475@@ -55,6 +55,7 @@
26476 #include <asm/uaccess.h>
26477 #include <asm/io.h>
26478 #include <asm/system.h>
26479+#include <asm/local.h>
26480
26481 #include <linux/sonypi.h>
26482
26483@@ -491,7 +492,7 @@ static struct sonypi_device {
26484 spinlock_t fifo_lock;
26485 wait_queue_head_t fifo_proc_list;
26486 struct fasync_struct *fifo_async;
26487- int open_count;
26488+ local_t open_count;
26489 int model;
26490 struct input_dev *input_jog_dev;
26491 struct input_dev *input_key_dev;
26492@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
26493 static int sonypi_misc_release(struct inode *inode, struct file *file)
26494 {
26495 mutex_lock(&sonypi_device.lock);
26496- sonypi_device.open_count--;
26497+ local_dec(&sonypi_device.open_count);
26498 mutex_unlock(&sonypi_device.lock);
26499 return 0;
26500 }
26501@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
26502 {
26503 mutex_lock(&sonypi_device.lock);
26504 /* Flush input queue on first open */
26505- if (!sonypi_device.open_count)
26506+ if (!local_read(&sonypi_device.open_count))
26507 kfifo_reset(&sonypi_device.fifo);
26508- sonypi_device.open_count++;
26509+ local_inc(&sonypi_device.open_count);
26510 mutex_unlock(&sonypi_device.lock);
26511
26512 return 0;
26513diff -urNp linux-3.1.4/drivers/char/tpm/tpm_bios.c linux-3.1.4/drivers/char/tpm/tpm_bios.c
26514--- linux-3.1.4/drivers/char/tpm/tpm_bios.c 2011-11-11 15:19:27.000000000 -0500
26515+++ linux-3.1.4/drivers/char/tpm/tpm_bios.c 2011-11-16 18:39:07.000000000 -0500
26516@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
26517 event = addr;
26518
26519 if ((event->event_type == 0 && event->event_size == 0) ||
26520- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
26521+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
26522 return NULL;
26523
26524 return addr;
26525@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
26526 return NULL;
26527
26528 if ((event->event_type == 0 && event->event_size == 0) ||
26529- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
26530+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
26531 return NULL;
26532
26533 (*pos)++;
26534@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
26535 int i;
26536
26537 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
26538- seq_putc(m, data[i]);
26539+ if (!seq_putc(m, data[i]))
26540+ return -EFAULT;
26541
26542 return 0;
26543 }
26544@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
26545 log->bios_event_log_end = log->bios_event_log + len;
26546
26547 virt = acpi_os_map_memory(start, len);
26548+ if (!virt) {
26549+ kfree(log->bios_event_log);
26550+ log->bios_event_log = NULL;
26551+ return -EFAULT;
26552+ }
26553
26554- memcpy(log->bios_event_log, virt, len);
26555+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
26556
26557 acpi_os_unmap_memory(virt, len);
26558 return 0;
26559diff -urNp linux-3.1.4/drivers/char/tpm/tpm.c linux-3.1.4/drivers/char/tpm/tpm.c
26560--- linux-3.1.4/drivers/char/tpm/tpm.c 2011-11-11 15:19:27.000000000 -0500
26561+++ linux-3.1.4/drivers/char/tpm/tpm.c 2011-11-16 18:40:10.000000000 -0500
26562@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_c
26563 chip->vendor.req_complete_val)
26564 goto out_recv;
26565
26566- if ((status == chip->vendor.req_canceled)) {
26567+ if (status == chip->vendor.req_canceled) {
26568 dev_err(chip->dev, "Operation Canceled\n");
26569 rc = -ECANCELED;
26570 goto out;
26571@@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *de
26572
26573 struct tpm_chip *chip = dev_get_drvdata(dev);
26574
26575+ pax_track_stack();
26576+
26577 tpm_cmd.header.in = tpm_readpubek_header;
26578 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
26579 "attempting to read the PUBEK");
26580diff -urNp linux-3.1.4/drivers/char/virtio_console.c linux-3.1.4/drivers/char/virtio_console.c
26581--- linux-3.1.4/drivers/char/virtio_console.c 2011-11-11 15:19:27.000000000 -0500
26582+++ linux-3.1.4/drivers/char/virtio_console.c 2011-11-16 18:39:07.000000000 -0500
26583@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
26584 if (to_user) {
26585 ssize_t ret;
26586
26587- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
26588+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
26589 if (ret)
26590 return -EFAULT;
26591 } else {
26592@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
26593 if (!port_has_data(port) && !port->host_connected)
26594 return 0;
26595
26596- return fill_readbuf(port, ubuf, count, true);
26597+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
26598 }
26599
26600 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
26601diff -urNp linux-3.1.4/drivers/crypto/hifn_795x.c linux-3.1.4/drivers/crypto/hifn_795x.c
26602--- linux-3.1.4/drivers/crypto/hifn_795x.c 2011-11-11 15:19:27.000000000 -0500
26603+++ linux-3.1.4/drivers/crypto/hifn_795x.c 2011-11-16 18:40:10.000000000 -0500
26604@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
26605 0xCA, 0x34, 0x2B, 0x2E};
26606 struct scatterlist sg;
26607
26608+ pax_track_stack();
26609+
26610 memset(src, 0, sizeof(src));
26611 memset(ctx.key, 0, sizeof(ctx.key));
26612
26613diff -urNp linux-3.1.4/drivers/crypto/padlock-aes.c linux-3.1.4/drivers/crypto/padlock-aes.c
26614--- linux-3.1.4/drivers/crypto/padlock-aes.c 2011-11-11 15:19:27.000000000 -0500
26615+++ linux-3.1.4/drivers/crypto/padlock-aes.c 2011-11-16 18:40:10.000000000 -0500
26616@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
26617 struct crypto_aes_ctx gen_aes;
26618 int cpu;
26619
26620+ pax_track_stack();
26621+
26622 if (key_len % 8) {
26623 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
26624 return -EINVAL;
26625diff -urNp linux-3.1.4/drivers/edac/amd64_edac.c linux-3.1.4/drivers/edac/amd64_edac.c
26626--- linux-3.1.4/drivers/edac/amd64_edac.c 2011-11-11 15:19:27.000000000 -0500
26627+++ linux-3.1.4/drivers/edac/amd64_edac.c 2011-11-16 18:39:07.000000000 -0500
26628@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_i
26629 * PCI core identifies what devices are on a system during boot, and then
26630 * inquiry this table to see if this driver is for a given device found.
26631 */
26632-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
26633+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
26634 {
26635 .vendor = PCI_VENDOR_ID_AMD,
26636 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
26637diff -urNp linux-3.1.4/drivers/edac/amd76x_edac.c linux-3.1.4/drivers/edac/amd76x_edac.c
26638--- linux-3.1.4/drivers/edac/amd76x_edac.c 2011-11-11 15:19:27.000000000 -0500
26639+++ linux-3.1.4/drivers/edac/amd76x_edac.c 2011-11-16 18:39:07.000000000 -0500
26640@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(
26641 edac_mc_free(mci);
26642 }
26643
26644-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
26645+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
26646 {
26647 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26648 AMD762},
26649diff -urNp linux-3.1.4/drivers/edac/e752x_edac.c linux-3.1.4/drivers/edac/e752x_edac.c
26650--- linux-3.1.4/drivers/edac/e752x_edac.c 2011-11-11 15:19:27.000000000 -0500
26651+++ linux-3.1.4/drivers/edac/e752x_edac.c 2011-11-16 18:39:07.000000000 -0500
26652@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(s
26653 edac_mc_free(mci);
26654 }
26655
26656-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
26657+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
26658 {
26659 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26660 E7520},
26661diff -urNp linux-3.1.4/drivers/edac/e7xxx_edac.c linux-3.1.4/drivers/edac/e7xxx_edac.c
26662--- linux-3.1.4/drivers/edac/e7xxx_edac.c 2011-11-11 15:19:27.000000000 -0500
26663+++ linux-3.1.4/drivers/edac/e7xxx_edac.c 2011-11-16 18:39:07.000000000 -0500
26664@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(s
26665 edac_mc_free(mci);
26666 }
26667
26668-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
26669+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
26670 {
26671 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26672 E7205},
26673diff -urNp linux-3.1.4/drivers/edac/edac_pci_sysfs.c linux-3.1.4/drivers/edac/edac_pci_sysfs.c
26674--- linux-3.1.4/drivers/edac/edac_pci_sysfs.c 2011-11-11 15:19:27.000000000 -0500
26675+++ linux-3.1.4/drivers/edac/edac_pci_sysfs.c 2011-11-16 18:39:07.000000000 -0500
26676@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
26677 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
26678 static int edac_pci_poll_msec = 1000; /* one second workq period */
26679
26680-static atomic_t pci_parity_count = ATOMIC_INIT(0);
26681-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
26682+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
26683+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
26684
26685 static struct kobject *edac_pci_top_main_kobj;
26686 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
26687@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
26688 edac_printk(KERN_CRIT, EDAC_PCI,
26689 "Signaled System Error on %s\n",
26690 pci_name(dev));
26691- atomic_inc(&pci_nonparity_count);
26692+ atomic_inc_unchecked(&pci_nonparity_count);
26693 }
26694
26695 if (status & (PCI_STATUS_PARITY)) {
26696@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
26697 "Master Data Parity Error on %s\n",
26698 pci_name(dev));
26699
26700- atomic_inc(&pci_parity_count);
26701+ atomic_inc_unchecked(&pci_parity_count);
26702 }
26703
26704 if (status & (PCI_STATUS_DETECTED_PARITY)) {
26705@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
26706 "Detected Parity Error on %s\n",
26707 pci_name(dev));
26708
26709- atomic_inc(&pci_parity_count);
26710+ atomic_inc_unchecked(&pci_parity_count);
26711 }
26712 }
26713
26714@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
26715 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
26716 "Signaled System Error on %s\n",
26717 pci_name(dev));
26718- atomic_inc(&pci_nonparity_count);
26719+ atomic_inc_unchecked(&pci_nonparity_count);
26720 }
26721
26722 if (status & (PCI_STATUS_PARITY)) {
26723@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
26724 "Master Data Parity Error on "
26725 "%s\n", pci_name(dev));
26726
26727- atomic_inc(&pci_parity_count);
26728+ atomic_inc_unchecked(&pci_parity_count);
26729 }
26730
26731 if (status & (PCI_STATUS_DETECTED_PARITY)) {
26732@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
26733 "Detected Parity Error on %s\n",
26734 pci_name(dev));
26735
26736- atomic_inc(&pci_parity_count);
26737+ atomic_inc_unchecked(&pci_parity_count);
26738 }
26739 }
26740 }
26741@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
26742 if (!check_pci_errors)
26743 return;
26744
26745- before_count = atomic_read(&pci_parity_count);
26746+ before_count = atomic_read_unchecked(&pci_parity_count);
26747
26748 /* scan all PCI devices looking for a Parity Error on devices and
26749 * bridges.
26750@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
26751 /* Only if operator has selected panic on PCI Error */
26752 if (edac_pci_get_panic_on_pe()) {
26753 /* If the count is different 'after' from 'before' */
26754- if (before_count != atomic_read(&pci_parity_count))
26755+ if (before_count != atomic_read_unchecked(&pci_parity_count))
26756 panic("EDAC: PCI Parity Error");
26757 }
26758 }
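The edac_pci_sysfs hunk above converts counters that are pure statistics to atomic_unchecked_t. Under PaX's reference-counter hardening, ordinary atomic_t operations are instrumented to catch overflows (an overflowed refcount is a classic use-after-free primitive); counters that merely count events opt out through the *_unchecked variants so a harmless wraparound never trips the detector. A user-space sketch of the same split using C11 atomics (the saturation logic is illustrative only):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int  refcount = 1;        /* overflow here would be a bug */
static atomic_uint parity_errors = 0;   /* pure statistic, wrap is harmless */

/* "Checked" increment: refuse to wrap past INT_MAX, in the spirit of an
 * overflow-detecting atomic_inc(). */
static int ref_get(void)
{
        int old = atomic_load(&refcount);

        do {
                if (old == INT_MAX)
                        return 0;       /* would overflow: report instead */
        } while (!atomic_compare_exchange_weak(&refcount, &old, old + 1));
        return 1;
}

/* "Unchecked" increment: statistics just count, wraparound is fine. */
static void note_parity_error(void)
{
        atomic_fetch_add_explicit(&parity_errors, 1, memory_order_relaxed);
}

int main(void)
{
        note_parity_error();
        printf("get=%d errors=%u\n", ref_get(), atomic_load(&parity_errors));
        return 0;
}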
26759diff -urNp linux-3.1.4/drivers/edac/i3000_edac.c linux-3.1.4/drivers/edac/i3000_edac.c
26760--- linux-3.1.4/drivers/edac/i3000_edac.c 2011-11-11 15:19:27.000000000 -0500
26761+++ linux-3.1.4/drivers/edac/i3000_edac.c 2011-11-16 18:39:07.000000000 -0500
26762@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(s
26763 edac_mc_free(mci);
26764 }
26765
26766-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
26767+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
26768 {
26769 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26770 I3000},
26771diff -urNp linux-3.1.4/drivers/edac/i3200_edac.c linux-3.1.4/drivers/edac/i3200_edac.c
26772--- linux-3.1.4/drivers/edac/i3200_edac.c 2011-11-11 15:19:27.000000000 -0500
26773+++ linux-3.1.4/drivers/edac/i3200_edac.c 2011-11-16 18:39:07.000000000 -0500
26774@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(s
26775 edac_mc_free(mci);
26776 }
26777
26778-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
26779+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
26780 {
26781 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26782 I3200},
26783diff -urNp linux-3.1.4/drivers/edac/i5000_edac.c linux-3.1.4/drivers/edac/i5000_edac.c
26784--- linux-3.1.4/drivers/edac/i5000_edac.c 2011-11-11 15:19:27.000000000 -0500
26785+++ linux-3.1.4/drivers/edac/i5000_edac.c 2011-11-16 18:39:07.000000000 -0500
26786@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(s
26787 *
26788 * The "E500P" device is the first device supported.
26789 */
26790-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
26791+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
26792 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
26793 .driver_data = I5000P},
26794
26795diff -urNp linux-3.1.4/drivers/edac/i5100_edac.c linux-3.1.4/drivers/edac/i5100_edac.c
26796--- linux-3.1.4/drivers/edac/i5100_edac.c 2011-11-11 15:19:27.000000000 -0500
26797+++ linux-3.1.4/drivers/edac/i5100_edac.c 2011-11-16 18:39:07.000000000 -0500
26798@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(s
26799 edac_mc_free(mci);
26800 }
26801
26802-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
26803+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
26804 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
26805 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
26806 { 0, }
26807diff -urNp linux-3.1.4/drivers/edac/i5400_edac.c linux-3.1.4/drivers/edac/i5400_edac.c
26808--- linux-3.1.4/drivers/edac/i5400_edac.c 2011-11-11 15:19:27.000000000 -0500
26809+++ linux-3.1.4/drivers/edac/i5400_edac.c 2011-11-16 18:39:07.000000000 -0500
26810@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(s
26811 *
26812 * The "E500P" device is the first device supported.
26813 */
26814-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
26815+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
26816 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
26817 {0,} /* 0 terminated list. */
26818 };
26819diff -urNp linux-3.1.4/drivers/edac/i7300_edac.c linux-3.1.4/drivers/edac/i7300_edac.c
26820--- linux-3.1.4/drivers/edac/i7300_edac.c 2011-11-11 15:19:27.000000000 -0500
26821+++ linux-3.1.4/drivers/edac/i7300_edac.c 2011-11-16 18:39:07.000000000 -0500
26822@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(s
26823 *
26824 * Has only 8086:360c PCI ID
26825 */
26826-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
26827+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
26828 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
26829 {0,} /* 0 terminated list. */
26830 };
26831diff -urNp linux-3.1.4/drivers/edac/i7core_edac.c linux-3.1.4/drivers/edac/i7core_edac.c
26832--- linux-3.1.4/drivers/edac/i7core_edac.c 2011-11-11 15:19:27.000000000 -0500
26833+++ linux-3.1.4/drivers/edac/i7core_edac.c 2011-11-16 18:39:07.000000000 -0500
26834@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev
26835 /*
26836 * pci_device_id table for which devices we are looking for
26837 */
26838-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
26839+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
26840 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
26841 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
26842 {0,} /* 0 terminated list. */
26843diff -urNp linux-3.1.4/drivers/edac/i82443bxgx_edac.c linux-3.1.4/drivers/edac/i82443bxgx_edac.c
26844--- linux-3.1.4/drivers/edac/i82443bxgx_edac.c 2011-11-11 15:19:27.000000000 -0500
26845+++ linux-3.1.4/drivers/edac/i82443bxgx_edac.c 2011-11-16 18:39:07.000000000 -0500
26846@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_
26847
26848 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
26849
26850-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
26851+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
26852 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
26853 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
26854 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
26855diff -urNp linux-3.1.4/drivers/edac/i82860_edac.c linux-3.1.4/drivers/edac/i82860_edac.c
26856--- linux-3.1.4/drivers/edac/i82860_edac.c 2011-11-11 15:19:27.000000000 -0500
26857+++ linux-3.1.4/drivers/edac/i82860_edac.c 2011-11-16 18:39:07.000000000 -0500
26858@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(
26859 edac_mc_free(mci);
26860 }
26861
26862-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
26863+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
26864 {
26865 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26866 I82860},
26867diff -urNp linux-3.1.4/drivers/edac/i82875p_edac.c linux-3.1.4/drivers/edac/i82875p_edac.c
26868--- linux-3.1.4/drivers/edac/i82875p_edac.c 2011-11-11 15:19:27.000000000 -0500
26869+++ linux-3.1.4/drivers/edac/i82875p_edac.c 2011-11-16 18:39:07.000000000 -0500
26870@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one
26871 edac_mc_free(mci);
26872 }
26873
26874-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
26875+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
26876 {
26877 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26878 I82875P},
26879diff -urNp linux-3.1.4/drivers/edac/i82975x_edac.c linux-3.1.4/drivers/edac/i82975x_edac.c
26880--- linux-3.1.4/drivers/edac/i82975x_edac.c 2011-11-11 15:19:27.000000000 -0500
26881+++ linux-3.1.4/drivers/edac/i82975x_edac.c 2011-11-16 18:39:07.000000000 -0500
26882@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one
26883 edac_mc_free(mci);
26884 }
26885
26886-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
26887+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
26888 {
26889 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26890 I82975X
26891diff -urNp linux-3.1.4/drivers/edac/mce_amd.h linux-3.1.4/drivers/edac/mce_amd.h
26892--- linux-3.1.4/drivers/edac/mce_amd.h 2011-11-11 15:19:27.000000000 -0500
26893+++ linux-3.1.4/drivers/edac/mce_amd.h 2011-11-16 18:39:07.000000000 -0500
26894@@ -83,7 +83,7 @@ struct amd_decoder_ops {
26895 bool (*dc_mce)(u16, u8);
26896 bool (*ic_mce)(u16, u8);
26897 bool (*nb_mce)(u16, u8);
26898-};
26899+} __no_const;
26900
26901 void amd_report_gart_errors(bool);
26902 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
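The __no_const marking here (and on several driver ops structures later in the patch, e.g. fw_card_driver and the nouveau engine structs) is the escape hatch for grsecurity's constify plugin: structures made up of function pointers are forced const so their members cannot be rewritten at runtime, and the few that really are assigned at runtime must be tagged __no_const. A hand-written sketch of the distinction (no plugin involved):

#include <stdio.h>

struct decoder_ops {
        int (*dc_mce)(unsigned int bank, unsigned char xec);
        int (*ic_mce)(unsigned int bank, unsigned char xec);
};

static int k8_dc(unsigned int b, unsigned char x) { return (int)(b + x); }
static int k8_ic(unsigned int b, unsigned char x) { return (int)(b - x); }

/* Normal case: the table is known at build time, so it can be const and
 * live in read-only memory; overwriting one of its function pointers at
 * runtime is then impossible. */
static const struct decoder_ops k8_ops = { .dc_mce = k8_dc, .ic_mce = k8_ic };

/* The __no_const case: the ops are chosen per detected CPU family during
 * init, so the object has to stay writable. */
static struct decoder_ops runtime_ops;

static void pick_ops(int family)
{
        runtime_ops = k8_ops;           /* runtime assignment needs a
                                         * writable destination */
        (void)family;
}

int main(void)
{
        pick_ops(0x10);
        printf("%d %d\n", k8_ops.dc_mce(1, 2), runtime_ops.ic_mce(5, 3));
        return 0;
}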
26903diff -urNp linux-3.1.4/drivers/edac/r82600_edac.c linux-3.1.4/drivers/edac/r82600_edac.c
26904--- linux-3.1.4/drivers/edac/r82600_edac.c 2011-11-11 15:19:27.000000000 -0500
26905+++ linux-3.1.4/drivers/edac/r82600_edac.c 2011-11-16 18:39:07.000000000 -0500
26906@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(
26907 edac_mc_free(mci);
26908 }
26909
26910-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
26911+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
26912 {
26913 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
26914 },
26915diff -urNp linux-3.1.4/drivers/edac/x38_edac.c linux-3.1.4/drivers/edac/x38_edac.c
26916--- linux-3.1.4/drivers/edac/x38_edac.c 2011-11-11 15:19:27.000000000 -0500
26917+++ linux-3.1.4/drivers/edac/x38_edac.c 2011-11-16 18:39:07.000000000 -0500
26918@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(str
26919 edac_mc_free(mci);
26920 }
26921
26922-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
26923+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
26924 {
26925 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26926 X38},
26927diff -urNp linux-3.1.4/drivers/firewire/core-card.c linux-3.1.4/drivers/firewire/core-card.c
26928--- linux-3.1.4/drivers/firewire/core-card.c 2011-11-11 15:19:27.000000000 -0500
26929+++ linux-3.1.4/drivers/firewire/core-card.c 2011-11-16 18:39:07.000000000 -0500
26930@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
26931
26932 void fw_core_remove_card(struct fw_card *card)
26933 {
26934- struct fw_card_driver dummy_driver = dummy_driver_template;
26935+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
26936
26937 card->driver->update_phy_reg(card, 4,
26938 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
26939diff -urNp linux-3.1.4/drivers/firewire/core-cdev.c linux-3.1.4/drivers/firewire/core-cdev.c
26940--- linux-3.1.4/drivers/firewire/core-cdev.c 2011-11-11 15:19:27.000000000 -0500
26941+++ linux-3.1.4/drivers/firewire/core-cdev.c 2011-11-16 18:39:07.000000000 -0500
26942@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct clie
26943 int ret;
26944
26945 if ((request->channels == 0 && request->bandwidth == 0) ||
26946- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
26947- request->bandwidth < 0)
26948+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
26949 return -EINVAL;
26950
26951 r = kmalloc(sizeof(*r), GFP_KERNEL);
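The init_iso_resource() change above drops the `request->bandwidth < 0` arm, presumably because the field is an unsigned 32-bit quantity, so the comparison can never be true and only obscures the real bounds check. A tiny demonstration; building it with `gcc -Wextra` (which enables -Wtype-limits) reports the dead comparison if you add it back:

#include <stdio.h>

struct iso_request { unsigned int channels, bandwidth; };

static int validate(const struct iso_request *r, unsigned int max_bw)
{
        if ((r->channels == 0 && r->bandwidth == 0) || r->bandwidth > max_bw)
                return -1;
        /* r->bandwidth < 0 can never be true for an unsigned int, so a
         * test like that adds nothing, which is why the hunk deletes it. */
        return 0;
}

int main(void)
{
        struct iso_request r = { .channels = 1, .bandwidth = 10 };
        printf("%d\n", validate(&r, 4915));
        return 0;
}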
26952diff -urNp linux-3.1.4/drivers/firewire/core.h linux-3.1.4/drivers/firewire/core.h
26953--- linux-3.1.4/drivers/firewire/core.h 2011-11-11 15:19:27.000000000 -0500
26954+++ linux-3.1.4/drivers/firewire/core.h 2011-11-16 18:39:07.000000000 -0500
26955@@ -101,6 +101,7 @@ struct fw_card_driver {
26956
26957 int (*stop_iso)(struct fw_iso_context *ctx);
26958 };
26959+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
26960
26961 void fw_card_initialize(struct fw_card *card,
26962 const struct fw_card_driver *driver, struct device *device);
26963diff -urNp linux-3.1.4/drivers/firewire/core-transaction.c linux-3.1.4/drivers/firewire/core-transaction.c
26964--- linux-3.1.4/drivers/firewire/core-transaction.c 2011-11-11 15:19:27.000000000 -0500
26965+++ linux-3.1.4/drivers/firewire/core-transaction.c 2011-11-16 18:40:10.000000000 -0500
26966@@ -37,6 +37,7 @@
26967 #include <linux/timer.h>
26968 #include <linux/types.h>
26969 #include <linux/workqueue.h>
26970+#include <linux/sched.h>
26971
26972 #include <asm/byteorder.h>
26973
26974@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
26975 struct transaction_callback_data d;
26976 struct fw_transaction t;
26977
26978+ pax_track_stack();
26979+
26980 init_timer_on_stack(&t.split_timeout_timer);
26981 init_completion(&d.done);
26982 d.payload = payload;
26983diff -urNp linux-3.1.4/drivers/firmware/dmi_scan.c linux-3.1.4/drivers/firmware/dmi_scan.c
26984--- linux-3.1.4/drivers/firmware/dmi_scan.c 2011-11-11 15:19:27.000000000 -0500
26985+++ linux-3.1.4/drivers/firmware/dmi_scan.c 2011-11-16 18:39:07.000000000 -0500
26986@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
26987 }
26988 }
26989 else {
26990- /*
26991- * no iounmap() for that ioremap(); it would be a no-op, but
26992- * it's so early in setup that sucker gets confused into doing
26993- * what it shouldn't if we actually call it.
26994- */
26995 p = dmi_ioremap(0xF0000, 0x10000);
26996 if (p == NULL)
26997 goto error;
26998@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
26999 if (buf == NULL)
27000 return -1;
27001
27002- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27003+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27004
27005 iounmap(buf);
27006 return 0;
27007diff -urNp linux-3.1.4/drivers/gpio/gpio-vr41xx.c linux-3.1.4/drivers/gpio/gpio-vr41xx.c
27008--- linux-3.1.4/drivers/gpio/gpio-vr41xx.c 2011-11-11 15:19:27.000000000 -0500
27009+++ linux-3.1.4/drivers/gpio/gpio-vr41xx.c 2011-11-16 18:39:07.000000000 -0500
27010@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27011 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27012 maskl, pendl, maskh, pendh);
27013
27014- atomic_inc(&irq_err_count);
27015+ atomic_inc_unchecked(&irq_err_count);
27016
27017 return -EINVAL;
27018 }
27019diff -urNp linux-3.1.4/drivers/gpu/drm/drm_crtc.c linux-3.1.4/drivers/gpu/drm/drm_crtc.c
27020--- linux-3.1.4/drivers/gpu/drm/drm_crtc.c 2011-11-11 15:19:27.000000000 -0500
27021+++ linux-3.1.4/drivers/gpu/drm/drm_crtc.c 2011-11-16 18:39:07.000000000 -0500
27022@@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_dev
27023 */
27024 if ((out_resp->count_modes >= mode_count) && mode_count) {
27025 copied = 0;
27026- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27027+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27028 list_for_each_entry(mode, &connector->modes, head) {
27029 drm_crtc_convert_to_umode(&u_mode, mode);
27030 if (copy_to_user(mode_ptr + copied,
27031@@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_dev
27032
27033 if ((out_resp->count_props >= props_count) && props_count) {
27034 copied = 0;
27035- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27036- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27037+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27038+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27039 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27040 if (connector->property_ids[i] != 0) {
27041 if (put_user(connector->property_ids[i],
27042@@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_dev
27043
27044 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27045 copied = 0;
27046- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27047+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27048 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27049 if (connector->encoder_ids[i] != 0) {
27050 if (put_user(connector->encoder_ids[i],
27051@@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *
27052 }
27053
27054 for (i = 0; i < crtc_req->count_connectors; i++) {
27055- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27056+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27057 if (get_user(out_id, &set_connectors_ptr[i])) {
27058 ret = -EFAULT;
27059 goto out;
27060@@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
27061 fb = obj_to_fb(obj);
27062
27063 num_clips = r->num_clips;
27064- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27065+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27066
27067 if (!num_clips != !clips_ptr) {
27068 ret = -EINVAL;
27069@@ -2272,7 +2272,7 @@ int drm_mode_getproperty_ioctl(struct dr
27070 out_resp->flags = property->flags;
27071
27072 if ((out_resp->count_values >= value_count) && value_count) {
27073- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27074+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27075 for (i = 0; i < value_count; i++) {
27076 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27077 ret = -EFAULT;
27078@@ -2285,7 +2285,7 @@ int drm_mode_getproperty_ioctl(struct dr
27079 if (property->flags & DRM_MODE_PROP_ENUM) {
27080 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27081 copied = 0;
27082- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27083+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27084 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27085
27086 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27087@@ -2308,7 +2308,7 @@ int drm_mode_getproperty_ioctl(struct dr
27088 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27089 copied = 0;
27090 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27091- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27092+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27093
27094 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27095 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27096@@ -2369,7 +2369,7 @@ int drm_mode_getblob_ioctl(struct drm_de
27097 struct drm_mode_get_blob *out_resp = data;
27098 struct drm_property_blob *blob;
27099 int ret = 0;
27100- void *blob_ptr;
27101+ void __user *blob_ptr;
27102
27103 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27104 return -EINVAL;
27105@@ -2383,7 +2383,7 @@ int drm_mode_getblob_ioctl(struct drm_de
27106 blob = obj_to_blob(obj);
27107
27108 if (out_resp->length == blob->length) {
27109- blob_ptr = (void *)(unsigned long)out_resp->data;
27110+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
27111 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27112 ret = -EFAULT;
27113 goto done;
27114diff -urNp linux-3.1.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.1.4/drivers/gpu/drm/drm_crtc_helper.c
27115--- linux-3.1.4/drivers/gpu/drm/drm_crtc_helper.c 2011-11-11 15:19:27.000000000 -0500
27116+++ linux-3.1.4/drivers/gpu/drm/drm_crtc_helper.c 2011-11-16 18:40:10.000000000 -0500
27117@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
27118 struct drm_crtc *tmp;
27119 int crtc_mask = 1;
27120
27121- WARN(!crtc, "checking null crtc?\n");
27122+ BUG_ON(!crtc);
27123
27124 dev = crtc->dev;
27125
27126@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
27127 struct drm_encoder *encoder;
27128 bool ret = true;
27129
27130+ pax_track_stack();
27131+
27132 crtc->enabled = drm_helper_crtc_in_use(crtc);
27133 if (!crtc->enabled)
27134 return true;
27135diff -urNp linux-3.1.4/drivers/gpu/drm/drm_drv.c linux-3.1.4/drivers/gpu/drm/drm_drv.c
27136--- linux-3.1.4/drivers/gpu/drm/drm_drv.c 2011-11-11 15:19:27.000000000 -0500
27137+++ linux-3.1.4/drivers/gpu/drm/drm_drv.c 2011-11-16 18:39:07.000000000 -0500
27138@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
27139 /**
27140 * Copy and IOCTL return string to user space
27141 */
27142-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27143+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27144 {
27145 int len;
27146
27147@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
27148
27149 dev = file_priv->minor->dev;
27150 atomic_inc(&dev->ioctl_count);
27151- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27152+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27153 ++file_priv->ioctl_count;
27154
27155 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27156diff -urNp linux-3.1.4/drivers/gpu/drm/drm_fops.c linux-3.1.4/drivers/gpu/drm/drm_fops.c
27157--- linux-3.1.4/drivers/gpu/drm/drm_fops.c 2011-11-11 15:19:27.000000000 -0500
27158+++ linux-3.1.4/drivers/gpu/drm/drm_fops.c 2011-11-16 18:39:07.000000000 -0500
27159@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
27160 }
27161
27162 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27163- atomic_set(&dev->counts[i], 0);
27164+ atomic_set_unchecked(&dev->counts[i], 0);
27165
27166 dev->sigdata.lock = NULL;
27167
27168@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
27169
27170 retcode = drm_open_helper(inode, filp, dev);
27171 if (!retcode) {
27172- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27173- if (!dev->open_count++)
27174+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27175+ if (local_inc_return(&dev->open_count) == 1)
27176 retcode = drm_setup(dev);
27177 }
27178 if (!retcode) {
27179@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
27180
27181 mutex_lock(&drm_global_mutex);
27182
27183- DRM_DEBUG("open_count = %d\n", dev->open_count);
27184+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27185
27186 if (dev->driver->preclose)
27187 dev->driver->preclose(dev, file_priv);
27188@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
27189 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27190 task_pid_nr(current),
27191 (long)old_encode_dev(file_priv->minor->device),
27192- dev->open_count);
27193+ local_read(&dev->open_count));
27194
27195 /* if the master has gone away we can't do anything with the lock */
27196 if (file_priv->minor->master)
27197@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
27198 * End inline drm_release
27199 */
27200
27201- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27202- if (!--dev->open_count) {
27203+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27204+ if (local_dec_and_test(&dev->open_count)) {
27205 if (atomic_read(&dev->ioctl_count)) {
27206 DRM_ERROR("Device busy: %d\n",
27207 atomic_read(&dev->ioctl_count));
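The drm_fops changes replace a plain `dev->open_count` int, bumped with ++/-- and then tested separately, with a local_t driven by local_inc_return() and local_dec_and_test(), so the "first open" and "last close" decisions are derived from the returned value of the same operation that modifies the counter. A user-space analogue of the pattern with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count = 0;

static void do_setup(void)    { puts("first open: device setup"); }
static void do_teardown(void) { puts("last close: device teardown"); }

static void drm_open_demo(void)
{
        /* Like local_inc_return(): the returned value already tells us
         * whether we were the first opener, with no separate re-read of
         * the counter. */
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
                do_setup();
}

static void drm_release_demo(void)
{
        /* Like local_dec_and_test(): true only for the closer that takes
         * the counter to zero. */
        if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
                do_teardown();
}

int main(void)
{
        drm_open_demo();
        drm_open_demo();
        drm_release_demo();
        drm_release_demo();
        return 0;
}

Deriving the decision from the returned value also removes any window between updating the counter and reading it back.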
27208diff -urNp linux-3.1.4/drivers/gpu/drm/drm_global.c linux-3.1.4/drivers/gpu/drm/drm_global.c
27209--- linux-3.1.4/drivers/gpu/drm/drm_global.c 2011-11-11 15:19:27.000000000 -0500
27210+++ linux-3.1.4/drivers/gpu/drm/drm_global.c 2011-11-16 18:39:07.000000000 -0500
27211@@ -36,7 +36,7 @@
27212 struct drm_global_item {
27213 struct mutex mutex;
27214 void *object;
27215- int refcount;
27216+ atomic_t refcount;
27217 };
27218
27219 static struct drm_global_item glob[DRM_GLOBAL_NUM];
27220@@ -49,7 +49,7 @@ void drm_global_init(void)
27221 struct drm_global_item *item = &glob[i];
27222 mutex_init(&item->mutex);
27223 item->object = NULL;
27224- item->refcount = 0;
27225+ atomic_set(&item->refcount, 0);
27226 }
27227 }
27228
27229@@ -59,7 +59,7 @@ void drm_global_release(void)
27230 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
27231 struct drm_global_item *item = &glob[i];
27232 BUG_ON(item->object != NULL);
27233- BUG_ON(item->refcount != 0);
27234+ BUG_ON(atomic_read(&item->refcount) != 0);
27235 }
27236 }
27237
27238@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
27239 void *object;
27240
27241 mutex_lock(&item->mutex);
27242- if (item->refcount == 0) {
27243+ if (atomic_read(&item->refcount) == 0) {
27244 item->object = kzalloc(ref->size, GFP_KERNEL);
27245 if (unlikely(item->object == NULL)) {
27246 ret = -ENOMEM;
27247@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
27248 goto out_err;
27249
27250 }
27251- ++item->refcount;
27252+ atomic_inc(&item->refcount);
27253 ref->object = item->object;
27254 object = item->object;
27255 mutex_unlock(&item->mutex);
27256@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
27257 struct drm_global_item *item = &glob[ref->global_type];
27258
27259 mutex_lock(&item->mutex);
27260- BUG_ON(item->refcount == 0);
27261+ BUG_ON(atomic_read(&item->refcount) == 0);
27262 BUG_ON(ref->object != item->object);
27263- if (--item->refcount == 0) {
27264+ if (atomic_dec_and_test(&item->refcount)) {
27265 ref->release(ref);
27266 item->object = NULL;
27267 }
27268diff -urNp linux-3.1.4/drivers/gpu/drm/drm_info.c linux-3.1.4/drivers/gpu/drm/drm_info.c
27269--- linux-3.1.4/drivers/gpu/drm/drm_info.c 2011-11-11 15:19:27.000000000 -0500
27270+++ linux-3.1.4/drivers/gpu/drm/drm_info.c 2011-11-16 18:40:10.000000000 -0500
27271@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
27272 struct drm_local_map *map;
27273 struct drm_map_list *r_list;
27274
27275- /* Hardcoded from _DRM_FRAME_BUFFER,
27276- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27277- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27278- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27279+ static const char * const types[] = {
27280+ [_DRM_FRAME_BUFFER] = "FB",
27281+ [_DRM_REGISTERS] = "REG",
27282+ [_DRM_SHM] = "SHM",
27283+ [_DRM_AGP] = "AGP",
27284+ [_DRM_SCATTER_GATHER] = "SG",
27285+ [_DRM_CONSISTENT] = "PCI",
27286+ [_DRM_GEM] = "GEM" };
27287 const char *type;
27288 int i;
27289
27290@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
27291 map = r_list->map;
27292 if (!map)
27293 continue;
27294- if (map->type < 0 || map->type > 5)
27295+ if (map->type >= ARRAY_SIZE(types))
27296 type = "??";
27297 else
27298 type = types[map->type];
27299@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
27300 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27301 vma->vm_flags & VM_LOCKED ? 'l' : '-',
27302 vma->vm_flags & VM_IO ? 'i' : '-',
27303+#ifdef CONFIG_GRKERNSEC_HIDESYM
27304+ 0);
27305+#else
27306 vma->vm_pgoff);
27307+#endif
27308
27309 #if defined(__i386__)
27310 pgprot = pgprot_val(vma->vm_page_prot);
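The drm_info.c hunk replaces a positional string table plus a hard-coded `type > 5` bound with designated initializers indexed by the map-type constants and an ARRAY_SIZE() check, which is what lets _DRM_GEM be added without the lookup running past the end of the table. The same pattern in stand-alone form:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

static const char * const type_names[] = {
        [MAP_FB]  = "FB",  [MAP_REG] = "REG", [MAP_SHM] = "SHM",
        [MAP_AGP] = "AGP", [MAP_SG]  = "SG",  [MAP_PCI] = "PCI",
        [MAP_GEM] = "GEM",
};

static const char *type_name(unsigned int type)
{
        /* The bound comes from the table itself, not a magic "5", so the
         * table and the check cannot drift apart. */
        if (type >= ARRAY_SIZE(type_names) || !type_names[type])
                return "??";
        return type_names[type];
}

int main(void)
{
        printf("%s %s %s\n", type_name(MAP_SHM), type_name(MAP_GEM),
               type_name(42));
        return 0;
}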
27311diff -urNp linux-3.1.4/drivers/gpu/drm/drm_ioc32.c linux-3.1.4/drivers/gpu/drm/drm_ioc32.c
27312--- linux-3.1.4/drivers/gpu/drm/drm_ioc32.c 2011-11-11 15:19:27.000000000 -0500
27313+++ linux-3.1.4/drivers/gpu/drm/drm_ioc32.c 2011-11-16 18:39:07.000000000 -0500
27314@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
27315 request = compat_alloc_user_space(nbytes);
27316 if (!access_ok(VERIFY_WRITE, request, nbytes))
27317 return -EFAULT;
27318- list = (struct drm_buf_desc *) (request + 1);
27319+ list = (struct drm_buf_desc __user *) (request + 1);
27320
27321 if (__put_user(count, &request->count)
27322 || __put_user(list, &request->list))
27323@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
27324 request = compat_alloc_user_space(nbytes);
27325 if (!access_ok(VERIFY_WRITE, request, nbytes))
27326 return -EFAULT;
27327- list = (struct drm_buf_pub *) (request + 1);
27328+ list = (struct drm_buf_pub __user *) (request + 1);
27329
27330 if (__put_user(count, &request->count)
27331 || __put_user(list, &request->list))
27332diff -urNp linux-3.1.4/drivers/gpu/drm/drm_ioctl.c linux-3.1.4/drivers/gpu/drm/drm_ioctl.c
27333--- linux-3.1.4/drivers/gpu/drm/drm_ioctl.c 2011-11-11 15:19:27.000000000 -0500
27334+++ linux-3.1.4/drivers/gpu/drm/drm_ioctl.c 2011-11-16 18:39:07.000000000 -0500
27335@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
27336 stats->data[i].value =
27337 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
27338 else
27339- stats->data[i].value = atomic_read(&dev->counts[i]);
27340+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
27341 stats->data[i].type = dev->types[i];
27342 }
27343
27344diff -urNp linux-3.1.4/drivers/gpu/drm/drm_lock.c linux-3.1.4/drivers/gpu/drm/drm_lock.c
27345--- linux-3.1.4/drivers/gpu/drm/drm_lock.c 2011-11-11 15:19:27.000000000 -0500
27346+++ linux-3.1.4/drivers/gpu/drm/drm_lock.c 2011-11-16 18:39:07.000000000 -0500
27347@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
27348 if (drm_lock_take(&master->lock, lock->context)) {
27349 master->lock.file_priv = file_priv;
27350 master->lock.lock_time = jiffies;
27351- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
27352+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
27353 break; /* Got lock */
27354 }
27355
27356@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
27357 return -EINVAL;
27358 }
27359
27360- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
27361+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
27362
27363 if (drm_lock_free(&master->lock, lock->context)) {
27364 /* FIXME: Should really bail out here. */
27365diff -urNp linux-3.1.4/drivers/gpu/drm/i810/i810_dma.c linux-3.1.4/drivers/gpu/drm/i810/i810_dma.c
27366--- linux-3.1.4/drivers/gpu/drm/i810/i810_dma.c 2011-11-11 15:19:27.000000000 -0500
27367+++ linux-3.1.4/drivers/gpu/drm/i810/i810_dma.c 2011-11-16 18:39:07.000000000 -0500
27368@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
27369 dma->buflist[vertex->idx],
27370 vertex->discard, vertex->used);
27371
27372- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
27373- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
27374+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
27375+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
27376 sarea_priv->last_enqueue = dev_priv->counter - 1;
27377 sarea_priv->last_dispatch = (int)hw_status[5];
27378
27379@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
27380 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
27381 mc->last_render);
27382
27383- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
27384- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
27385+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
27386+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
27387 sarea_priv->last_enqueue = dev_priv->counter - 1;
27388 sarea_priv->last_dispatch = (int)hw_status[5];
27389
27390diff -urNp linux-3.1.4/drivers/gpu/drm/i810/i810_drv.h linux-3.1.4/drivers/gpu/drm/i810/i810_drv.h
27391--- linux-3.1.4/drivers/gpu/drm/i810/i810_drv.h 2011-11-11 15:19:27.000000000 -0500
27392+++ linux-3.1.4/drivers/gpu/drm/i810/i810_drv.h 2011-11-16 18:39:07.000000000 -0500
27393@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
27394 int page_flipping;
27395
27396 wait_queue_head_t irq_queue;
27397- atomic_t irq_received;
27398- atomic_t irq_emitted;
27399+ atomic_unchecked_t irq_received;
27400+ atomic_unchecked_t irq_emitted;
27401
27402 int front_offset;
27403 } drm_i810_private_t;
27404diff -urNp linux-3.1.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.1.4/drivers/gpu/drm/i915/i915_debugfs.c
27405--- linux-3.1.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-11 15:19:27.000000000 -0500
27406+++ linux-3.1.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-16 18:39:07.000000000 -0500
27407@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
27408 I915_READ(GTIMR));
27409 }
27410 seq_printf(m, "Interrupts received: %d\n",
27411- atomic_read(&dev_priv->irq_received));
27412+ atomic_read_unchecked(&dev_priv->irq_received));
27413 for (i = 0; i < I915_NUM_RINGS; i++) {
27414 if (IS_GEN6(dev) || IS_GEN7(dev)) {
27415 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
27416@@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file
27417 return ret;
27418
27419 if (opregion->header)
27420- seq_write(m, opregion->header, OPREGION_SIZE);
27421+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
27422
27423 mutex_unlock(&dev->struct_mutex);
27424
27425diff -urNp linux-3.1.4/drivers/gpu/drm/i915/i915_dma.c linux-3.1.4/drivers/gpu/drm/i915/i915_dma.c
27426--- linux-3.1.4/drivers/gpu/drm/i915/i915_dma.c 2011-11-11 15:19:27.000000000 -0500
27427+++ linux-3.1.4/drivers/gpu/drm/i915/i915_dma.c 2011-11-16 18:39:07.000000000 -0500
27428@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
27429 bool can_switch;
27430
27431 spin_lock(&dev->count_lock);
27432- can_switch = (dev->open_count == 0);
27433+ can_switch = (local_read(&dev->open_count) == 0);
27434 spin_unlock(&dev->count_lock);
27435 return can_switch;
27436 }
27437diff -urNp linux-3.1.4/drivers/gpu/drm/i915/i915_drv.h linux-3.1.4/drivers/gpu/drm/i915/i915_drv.h
27438--- linux-3.1.4/drivers/gpu/drm/i915/i915_drv.h 2011-11-11 15:19:27.000000000 -0500
27439+++ linux-3.1.4/drivers/gpu/drm/i915/i915_drv.h 2011-11-16 18:39:07.000000000 -0500
27440@@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
27441 /* render clock increase/decrease */
27442 /* display clock increase/decrease */
27443 /* pll clock increase/decrease */
27444-};
27445+} __no_const;
27446
27447 struct intel_device_info {
27448 u8 gen;
27449@@ -305,7 +305,7 @@ typedef struct drm_i915_private {
27450 int current_page;
27451 int page_flipping;
27452
27453- atomic_t irq_received;
27454+ atomic_unchecked_t irq_received;
27455
27456 /* protects the irq masks */
27457 spinlock_t irq_lock;
27458@@ -882,7 +882,7 @@ struct drm_i915_gem_object {
27459 * will be page flipped away on the next vblank. When it
27460 * reaches 0, dev_priv->pending_flip_queue will be woken up.
27461 */
27462- atomic_t pending_flip;
27463+ atomic_unchecked_t pending_flip;
27464 };
27465
27466 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
27467@@ -1262,7 +1262,7 @@ extern int intel_setup_gmbus(struct drm_
27468 extern void intel_teardown_gmbus(struct drm_device *dev);
27469 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
27470 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
27471-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
27472+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
27473 {
27474 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
27475 }
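The one non-annotation change in this i915_drv.h hunk turns a header-defined `extern inline` into `static inline`. Under the gnu89/gnu_inline rules the kernel builds with, `extern inline` never emits an out-of-line body, so any call the compiler declines to inline needs a separate definition in some .c file or the link fails; `static inline` sidesteps that by giving every translation unit its own copy. A sketch of the two spellings (the comments describe gnu_inline semantics; plain C99 `extern inline` behaves differently):

/* gnu_inline extern inline: this body is only ever used for inlining.
 * No standalone copy is emitted, so a call the compiler does not inline
 * (e.g. at -O0) becomes an undefined reference unless some .c file
 * provides a real definition.  It is deliberately left uncalled here so
 * this sketch links at any optimisation level. */
__attribute__((gnu_inline)) extern inline int twice_extern(int x)
{
        return 2 * x;
}

/* static inline: every includer gets its own (normally folded-away)
 * copy, so there is nothing left for the linker to resolve elsewhere. */
static inline int twice_static(int x)
{
        return 2 * x;
}

int main(void)
{
        return twice_static(21) == 42 ? 0 : 1;
}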
27476diff -urNp linux-3.1.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.1.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
27477--- linux-3.1.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-11-11 15:19:27.000000000 -0500
27478+++ linux-3.1.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-11-16 18:39:07.000000000 -0500
27479@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
27480 i915_gem_clflush_object(obj);
27481
27482 if (obj->base.pending_write_domain)
27483- cd->flips |= atomic_read(&obj->pending_flip);
27484+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
27485
27486 /* The actual obj->write_domain will be updated with
27487 * pending_write_domain after we emit the accumulated flush for all
27488diff -urNp linux-3.1.4/drivers/gpu/drm/i915/i915_irq.c linux-3.1.4/drivers/gpu/drm/i915/i915_irq.c
27489--- linux-3.1.4/drivers/gpu/drm/i915/i915_irq.c 2011-11-11 15:19:27.000000000 -0500
27490+++ linux-3.1.4/drivers/gpu/drm/i915/i915_irq.c 2011-11-16 18:39:07.000000000 -0500
27491@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler
27492 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
27493 struct drm_i915_master_private *master_priv;
27494
27495- atomic_inc(&dev_priv->irq_received);
27496+ atomic_inc_unchecked(&dev_priv->irq_received);
27497
27498 /* disable master interrupt before clearing iir */
27499 de_ier = I915_READ(DEIER);
27500@@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(
27501 struct drm_i915_master_private *master_priv;
27502 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
27503
27504- atomic_inc(&dev_priv->irq_received);
27505+ atomic_inc_unchecked(&dev_priv->irq_received);
27506
27507 if (IS_GEN6(dev))
27508 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
27509@@ -1228,7 +1228,7 @@ static irqreturn_t i915_driver_irq_handl
27510 int ret = IRQ_NONE, pipe;
27511 bool blc_event = false;
27512
27513- atomic_inc(&dev_priv->irq_received);
27514+ atomic_inc_unchecked(&dev_priv->irq_received);
27515
27516 iir = I915_READ(IIR);
27517
27518@@ -1740,7 +1740,7 @@ static void ironlake_irq_preinstall(stru
27519 {
27520 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
27521
27522- atomic_set(&dev_priv->irq_received, 0);
27523+ atomic_set_unchecked(&dev_priv->irq_received, 0);
27524
27525 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
27526 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
27527@@ -1904,7 +1904,7 @@ static void i915_driver_irq_preinstall(s
27528 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
27529 int pipe;
27530
27531- atomic_set(&dev_priv->irq_received, 0);
27532+ atomic_set_unchecked(&dev_priv->irq_received, 0);
27533
27534 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
27535 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
27536diff -urNp linux-3.1.4/drivers/gpu/drm/i915/intel_display.c linux-3.1.4/drivers/gpu/drm/i915/intel_display.c
27537--- linux-3.1.4/drivers/gpu/drm/i915/intel_display.c 2011-11-26 19:57:29.000000000 -0500
27538+++ linux-3.1.4/drivers/gpu/drm/i915/intel_display.c 2011-11-26 20:00:43.000000000 -0500
27539@@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crt
27540
27541 wait_event(dev_priv->pending_flip_queue,
27542 atomic_read(&dev_priv->mm.wedged) ||
27543- atomic_read(&obj->pending_flip) == 0);
27544+ atomic_read_unchecked(&obj->pending_flip) == 0);
27545
27546 /* Big Hammer, we also need to ensure that any pending
27547 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
27548@@ -2826,7 +2826,7 @@ static void intel_crtc_wait_for_pending_
27549 obj = to_intel_framebuffer(crtc->fb)->obj;
27550 dev_priv = crtc->dev->dev_private;
27551 wait_event(dev_priv->pending_flip_queue,
27552- atomic_read(&obj->pending_flip) == 0);
27553+ atomic_read_unchecked(&obj->pending_flip) == 0);
27554 }
27555
27556 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
27557@@ -6676,7 +6676,7 @@ static void do_intel_finish_page_flip(st
27558
27559 atomic_clear_mask(1 << intel_crtc->plane,
27560 &obj->pending_flip.counter);
27561- if (atomic_read(&obj->pending_flip) == 0)
27562+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
27563 wake_up(&dev_priv->pending_flip_queue);
27564
27565 schedule_work(&work->work);
27566@@ -6965,7 +6965,7 @@ static int intel_crtc_page_flip(struct d
27567 /* Block clients from rendering to the new back buffer until
27568 * the flip occurs and the object is no longer visible.
27569 */
27570- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
27571+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
27572
27573 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
27574 if (ret)
27575@@ -6979,7 +6979,7 @@ static int intel_crtc_page_flip(struct d
27576 return 0;
27577
27578 cleanup_pending:
27579- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
27580+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
27581 cleanup_objs:
27582 drm_gem_object_unreference(&work->old_fb_obj->base);
27583 drm_gem_object_unreference(&obj->base);
27584diff -urNp linux-3.1.4/drivers/gpu/drm/mga/mga_drv.h linux-3.1.4/drivers/gpu/drm/mga/mga_drv.h
27585--- linux-3.1.4/drivers/gpu/drm/mga/mga_drv.h 2011-11-11 15:19:27.000000000 -0500
27586+++ linux-3.1.4/drivers/gpu/drm/mga/mga_drv.h 2011-11-16 18:39:07.000000000 -0500
27587@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
27588 u32 clear_cmd;
27589 u32 maccess;
27590
27591- atomic_t vbl_received; /**< Number of vblanks received. */
27592+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
27593 wait_queue_head_t fence_queue;
27594- atomic_t last_fence_retired;
27595+ atomic_unchecked_t last_fence_retired;
27596 u32 next_fence_to_post;
27597
27598 unsigned int fb_cpp;
27599diff -urNp linux-3.1.4/drivers/gpu/drm/mga/mga_irq.c linux-3.1.4/drivers/gpu/drm/mga/mga_irq.c
27600--- linux-3.1.4/drivers/gpu/drm/mga/mga_irq.c 2011-11-11 15:19:27.000000000 -0500
27601+++ linux-3.1.4/drivers/gpu/drm/mga/mga_irq.c 2011-11-16 18:39:07.000000000 -0500
27602@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
27603 if (crtc != 0)
27604 return 0;
27605
27606- return atomic_read(&dev_priv->vbl_received);
27607+ return atomic_read_unchecked(&dev_priv->vbl_received);
27608 }
27609
27610
27611@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
27612 /* VBLANK interrupt */
27613 if (status & MGA_VLINEPEN) {
27614 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
27615- atomic_inc(&dev_priv->vbl_received);
27616+ atomic_inc_unchecked(&dev_priv->vbl_received);
27617 drm_handle_vblank(dev, 0);
27618 handled = 1;
27619 }
27620@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
27621 if ((prim_start & ~0x03) != (prim_end & ~0x03))
27622 MGA_WRITE(MGA_PRIMEND, prim_end);
27623
27624- atomic_inc(&dev_priv->last_fence_retired);
27625+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
27626 DRM_WAKEUP(&dev_priv->fence_queue);
27627 handled = 1;
27628 }
27629@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
27630 * using fences.
27631 */
27632 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
27633- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
27634+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
27635 - *sequence) <= (1 << 23)));
27636
27637 *sequence = cur_fence;
27638diff -urNp linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_bios.c
27639--- linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-11-11 15:19:27.000000000 -0500
27640+++ linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-11-16 18:39:07.000000000 -0500
27641@@ -201,7 +201,7 @@ struct methods {
27642 const char desc[8];
27643 void (*loadbios)(struct drm_device *, uint8_t *);
27644 const bool rw;
27645-};
27646+} __do_const;
27647
27648 static struct methods shadow_methods[] = {
27649 { "PRAMIN", load_vbios_pramin, true },
27650@@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct d
27651 struct bit_table {
27652 const char id;
27653 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
27654-};
27655+} __no_const;
27656
27657 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
27658
27659diff -urNp linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_drv.h
27660--- linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-11-11 15:19:27.000000000 -0500
27661+++ linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-11-16 18:39:07.000000000 -0500
27662@@ -238,7 +238,7 @@ struct nouveau_channel {
27663 struct list_head pending;
27664 uint32_t sequence;
27665 uint32_t sequence_ack;
27666- atomic_t last_sequence_irq;
27667+ atomic_unchecked_t last_sequence_irq;
27668 struct nouveau_vma vma;
27669 } fence;
27670
27671@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
27672 u32 handle, u16 class);
27673 void (*set_tile_region)(struct drm_device *dev, int i);
27674 void (*tlb_flush)(struct drm_device *, int engine);
27675-};
27676+} __no_const;
27677
27678 struct nouveau_instmem_engine {
27679 void *priv;
27680@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
27681 struct nouveau_mc_engine {
27682 int (*init)(struct drm_device *dev);
27683 void (*takedown)(struct drm_device *dev);
27684-};
27685+} __no_const;
27686
27687 struct nouveau_timer_engine {
27688 int (*init)(struct drm_device *dev);
27689 void (*takedown)(struct drm_device *dev);
27690 uint64_t (*read)(struct drm_device *dev);
27691-};
27692+} __no_const;
27693
27694 struct nouveau_fb_engine {
27695 int num_tiles;
27696@@ -513,7 +513,7 @@ struct nouveau_vram_engine {
27697 void (*put)(struct drm_device *, struct nouveau_mem **);
27698
27699 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
27700-};
27701+} __no_const;
27702
27703 struct nouveau_engine {
27704 struct nouveau_instmem_engine instmem;
27705@@ -660,7 +660,7 @@ struct drm_nouveau_private {
27706 struct drm_global_reference mem_global_ref;
27707 struct ttm_bo_global_ref bo_global_ref;
27708 struct ttm_bo_device bdev;
27709- atomic_t validate_sequence;
27710+ atomic_unchecked_t validate_sequence;
27711 } ttm;
27712
27713 struct {
27714diff -urNp linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_fence.c
27715--- linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-11-26 19:57:27.000000000 -0500
27716+++ linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-11-26 20:00:06.000000000 -0500
27717@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
27718 if (USE_REFCNT(dev))
27719 sequence = nvchan_rd32(chan, 0x48);
27720 else
27721- sequence = atomic_read(&chan->fence.last_sequence_irq);
27722+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
27723
27724 if (chan->fence.sequence_ack == sequence)
27725 goto out;
27726@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouvea
27727 return ret;
27728 }
27729
27730- atomic_set(&chan->fence.last_sequence_irq, 0);
27731+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
27732 return 0;
27733 }
27734
27735diff -urNp linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_gem.c
27736--- linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-11-11 15:19:27.000000000 -0500
27737+++ linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-11-16 18:39:07.000000000 -0500
27738@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *ch
27739 int trycnt = 0;
27740 int ret, i;
27741
27742- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
27743+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
27744 retry:
27745 if (++trycnt > 100000) {
27746 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
27747diff -urNp linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_state.c
27748--- linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-11-11 15:19:27.000000000 -0500
27749+++ linux-3.1.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-11-16 18:39:07.000000000 -0500
27750@@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switc
27751 bool can_switch;
27752
27753 spin_lock(&dev->count_lock);
27754- can_switch = (dev->open_count == 0);
27755+ can_switch = (local_read(&dev->open_count) == 0);
27756 spin_unlock(&dev->count_lock);
27757 return can_switch;
27758 }
27759diff -urNp linux-3.1.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.1.4/drivers/gpu/drm/nouveau/nv04_graph.c
27760--- linux-3.1.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-11-11 15:19:27.000000000 -0500
27761+++ linux-3.1.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-11-16 18:39:07.000000000 -0500
27762@@ -554,7 +554,7 @@ static int
27763 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
27764 u32 class, u32 mthd, u32 data)
27765 {
27766- atomic_set(&chan->fence.last_sequence_irq, data);
27767+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
27768 return 0;
27769 }
27770
27771diff -urNp linux-3.1.4/drivers/gpu/drm/r128/r128_cce.c linux-3.1.4/drivers/gpu/drm/r128/r128_cce.c
27772--- linux-3.1.4/drivers/gpu/drm/r128/r128_cce.c 2011-11-11 15:19:27.000000000 -0500
27773+++ linux-3.1.4/drivers/gpu/drm/r128/r128_cce.c 2011-11-16 18:39:07.000000000 -0500
27774@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
27775
27776 /* GH: Simple idle check.
27777 */
27778- atomic_set(&dev_priv->idle_count, 0);
27779+ atomic_set_unchecked(&dev_priv->idle_count, 0);
27780
27781 /* We don't support anything other than bus-mastering ring mode,
27782 * but the ring can be in either AGP or PCI space for the ring
27783diff -urNp linux-3.1.4/drivers/gpu/drm/r128/r128_drv.h linux-3.1.4/drivers/gpu/drm/r128/r128_drv.h
27784--- linux-3.1.4/drivers/gpu/drm/r128/r128_drv.h 2011-11-11 15:19:27.000000000 -0500
27785+++ linux-3.1.4/drivers/gpu/drm/r128/r128_drv.h 2011-11-16 18:39:07.000000000 -0500
27786@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
27787 int is_pci;
27788 unsigned long cce_buffers_offset;
27789
27790- atomic_t idle_count;
27791+ atomic_unchecked_t idle_count;
27792
27793 int page_flipping;
27794 int current_page;
27795 u32 crtc_offset;
27796 u32 crtc_offset_cntl;
27797
27798- atomic_t vbl_received;
27799+ atomic_unchecked_t vbl_received;
27800
27801 u32 color_fmt;
27802 unsigned int front_offset;
27803diff -urNp linux-3.1.4/drivers/gpu/drm/r128/r128_irq.c linux-3.1.4/drivers/gpu/drm/r128/r128_irq.c
27804--- linux-3.1.4/drivers/gpu/drm/r128/r128_irq.c 2011-11-11 15:19:27.000000000 -0500
27805+++ linux-3.1.4/drivers/gpu/drm/r128/r128_irq.c 2011-11-16 18:39:07.000000000 -0500
27806@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
27807 if (crtc != 0)
27808 return 0;
27809
27810- return atomic_read(&dev_priv->vbl_received);
27811+ return atomic_read_unchecked(&dev_priv->vbl_received);
27812 }
27813
27814 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
27815@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
27816 /* VBLANK interrupt */
27817 if (status & R128_CRTC_VBLANK_INT) {
27818 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
27819- atomic_inc(&dev_priv->vbl_received);
27820+ atomic_inc_unchecked(&dev_priv->vbl_received);
27821 drm_handle_vblank(dev, 0);
27822 return IRQ_HANDLED;
27823 }
27824diff -urNp linux-3.1.4/drivers/gpu/drm/r128/r128_state.c linux-3.1.4/drivers/gpu/drm/r128/r128_state.c
27825--- linux-3.1.4/drivers/gpu/drm/r128/r128_state.c 2011-11-11 15:19:27.000000000 -0500
27826+++ linux-3.1.4/drivers/gpu/drm/r128/r128_state.c 2011-11-16 18:39:07.000000000 -0500
27827@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
27828
27829 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
27830 {
27831- if (atomic_read(&dev_priv->idle_count) == 0)
27832+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
27833 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
27834 else
27835- atomic_set(&dev_priv->idle_count, 0);
27836+ atomic_set_unchecked(&dev_priv->idle_count, 0);
27837 }
27838
27839 #endif
27840diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/atom.c linux-3.1.4/drivers/gpu/drm/radeon/atom.c
27841--- linux-3.1.4/drivers/gpu/drm/radeon/atom.c 2011-11-11 15:19:27.000000000 -0500
27842+++ linux-3.1.4/drivers/gpu/drm/radeon/atom.c 2011-11-16 19:09:42.000000000 -0500
27843@@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct c
27844 char name[512];
27845 int i;
27846
27847+ pax_track_stack();
27848+
27849 if (!ctx)
27850 return NULL;
27851
27852diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.1.4/drivers/gpu/drm/radeon/mkregtable.c
27853--- linux-3.1.4/drivers/gpu/drm/radeon/mkregtable.c 2011-11-11 15:19:27.000000000 -0500
27854+++ linux-3.1.4/drivers/gpu/drm/radeon/mkregtable.c 2011-11-16 18:39:07.000000000 -0500
27855@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
27856 regex_t mask_rex;
27857 regmatch_t match[4];
27858 char buf[1024];
27859- size_t end;
27860+ long end;
27861 int len;
27862 int done = 0;
27863 int r;
27864 unsigned o;
27865 struct offset *offset;
27866 char last_reg_s[10];
27867- int last_reg;
27868+ unsigned long last_reg;
27869
27870 if (regcomp
27871 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
27872diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.1.4/drivers/gpu/drm/radeon/radeon_atombios.c
27873--- linux-3.1.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-11 15:19:27.000000000 -0500
27874+++ linux-3.1.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-16 18:40:10.000000000 -0500
27875@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
27876 struct radeon_gpio_rec gpio;
27877 struct radeon_hpd hpd;
27878
27879+ pax_track_stack();
27880+
27881 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
27882 return false;
27883
27884diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.1.4/drivers/gpu/drm/radeon/radeon_device.c
27885--- linux-3.1.4/drivers/gpu/drm/radeon/radeon_device.c 2011-11-11 15:19:27.000000000 -0500
27886+++ linux-3.1.4/drivers/gpu/drm/radeon/radeon_device.c 2011-11-16 18:39:07.000000000 -0500
27887@@ -684,7 +684,7 @@ static bool radeon_switcheroo_can_switch
27888 bool can_switch;
27889
27890 spin_lock(&dev->count_lock);
27891- can_switch = (dev->open_count == 0);
27892+ can_switch = (local_read(&dev->open_count) == 0);
27893 spin_unlock(&dev->count_lock);
27894 return can_switch;
27895 }
27896diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/radeon_display.c linux-3.1.4/drivers/gpu/drm/radeon/radeon_display.c
27897--- linux-3.1.4/drivers/gpu/drm/radeon/radeon_display.c 2011-11-11 15:19:27.000000000 -0500
27898+++ linux-3.1.4/drivers/gpu/drm/radeon/radeon_display.c 2011-11-16 18:40:10.000000000 -0500
27899@@ -925,6 +925,8 @@ void radeon_compute_pll_legacy(struct ra
27900 uint32_t post_div;
27901 u32 pll_out_min, pll_out_max;
27902
27903+ pax_track_stack();
27904+
27905 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
27906 freq = freq * 1000;
27907
27908diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.1.4/drivers/gpu/drm/radeon/radeon_drv.h
27909--- linux-3.1.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-11 15:19:27.000000000 -0500
27910+++ linux-3.1.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-16 18:39:07.000000000 -0500
27911@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
27912
27913 /* SW interrupt */
27914 wait_queue_head_t swi_queue;
27915- atomic_t swi_emitted;
27916+ atomic_unchecked_t swi_emitted;
27917 int vblank_crtc;
27918 uint32_t irq_enable_reg;
27919 uint32_t r500_disp_irq_reg;
27920diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.1.4/drivers/gpu/drm/radeon/radeon_fence.c
27921--- linux-3.1.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-11 15:19:27.000000000 -0500
27922+++ linux-3.1.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-16 18:39:07.000000000 -0500
27923@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
27924 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
27925 return 0;
27926 }
27927- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
27928+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
27929 if (!rdev->cp.ready)
27930 /* FIXME: cp is not running assume everythings is done right
27931 * away
27932@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
27933 return r;
27934 }
27935 radeon_fence_write(rdev, 0);
27936- atomic_set(&rdev->fence_drv.seq, 0);
27937+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
27938 INIT_LIST_HEAD(&rdev->fence_drv.created);
27939 INIT_LIST_HEAD(&rdev->fence_drv.emited);
27940 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
27941diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/radeon.h linux-3.1.4/drivers/gpu/drm/radeon/radeon.h
27942--- linux-3.1.4/drivers/gpu/drm/radeon/radeon.h 2011-11-11 15:19:27.000000000 -0500
27943+++ linux-3.1.4/drivers/gpu/drm/radeon/radeon.h 2011-11-16 18:39:07.000000000 -0500
27944@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_d
27945 */
27946 struct radeon_fence_driver {
27947 uint32_t scratch_reg;
27948- atomic_t seq;
27949+ atomic_unchecked_t seq;
27950 uint32_t last_seq;
27951 unsigned long last_jiffies;
27952 unsigned long last_timeout;
27953@@ -962,7 +962,7 @@ struct radeon_asic {
27954 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
27955 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
27956 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
27957-};
27958+} __no_const;
27959
27960 /*
27961 * Asic structures
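struct radeon_asic is a table of function pointers that the driver fills in at probe time depending on the detected chip, so it cannot live in read-only memory. The __no_const marker tells PaX's constify plugin to leave this one ops structure writable while everything else of that shape gets constified. Sketch, assuming the __no_const attribute from the constify plugin (names below are illustrative):

    struct asic_ops {
            int  (*init)(void *dev);
            void (*fini)(void *dev);
    } __no_const;                           /* opt this ops table out of constification */

    static struct asic_ops ops;             /* legitimately written at probe time */

    static void pick_asic(int hw_is_new, int (*init_new)(void *), int (*init_old)(void *))
    {
            ops.init = hw_is_new ? init_new : init_old;
    }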
27962diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.1.4/drivers/gpu/drm/radeon/radeon_ioc32.c
27963--- linux-3.1.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-11 15:19:27.000000000 -0500
27964+++ linux-3.1.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-16 18:39:07.000000000 -0500
27965@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
27966 request = compat_alloc_user_space(sizeof(*request));
27967 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
27968 || __put_user(req32.param, &request->param)
27969- || __put_user((void __user *)(unsigned long)req32.value,
27970+ || __put_user((unsigned long)req32.value,
27971 &request->value))
27972 return -EFAULT;
27973
27974diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.1.4/drivers/gpu/drm/radeon/radeon_irq.c
27975--- linux-3.1.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-11 15:19:27.000000000 -0500
27976+++ linux-3.1.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-16 18:39:07.000000000 -0500
27977@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
27978 unsigned int ret;
27979 RING_LOCALS;
27980
27981- atomic_inc(&dev_priv->swi_emitted);
27982- ret = atomic_read(&dev_priv->swi_emitted);
27983+ atomic_inc_unchecked(&dev_priv->swi_emitted);
27984+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
27985
27986 BEGIN_RING(4);
27987 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
27988@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
27989 drm_radeon_private_t *dev_priv =
27990 (drm_radeon_private_t *) dev->dev_private;
27991
27992- atomic_set(&dev_priv->swi_emitted, 0);
27993+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
27994 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
27995
27996 dev->max_vblank_count = 0x001fffff;
27997diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.1.4/drivers/gpu/drm/radeon/radeon_state.c
27998--- linux-3.1.4/drivers/gpu/drm/radeon/radeon_state.c 2011-11-11 15:19:27.000000000 -0500
27999+++ linux-3.1.4/drivers/gpu/drm/radeon/radeon_state.c 2011-11-16 18:39:07.000000000 -0500
28000@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
28001 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28002 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28003
28004- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28005+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28006 sarea_priv->nbox * sizeof(depth_boxes[0])))
28007 return -EFAULT;
28008
28009@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
28010 {
28011 drm_radeon_private_t *dev_priv = dev->dev_private;
28012 drm_radeon_getparam_t *param = data;
28013- int value;
28014+ int value = 0;
28015
28016 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28017
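Two defensive tweaks in radeon_state.c: the clip-rect count in sarea_priv sits in the SAREA, which is mapped into userspace, so presumably it can change between the clamp a few lines earlier and the copy; re-checking it right at the DRM_COPY_FROM_USER keeps the write inside the on-stack depth_boxes array. Initialising value to 0 in radeon_cp_getparam means an unrecognised parameter can no longer copy uninitialised stack bytes back to userspace. A rough sketch of the re-check idea (types and names below are hypothetical):

    #define MAX_BOXES 12    /* stands in for RADEON_NR_SAREA_CLIPRECTS */

    static int fetch_boxes(unsigned int nbox, const void __user *src,
                           struct clip_box *out)
    {
            /* re-validate the shared-memory count at the point of use */
            if (nbox > MAX_BOXES)
                    return -EFAULT;
            if (copy_from_user(out, src, nbox * sizeof(*out)))
                    return -EFAULT;
            return 0;
    }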
28018diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.1.4/drivers/gpu/drm/radeon/radeon_ttm.c
28019--- linux-3.1.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-11 15:19:27.000000000 -0500
28020+++ linux-3.1.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-16 18:39:07.000000000 -0500
28021@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struc
28022 }
28023 if (unlikely(ttm_vm_ops == NULL)) {
28024 ttm_vm_ops = vma->vm_ops;
28025- radeon_ttm_vm_ops = *ttm_vm_ops;
28026- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28027+ pax_open_kernel();
28028+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28029+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28030+ pax_close_kernel();
28031 }
28032 vma->vm_ops = &radeon_ttm_vm_ops;
28033 return 0;
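radeon_ttm_vm_ops is a static table of function pointers, which constification pushes into read-only memory, so the one-time "clone TTM's vm_ops and swap in our fault handler" setup has to happen inside a pax_open_kernel()/pax_close_kernel() window, with the copy done via memcpy and the pointer written through an explicit cast rather than a plain assignment. Sketch of the idiom, assuming the PaX helpers:

    static struct vm_operations_struct my_vm_ops;   /* constified -> read-only */

    static void clone_vm_ops(const struct vm_operations_struct *base,
                             int (*my_fault)(struct vm_area_struct *, struct vm_fault *))
    {
            pax_open_kernel();                      /* briefly allow the write */
            memcpy((void *)&my_vm_ops, base, sizeof(my_vm_ops));
            *(void **)&my_vm_ops.fault = my_fault;  /* sidestep the const typing */
            pax_close_kernel();
    }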
28034diff -urNp linux-3.1.4/drivers/gpu/drm/radeon/rs690.c linux-3.1.4/drivers/gpu/drm/radeon/rs690.c
28035--- linux-3.1.4/drivers/gpu/drm/radeon/rs690.c 2011-11-11 15:19:27.000000000 -0500
28036+++ linux-3.1.4/drivers/gpu/drm/radeon/rs690.c 2011-11-16 18:39:07.000000000 -0500
28037@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
28038 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28039 rdev->pm.sideport_bandwidth.full)
28040 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28041- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28042+ read_delay_latency.full = dfixed_const(800 * 1000);
28043 read_delay_latency.full = dfixed_div(read_delay_latency,
28044 rdev->pm.igp_sideport_mclk);
28045+ a.full = dfixed_const(370);
28046+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28047 } else {
28048 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28049 rdev->pm.k8_bandwidth.full)
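The rs690 change is a plain overflow fix rather than a hardening conversion: dfixed_const() shifts its argument left by 12 bits into a 32-bit 20.12 fixed-point word, so the integer part must stay below 2^20 = 1,048,576. The old initialiser exceeded that, so the hunk builds 800 * 1000 as the constant, divides by the sideport memory clock, and only then multiplies by 370 with dfixed_mul(), keeping every intermediate in range:

    /* dfixed_const(x) is roughly (u32)(x << 12), i.e. 20.12 fixed point      */
    /*   370 * 800 * 1000 = 296,000,000  -> wraps once shifted left by 12     */
    /*         800 * 1000 =     800,000  -> fits; the 370 factor is applied   */
    /*                                      after the divide via dfixed_mul() */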
28050diff -urNp linux-3.1.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.1.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
28051--- linux-3.1.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-11-11 15:19:27.000000000 -0500
28052+++ linux-3.1.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-11-16 18:39:07.000000000 -0500
28053@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
28054 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28055 struct shrink_control *sc)
28056 {
28057- static atomic_t start_pool = ATOMIC_INIT(0);
28058+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28059 unsigned i;
28060- unsigned pool_offset = atomic_add_return(1, &start_pool);
28061+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28062 struct ttm_page_pool *pool;
28063 int shrink_pages = sc->nr_to_scan;
28064
28065diff -urNp linux-3.1.4/drivers/gpu/drm/via/via_drv.h linux-3.1.4/drivers/gpu/drm/via/via_drv.h
28066--- linux-3.1.4/drivers/gpu/drm/via/via_drv.h 2011-11-11 15:19:27.000000000 -0500
28067+++ linux-3.1.4/drivers/gpu/drm/via/via_drv.h 2011-11-16 18:39:07.000000000 -0500
28068@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28069 typedef uint32_t maskarray_t[5];
28070
28071 typedef struct drm_via_irq {
28072- atomic_t irq_received;
28073+ atomic_unchecked_t irq_received;
28074 uint32_t pending_mask;
28075 uint32_t enable_mask;
28076 wait_queue_head_t irq_queue;
28077@@ -75,7 +75,7 @@ typedef struct drm_via_private {
28078 struct timeval last_vblank;
28079 int last_vblank_valid;
28080 unsigned usec_per_vblank;
28081- atomic_t vbl_received;
28082+ atomic_unchecked_t vbl_received;
28083 drm_via_state_t hc_state;
28084 char pci_buf[VIA_PCI_BUF_SIZE];
28085 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28086diff -urNp linux-3.1.4/drivers/gpu/drm/via/via_irq.c linux-3.1.4/drivers/gpu/drm/via/via_irq.c
28087--- linux-3.1.4/drivers/gpu/drm/via/via_irq.c 2011-11-11 15:19:27.000000000 -0500
28088+++ linux-3.1.4/drivers/gpu/drm/via/via_irq.c 2011-11-16 18:39:07.000000000 -0500
28089@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
28090 if (crtc != 0)
28091 return 0;
28092
28093- return atomic_read(&dev_priv->vbl_received);
28094+ return atomic_read_unchecked(&dev_priv->vbl_received);
28095 }
28096
28097 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28098@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
28099
28100 status = VIA_READ(VIA_REG_INTERRUPT);
28101 if (status & VIA_IRQ_VBLANK_PENDING) {
28102- atomic_inc(&dev_priv->vbl_received);
28103- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28104+ atomic_inc_unchecked(&dev_priv->vbl_received);
28105+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28106 do_gettimeofday(&cur_vblank);
28107 if (dev_priv->last_vblank_valid) {
28108 dev_priv->usec_per_vblank =
28109@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28110 dev_priv->last_vblank = cur_vblank;
28111 dev_priv->last_vblank_valid = 1;
28112 }
28113- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28114+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28115 DRM_DEBUG("US per vblank is: %u\n",
28116 dev_priv->usec_per_vblank);
28117 }
28118@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28119
28120 for (i = 0; i < dev_priv->num_irqs; ++i) {
28121 if (status & cur_irq->pending_mask) {
28122- atomic_inc(&cur_irq->irq_received);
28123+ atomic_inc_unchecked(&cur_irq->irq_received);
28124 DRM_WAKEUP(&cur_irq->irq_queue);
28125 handled = 1;
28126 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28127@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
28128 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28129 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28130 masks[irq][4]));
28131- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28132+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28133 } else {
28134 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28135 (((cur_irq_sequence =
28136- atomic_read(&cur_irq->irq_received)) -
28137+ atomic_read_unchecked(&cur_irq->irq_received)) -
28138 *sequence) <= (1 << 23)));
28139 }
28140 *sequence = cur_irq_sequence;
28141@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
28142 }
28143
28144 for (i = 0; i < dev_priv->num_irqs; ++i) {
28145- atomic_set(&cur_irq->irq_received, 0);
28146+ atomic_set_unchecked(&cur_irq->irq_received, 0);
28147 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28148 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28149 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28150@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
28151 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28152 case VIA_IRQ_RELATIVE:
28153 irqwait->request.sequence +=
28154- atomic_read(&cur_irq->irq_received);
28155+ atomic_read_unchecked(&cur_irq->irq_received);
28156 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28157 case VIA_IRQ_ABSOLUTE:
28158 break;
28159diff -urNp linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28160--- linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-11-11 15:19:27.000000000 -0500
28161+++ linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-11-16 18:39:07.000000000 -0500
28162@@ -240,7 +240,7 @@ struct vmw_private {
28163 * Fencing and IRQs.
28164 */
28165
28166- atomic_t fence_seq;
28167+ atomic_unchecked_t fence_seq;
28168 wait_queue_head_t fence_queue;
28169 wait_queue_head_t fifo_queue;
28170 atomic_t fence_queue_waiters;
28171diff -urNp linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28172--- linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-11-11 15:19:27.000000000 -0500
28173+++ linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-11-16 18:39:07.000000000 -0500
28174@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
28175 struct drm_vmw_fence_rep fence_rep;
28176 struct drm_vmw_fence_rep __user *user_fence_rep;
28177 int ret;
28178- void *user_cmd;
28179+ void __user *user_cmd;
28180 void *cmd;
28181 uint32_t sequence;
28182 struct vmw_sw_context *sw_context = &dev_priv->ctx;
28183diff -urNp linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
28184--- linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-11-11 15:19:27.000000000 -0500
28185+++ linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-11-16 18:39:07.000000000 -0500
28186@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
28187 while (!vmw_lag_lt(queue, us)) {
28188 spin_lock(&queue->lock);
28189 if (list_empty(&queue->head))
28190- sequence = atomic_read(&dev_priv->fence_seq);
28191+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
28192 else {
28193 fence = list_first_entry(&queue->head,
28194 struct vmw_fence, head);
28195diff -urNp linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28196--- linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-11-11 15:19:27.000000000 -0500
28197+++ linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-11-16 18:39:07.000000000 -0500
28198@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
28199 (unsigned int) min,
28200 (unsigned int) fifo->capabilities);
28201
28202- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
28203+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
28204 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
28205 vmw_fence_queue_init(&fifo->fence_queue);
28206 return vmw_fifo_send_fence(dev_priv, &dummy);
28207@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
28208 if (reserveable)
28209 iowrite32(bytes, fifo_mem +
28210 SVGA_FIFO_RESERVED);
28211- return fifo_mem + (next_cmd >> 2);
28212+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
28213 } else {
28214 need_bounce = true;
28215 }
28216@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
28217
28218 fm = vmw_fifo_reserve(dev_priv, bytes);
28219 if (unlikely(fm == NULL)) {
28220- *sequence = atomic_read(&dev_priv->fence_seq);
28221+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
28222 ret = -ENOMEM;
28223 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
28224 false, 3*HZ);
28225@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
28226 }
28227
28228 do {
28229- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
28230+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
28231 } while (*sequence == 0);
28232
28233 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
28234diff -urNp linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28235--- linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-11-11 15:19:27.000000000 -0500
28236+++ linux-3.1.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-11-16 18:39:07.000000000 -0500
28237@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
28238 * emitted. Then the fence is stale and signaled.
28239 */
28240
28241- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
28242+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
28243 > VMW_FENCE_WRAP);
28244
28245 return ret;
28246@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
28247
28248 if (fifo_idle)
28249 down_read(&fifo_state->rwsem);
28250- signal_seq = atomic_read(&dev_priv->fence_seq);
28251+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
28252 ret = 0;
28253
28254 for (;;) {
28255diff -urNp linux-3.1.4/drivers/hid/hid-core.c linux-3.1.4/drivers/hid/hid-core.c
28256--- linux-3.1.4/drivers/hid/hid-core.c 2011-11-11 15:19:27.000000000 -0500
28257+++ linux-3.1.4/drivers/hid/hid-core.c 2011-11-16 18:39:07.000000000 -0500
28258@@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device
28259
28260 int hid_add_device(struct hid_device *hdev)
28261 {
28262- static atomic_t id = ATOMIC_INIT(0);
28263+ static atomic_unchecked_t id = ATOMIC_INIT(0);
28264 int ret;
28265
28266 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28267@@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hd
28268 /* XXX hack, any other cleaner solution after the driver core
28269 * is converted to allow more than 20 bytes as the device name? */
28270 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28271- hdev->vendor, hdev->product, atomic_inc_return(&id));
28272+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28273
28274 hid_debug_register(hdev, dev_name(&hdev->dev));
28275 ret = device_add(&hdev->dev);
28276diff -urNp linux-3.1.4/drivers/hid/usbhid/hiddev.c linux-3.1.4/drivers/hid/usbhid/hiddev.c
28277--- linux-3.1.4/drivers/hid/usbhid/hiddev.c 2011-11-11 15:19:27.000000000 -0500
28278+++ linux-3.1.4/drivers/hid/usbhid/hiddev.c 2011-11-16 18:39:07.000000000 -0500
28279@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
28280 break;
28281
28282 case HIDIOCAPPLICATION:
28283- if (arg < 0 || arg >= hid->maxapplication)
28284+ if (arg >= hid->maxapplication)
28285 break;
28286
28287 for (i = 0; i < hid->maxcollection; i++)
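In the HIDIOCAPPLICATION case arg is an unsigned long, so the old arg < 0 half of the test could never be true; the single arg >= hid->maxapplication comparison already rejects every out-of-range value, and dropping the dead check also silences signed/unsigned comparison warnings. A one-liner illustrating the point:

    static int check_index(unsigned long arg, unsigned int max)
    {
            /* arg is unsigned, so "arg < 0" would be dead code; one bound suffices */
            return arg >= max ? -EINVAL : 0;
    }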
28288diff -urNp linux-3.1.4/drivers/hwmon/acpi_power_meter.c linux-3.1.4/drivers/hwmon/acpi_power_meter.c
28289--- linux-3.1.4/drivers/hwmon/acpi_power_meter.c 2011-11-11 15:19:27.000000000 -0500
28290+++ linux-3.1.4/drivers/hwmon/acpi_power_meter.c 2011-11-16 18:39:07.000000000 -0500
28291@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
28292 return res;
28293
28294 temp /= 1000;
28295- if (temp < 0)
28296- return -EINVAL;
28297
28298 mutex_lock(&resource->lock);
28299 resource->trip[attr->index - 7] = temp;
28300diff -urNp linux-3.1.4/drivers/hwmon/sht15.c linux-3.1.4/drivers/hwmon/sht15.c
28301--- linux-3.1.4/drivers/hwmon/sht15.c 2011-11-11 15:19:27.000000000 -0500
28302+++ linux-3.1.4/drivers/hwmon/sht15.c 2011-11-16 18:39:07.000000000 -0500
28303@@ -166,7 +166,7 @@ struct sht15_data {
28304 int supply_uV;
28305 bool supply_uV_valid;
28306 struct work_struct update_supply_work;
28307- atomic_t interrupt_handled;
28308+ atomic_unchecked_t interrupt_handled;
28309 };
28310
28311 /**
28312@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
28313 return ret;
28314
28315 gpio_direction_input(data->pdata->gpio_data);
28316- atomic_set(&data->interrupt_handled, 0);
28317+ atomic_set_unchecked(&data->interrupt_handled, 0);
28318
28319 enable_irq(gpio_to_irq(data->pdata->gpio_data));
28320 if (gpio_get_value(data->pdata->gpio_data) == 0) {
28321 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
28322 /* Only relevant if the interrupt hasn't occurred. */
28323- if (!atomic_read(&data->interrupt_handled))
28324+ if (!atomic_read_unchecked(&data->interrupt_handled))
28325 schedule_work(&data->read_work);
28326 }
28327 ret = wait_event_timeout(data->wait_queue,
28328@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
28329
28330 /* First disable the interrupt */
28331 disable_irq_nosync(irq);
28332- atomic_inc(&data->interrupt_handled);
28333+ atomic_inc_unchecked(&data->interrupt_handled);
28334 /* Then schedule a reading work struct */
28335 if (data->state != SHT15_READING_NOTHING)
28336 schedule_work(&data->read_work);
28337@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
28338 * If not, then start the interrupt again - care here as could
28339 * have gone low in meantime so verify it hasn't!
28340 */
28341- atomic_set(&data->interrupt_handled, 0);
28342+ atomic_set_unchecked(&data->interrupt_handled, 0);
28343 enable_irq(gpio_to_irq(data->pdata->gpio_data));
28344 /* If still not occurred or another handler has been scheduled */
28345 if (gpio_get_value(data->pdata->gpio_data)
28346- || atomic_read(&data->interrupt_handled))
28347+ || atomic_read_unchecked(&data->interrupt_handled))
28348 return;
28349 }
28350
28351diff -urNp linux-3.1.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.1.4/drivers/i2c/busses/i2c-amd756-s4882.c
28352--- linux-3.1.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-11 15:19:27.000000000 -0500
28353+++ linux-3.1.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-16 18:39:07.000000000 -0500
28354@@ -43,7 +43,7 @@
28355 extern struct i2c_adapter amd756_smbus;
28356
28357 static struct i2c_adapter *s4882_adapter;
28358-static struct i2c_algorithm *s4882_algo;
28359+static i2c_algorithm_no_const *s4882_algo;
28360
28361 /* Wrapper access functions for multiplexed SMBus */
28362 static DEFINE_MUTEX(amd756_lock);
28363diff -urNp linux-3.1.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.1.4/drivers/i2c/busses/i2c-nforce2-s4985.c
28364--- linux-3.1.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-11 15:19:27.000000000 -0500
28365+++ linux-3.1.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-16 18:39:07.000000000 -0500
28366@@ -41,7 +41,7 @@
28367 extern struct i2c_adapter *nforce2_smbus;
28368
28369 static struct i2c_adapter *s4985_adapter;
28370-static struct i2c_algorithm *s4985_algo;
28371+static i2c_algorithm_no_const *s4985_algo;
28372
28373 /* Wrapper access functions for multiplexed SMBus */
28374 static DEFINE_MUTEX(nforce2_lock);
28375diff -urNp linux-3.1.4/drivers/i2c/i2c-mux.c linux-3.1.4/drivers/i2c/i2c-mux.c
28376--- linux-3.1.4/drivers/i2c/i2c-mux.c 2011-11-11 15:19:27.000000000 -0500
28377+++ linux-3.1.4/drivers/i2c/i2c-mux.c 2011-11-16 18:39:07.000000000 -0500
28378@@ -28,7 +28,7 @@
28379 /* multiplexer per channel data */
28380 struct i2c_mux_priv {
28381 struct i2c_adapter adap;
28382- struct i2c_algorithm algo;
28383+ i2c_algorithm_no_const algo;
28384
28385 struct i2c_adapter *parent;
28386 void *mux_dev; /* the mux chip/device */
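The i2c changes follow the same constification theme: struct i2c_algorithm is normally const and ends up read-only, but the S4882/S4985 wrappers and the mux core build a modified copy of their parent adapter's algorithm at runtime, so those particular instances use the i2c_algorithm_no_const typedef added elsewhere in this patch. Sketch of the pattern, assuming that typedef is in the patched headers:

    static i2c_algorithm_no_const my_algo;  /* writable copy, unlike the constified struct */

    static void wrap_parent_algo(const struct i2c_adapter *parent,
                                 int (*xfer)(struct i2c_adapter *, struct i2c_msg *, int))
    {
            my_algo = *parent->algo;        /* start from the parent's algorithm */
            my_algo.master_xfer = xfer;     /* then substitute the wrapped entry */
    }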
28387diff -urNp linux-3.1.4/drivers/ide/aec62xx.c linux-3.1.4/drivers/ide/aec62xx.c
28388--- linux-3.1.4/drivers/ide/aec62xx.c 2011-11-11 15:19:27.000000000 -0500
28389+++ linux-3.1.4/drivers/ide/aec62xx.c 2011-11-16 18:39:07.000000000 -0500
28390@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
28391 .cable_detect = atp86x_cable_detect,
28392 };
28393
28394-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
28395+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
28396 { /* 0: AEC6210 */
28397 .name = DRV_NAME,
28398 .init_chipset = init_chipset_aec62xx,
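From here a long run of IDE host drivers has its chipset tables moved from __devinitdata to __devinitconst. The tables are already declared const; __devinitdata would still place them in a writable init-data section, whereas __devinitconst keeps const device-init data in a read-only section, matching both the declared type and the patch's general "const data really is read-only" policy. The difference in one sketch:

    /* writable init section -- mismatched with the const qualifier */
    static const struct ide_port_info chipset_wr __devinitdata  = { .name = "drv" };

    /* read-only init section -- what the hunks below switch the tables to */
    static const struct ide_port_info chipset_ro __devinitconst = { .name = "drv" };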
28399diff -urNp linux-3.1.4/drivers/ide/alim15x3.c linux-3.1.4/drivers/ide/alim15x3.c
28400--- linux-3.1.4/drivers/ide/alim15x3.c 2011-11-11 15:19:27.000000000 -0500
28401+++ linux-3.1.4/drivers/ide/alim15x3.c 2011-11-16 18:39:07.000000000 -0500
28402@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
28403 .dma_sff_read_status = ide_dma_sff_read_status,
28404 };
28405
28406-static const struct ide_port_info ali15x3_chipset __devinitdata = {
28407+static const struct ide_port_info ali15x3_chipset __devinitconst = {
28408 .name = DRV_NAME,
28409 .init_chipset = init_chipset_ali15x3,
28410 .init_hwif = init_hwif_ali15x3,
28411diff -urNp linux-3.1.4/drivers/ide/amd74xx.c linux-3.1.4/drivers/ide/amd74xx.c
28412--- linux-3.1.4/drivers/ide/amd74xx.c 2011-11-11 15:19:27.000000000 -0500
28413+++ linux-3.1.4/drivers/ide/amd74xx.c 2011-11-16 18:39:07.000000000 -0500
28414@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
28415 .udma_mask = udma, \
28416 }
28417
28418-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
28419+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
28420 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
28421 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
28422 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
28423diff -urNp linux-3.1.4/drivers/ide/atiixp.c linux-3.1.4/drivers/ide/atiixp.c
28424--- linux-3.1.4/drivers/ide/atiixp.c 2011-11-11 15:19:27.000000000 -0500
28425+++ linux-3.1.4/drivers/ide/atiixp.c 2011-11-16 18:39:07.000000000 -0500
28426@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
28427 .cable_detect = atiixp_cable_detect,
28428 };
28429
28430-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
28431+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
28432 { /* 0: IXP200/300/400/700 */
28433 .name = DRV_NAME,
28434 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
28435diff -urNp linux-3.1.4/drivers/ide/cmd64x.c linux-3.1.4/drivers/ide/cmd64x.c
28436--- linux-3.1.4/drivers/ide/cmd64x.c 2011-11-11 15:19:27.000000000 -0500
28437+++ linux-3.1.4/drivers/ide/cmd64x.c 2011-11-16 18:39:07.000000000 -0500
28438@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
28439 .dma_sff_read_status = ide_dma_sff_read_status,
28440 };
28441
28442-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
28443+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
28444 { /* 0: CMD643 */
28445 .name = DRV_NAME,
28446 .init_chipset = init_chipset_cmd64x,
28447diff -urNp linux-3.1.4/drivers/ide/cs5520.c linux-3.1.4/drivers/ide/cs5520.c
28448--- linux-3.1.4/drivers/ide/cs5520.c 2011-11-11 15:19:27.000000000 -0500
28449+++ linux-3.1.4/drivers/ide/cs5520.c 2011-11-16 18:39:07.000000000 -0500
28450@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
28451 .set_dma_mode = cs5520_set_dma_mode,
28452 };
28453
28454-static const struct ide_port_info cyrix_chipset __devinitdata = {
28455+static const struct ide_port_info cyrix_chipset __devinitconst = {
28456 .name = DRV_NAME,
28457 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
28458 .port_ops = &cs5520_port_ops,
28459diff -urNp linux-3.1.4/drivers/ide/cs5530.c linux-3.1.4/drivers/ide/cs5530.c
28460--- linux-3.1.4/drivers/ide/cs5530.c 2011-11-11 15:19:27.000000000 -0500
28461+++ linux-3.1.4/drivers/ide/cs5530.c 2011-11-16 18:39:07.000000000 -0500
28462@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
28463 .udma_filter = cs5530_udma_filter,
28464 };
28465
28466-static const struct ide_port_info cs5530_chipset __devinitdata = {
28467+static const struct ide_port_info cs5530_chipset __devinitconst = {
28468 .name = DRV_NAME,
28469 .init_chipset = init_chipset_cs5530,
28470 .init_hwif = init_hwif_cs5530,
28471diff -urNp linux-3.1.4/drivers/ide/cs5535.c linux-3.1.4/drivers/ide/cs5535.c
28472--- linux-3.1.4/drivers/ide/cs5535.c 2011-11-11 15:19:27.000000000 -0500
28473+++ linux-3.1.4/drivers/ide/cs5535.c 2011-11-16 18:39:07.000000000 -0500
28474@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
28475 .cable_detect = cs5535_cable_detect,
28476 };
28477
28478-static const struct ide_port_info cs5535_chipset __devinitdata = {
28479+static const struct ide_port_info cs5535_chipset __devinitconst = {
28480 .name = DRV_NAME,
28481 .port_ops = &cs5535_port_ops,
28482 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
28483diff -urNp linux-3.1.4/drivers/ide/cy82c693.c linux-3.1.4/drivers/ide/cy82c693.c
28484--- linux-3.1.4/drivers/ide/cy82c693.c 2011-11-11 15:19:27.000000000 -0500
28485+++ linux-3.1.4/drivers/ide/cy82c693.c 2011-11-16 18:39:07.000000000 -0500
28486@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c69
28487 .set_dma_mode = cy82c693_set_dma_mode,
28488 };
28489
28490-static const struct ide_port_info cy82c693_chipset __devinitdata = {
28491+static const struct ide_port_info cy82c693_chipset __devinitconst = {
28492 .name = DRV_NAME,
28493 .init_iops = init_iops_cy82c693,
28494 .port_ops = &cy82c693_port_ops,
28495diff -urNp linux-3.1.4/drivers/ide/hpt366.c linux-3.1.4/drivers/ide/hpt366.c
28496--- linux-3.1.4/drivers/ide/hpt366.c 2011-11-11 15:19:27.000000000 -0500
28497+++ linux-3.1.4/drivers/ide/hpt366.c 2011-11-16 18:39:07.000000000 -0500
28498@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
28499 }
28500 };
28501
28502-static const struct hpt_info hpt36x __devinitdata = {
28503+static const struct hpt_info hpt36x __devinitconst = {
28504 .chip_name = "HPT36x",
28505 .chip_type = HPT36x,
28506 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
28507@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
28508 .timings = &hpt36x_timings
28509 };
28510
28511-static const struct hpt_info hpt370 __devinitdata = {
28512+static const struct hpt_info hpt370 __devinitconst = {
28513 .chip_name = "HPT370",
28514 .chip_type = HPT370,
28515 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
28516@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
28517 .timings = &hpt37x_timings
28518 };
28519
28520-static const struct hpt_info hpt370a __devinitdata = {
28521+static const struct hpt_info hpt370a __devinitconst = {
28522 .chip_name = "HPT370A",
28523 .chip_type = HPT370A,
28524 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
28525@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
28526 .timings = &hpt37x_timings
28527 };
28528
28529-static const struct hpt_info hpt374 __devinitdata = {
28530+static const struct hpt_info hpt374 __devinitconst = {
28531 .chip_name = "HPT374",
28532 .chip_type = HPT374,
28533 .udma_mask = ATA_UDMA5,
28534@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
28535 .timings = &hpt37x_timings
28536 };
28537
28538-static const struct hpt_info hpt372 __devinitdata = {
28539+static const struct hpt_info hpt372 __devinitconst = {
28540 .chip_name = "HPT372",
28541 .chip_type = HPT372,
28542 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
28543@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
28544 .timings = &hpt37x_timings
28545 };
28546
28547-static const struct hpt_info hpt372a __devinitdata = {
28548+static const struct hpt_info hpt372a __devinitconst = {
28549 .chip_name = "HPT372A",
28550 .chip_type = HPT372A,
28551 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
28552@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
28553 .timings = &hpt37x_timings
28554 };
28555
28556-static const struct hpt_info hpt302 __devinitdata = {
28557+static const struct hpt_info hpt302 __devinitconst = {
28558 .chip_name = "HPT302",
28559 .chip_type = HPT302,
28560 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
28561@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
28562 .timings = &hpt37x_timings
28563 };
28564
28565-static const struct hpt_info hpt371 __devinitdata = {
28566+static const struct hpt_info hpt371 __devinitconst = {
28567 .chip_name = "HPT371",
28568 .chip_type = HPT371,
28569 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
28570@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
28571 .timings = &hpt37x_timings
28572 };
28573
28574-static const struct hpt_info hpt372n __devinitdata = {
28575+static const struct hpt_info hpt372n __devinitconst = {
28576 .chip_name = "HPT372N",
28577 .chip_type = HPT372N,
28578 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
28579@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
28580 .timings = &hpt37x_timings
28581 };
28582
28583-static const struct hpt_info hpt302n __devinitdata = {
28584+static const struct hpt_info hpt302n __devinitconst = {
28585 .chip_name = "HPT302N",
28586 .chip_type = HPT302N,
28587 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
28588@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
28589 .timings = &hpt37x_timings
28590 };
28591
28592-static const struct hpt_info hpt371n __devinitdata = {
28593+static const struct hpt_info hpt371n __devinitconst = {
28594 .chip_name = "HPT371N",
28595 .chip_type = HPT371N,
28596 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
28597@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
28598 .dma_sff_read_status = ide_dma_sff_read_status,
28599 };
28600
28601-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
28602+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
28603 { /* 0: HPT36x */
28604 .name = DRV_NAME,
28605 .init_chipset = init_chipset_hpt366,
28606diff -urNp linux-3.1.4/drivers/ide/ide-cd.c linux-3.1.4/drivers/ide/ide-cd.c
28607--- linux-3.1.4/drivers/ide/ide-cd.c 2011-11-11 15:19:27.000000000 -0500
28608+++ linux-3.1.4/drivers/ide/ide-cd.c 2011-11-16 18:39:07.000000000 -0500
28609@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
28610 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
28611 if ((unsigned long)buf & alignment
28612 || blk_rq_bytes(rq) & q->dma_pad_mask
28613- || object_is_on_stack(buf))
28614+ || object_starts_on_stack(buf))
28615 drive->dma = 0;
28616 }
28617 }
28618diff -urNp linux-3.1.4/drivers/ide/ide-floppy.c linux-3.1.4/drivers/ide/ide-floppy.c
28619--- linux-3.1.4/drivers/ide/ide-floppy.c 2011-11-11 15:19:27.000000000 -0500
28620+++ linux-3.1.4/drivers/ide/ide-floppy.c 2011-11-16 18:40:10.000000000 -0500
28621@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
28622 u8 pc_buf[256], header_len, desc_cnt;
28623 int i, rc = 1, blocks, length;
28624
28625+ pax_track_stack();
28626+
28627 ide_debug_log(IDE_DBG_FUNC, "enter");
28628
28629 drive->bios_cyl = 0;
28630diff -urNp linux-3.1.4/drivers/ide/ide-pci-generic.c linux-3.1.4/drivers/ide/ide-pci-generic.c
28631--- linux-3.1.4/drivers/ide/ide-pci-generic.c 2011-11-11 15:19:27.000000000 -0500
28632+++ linux-3.1.4/drivers/ide/ide-pci-generic.c 2011-11-16 18:39:07.000000000 -0500
28633@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
28634 .udma_mask = ATA_UDMA6, \
28635 }
28636
28637-static const struct ide_port_info generic_chipsets[] __devinitdata = {
28638+static const struct ide_port_info generic_chipsets[] __devinitconst = {
28639 /* 0: Unknown */
28640 DECLARE_GENERIC_PCI_DEV(0),
28641
28642diff -urNp linux-3.1.4/drivers/ide/it8172.c linux-3.1.4/drivers/ide/it8172.c
28643--- linux-3.1.4/drivers/ide/it8172.c 2011-11-11 15:19:27.000000000 -0500
28644+++ linux-3.1.4/drivers/ide/it8172.c 2011-11-16 18:39:07.000000000 -0500
28645@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
28646 .set_dma_mode = it8172_set_dma_mode,
28647 };
28648
28649-static const struct ide_port_info it8172_port_info __devinitdata = {
28650+static const struct ide_port_info it8172_port_info __devinitconst = {
28651 .name = DRV_NAME,
28652 .port_ops = &it8172_port_ops,
28653 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
28654diff -urNp linux-3.1.4/drivers/ide/it8213.c linux-3.1.4/drivers/ide/it8213.c
28655--- linux-3.1.4/drivers/ide/it8213.c 2011-11-11 15:19:27.000000000 -0500
28656+++ linux-3.1.4/drivers/ide/it8213.c 2011-11-16 18:39:07.000000000 -0500
28657@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
28658 .cable_detect = it8213_cable_detect,
28659 };
28660
28661-static const struct ide_port_info it8213_chipset __devinitdata = {
28662+static const struct ide_port_info it8213_chipset __devinitconst = {
28663 .name = DRV_NAME,
28664 .enablebits = { {0x41, 0x80, 0x80} },
28665 .port_ops = &it8213_port_ops,
28666diff -urNp linux-3.1.4/drivers/ide/it821x.c linux-3.1.4/drivers/ide/it821x.c
28667--- linux-3.1.4/drivers/ide/it821x.c 2011-11-11 15:19:27.000000000 -0500
28668+++ linux-3.1.4/drivers/ide/it821x.c 2011-11-16 18:39:07.000000000 -0500
28669@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
28670 .cable_detect = it821x_cable_detect,
28671 };
28672
28673-static const struct ide_port_info it821x_chipset __devinitdata = {
28674+static const struct ide_port_info it821x_chipset __devinitconst = {
28675 .name = DRV_NAME,
28676 .init_chipset = init_chipset_it821x,
28677 .init_hwif = init_hwif_it821x,
28678diff -urNp linux-3.1.4/drivers/ide/jmicron.c linux-3.1.4/drivers/ide/jmicron.c
28679--- linux-3.1.4/drivers/ide/jmicron.c 2011-11-11 15:19:27.000000000 -0500
28680+++ linux-3.1.4/drivers/ide/jmicron.c 2011-11-16 18:39:07.000000000 -0500
28681@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
28682 .cable_detect = jmicron_cable_detect,
28683 };
28684
28685-static const struct ide_port_info jmicron_chipset __devinitdata = {
28686+static const struct ide_port_info jmicron_chipset __devinitconst = {
28687 .name = DRV_NAME,
28688 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
28689 .port_ops = &jmicron_port_ops,
28690diff -urNp linux-3.1.4/drivers/ide/ns87415.c linux-3.1.4/drivers/ide/ns87415.c
28691--- linux-3.1.4/drivers/ide/ns87415.c 2011-11-11 15:19:27.000000000 -0500
28692+++ linux-3.1.4/drivers/ide/ns87415.c 2011-11-16 18:39:07.000000000 -0500
28693@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
28694 .dma_sff_read_status = superio_dma_sff_read_status,
28695 };
28696
28697-static const struct ide_port_info ns87415_chipset __devinitdata = {
28698+static const struct ide_port_info ns87415_chipset __devinitconst = {
28699 .name = DRV_NAME,
28700 .init_hwif = init_hwif_ns87415,
28701 .tp_ops = &ns87415_tp_ops,
28702diff -urNp linux-3.1.4/drivers/ide/opti621.c linux-3.1.4/drivers/ide/opti621.c
28703--- linux-3.1.4/drivers/ide/opti621.c 2011-11-11 15:19:27.000000000 -0500
28704+++ linux-3.1.4/drivers/ide/opti621.c 2011-11-16 18:39:07.000000000 -0500
28705@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
28706 .set_pio_mode = opti621_set_pio_mode,
28707 };
28708
28709-static const struct ide_port_info opti621_chipset __devinitdata = {
28710+static const struct ide_port_info opti621_chipset __devinitconst = {
28711 .name = DRV_NAME,
28712 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
28713 .port_ops = &opti621_port_ops,
28714diff -urNp linux-3.1.4/drivers/ide/pdc202xx_new.c linux-3.1.4/drivers/ide/pdc202xx_new.c
28715--- linux-3.1.4/drivers/ide/pdc202xx_new.c 2011-11-11 15:19:27.000000000 -0500
28716+++ linux-3.1.4/drivers/ide/pdc202xx_new.c 2011-11-16 18:39:07.000000000 -0500
28717@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
28718 .udma_mask = udma, \
28719 }
28720
28721-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
28722+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
28723 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
28724 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
28725 };
28726diff -urNp linux-3.1.4/drivers/ide/pdc202xx_old.c linux-3.1.4/drivers/ide/pdc202xx_old.c
28727--- linux-3.1.4/drivers/ide/pdc202xx_old.c 2011-11-11 15:19:27.000000000 -0500
28728+++ linux-3.1.4/drivers/ide/pdc202xx_old.c 2011-11-16 18:39:07.000000000 -0500
28729@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
28730 .max_sectors = sectors, \
28731 }
28732
28733-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
28734+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
28735 { /* 0: PDC20246 */
28736 .name = DRV_NAME,
28737 .init_chipset = init_chipset_pdc202xx,
28738diff -urNp linux-3.1.4/drivers/ide/piix.c linux-3.1.4/drivers/ide/piix.c
28739--- linux-3.1.4/drivers/ide/piix.c 2011-11-11 15:19:27.000000000 -0500
28740+++ linux-3.1.4/drivers/ide/piix.c 2011-11-16 18:39:07.000000000 -0500
28741@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
28742 .udma_mask = udma, \
28743 }
28744
28745-static const struct ide_port_info piix_pci_info[] __devinitdata = {
28746+static const struct ide_port_info piix_pci_info[] __devinitconst = {
28747 /* 0: MPIIX */
28748 { /*
28749 * MPIIX actually has only a single IDE channel mapped to
28750diff -urNp linux-3.1.4/drivers/ide/rz1000.c linux-3.1.4/drivers/ide/rz1000.c
28751--- linux-3.1.4/drivers/ide/rz1000.c 2011-11-11 15:19:27.000000000 -0500
28752+++ linux-3.1.4/drivers/ide/rz1000.c 2011-11-16 18:39:07.000000000 -0500
28753@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
28754 }
28755 }
28756
28757-static const struct ide_port_info rz1000_chipset __devinitdata = {
28758+static const struct ide_port_info rz1000_chipset __devinitconst = {
28759 .name = DRV_NAME,
28760 .host_flags = IDE_HFLAG_NO_DMA,
28761 };
28762diff -urNp linux-3.1.4/drivers/ide/sc1200.c linux-3.1.4/drivers/ide/sc1200.c
28763--- linux-3.1.4/drivers/ide/sc1200.c 2011-11-11 15:19:27.000000000 -0500
28764+++ linux-3.1.4/drivers/ide/sc1200.c 2011-11-16 18:39:07.000000000 -0500
28765@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
28766 .dma_sff_read_status = ide_dma_sff_read_status,
28767 };
28768
28769-static const struct ide_port_info sc1200_chipset __devinitdata = {
28770+static const struct ide_port_info sc1200_chipset __devinitconst = {
28771 .name = DRV_NAME,
28772 .port_ops = &sc1200_port_ops,
28773 .dma_ops = &sc1200_dma_ops,
28774diff -urNp linux-3.1.4/drivers/ide/scc_pata.c linux-3.1.4/drivers/ide/scc_pata.c
28775--- linux-3.1.4/drivers/ide/scc_pata.c 2011-11-11 15:19:27.000000000 -0500
28776+++ linux-3.1.4/drivers/ide/scc_pata.c 2011-11-16 18:39:07.000000000 -0500
28777@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
28778 .dma_sff_read_status = scc_dma_sff_read_status,
28779 };
28780
28781-static const struct ide_port_info scc_chipset __devinitdata = {
28782+static const struct ide_port_info scc_chipset __devinitconst = {
28783 .name = "sccIDE",
28784 .init_iops = init_iops_scc,
28785 .init_dma = scc_init_dma,
28786diff -urNp linux-3.1.4/drivers/ide/serverworks.c linux-3.1.4/drivers/ide/serverworks.c
28787--- linux-3.1.4/drivers/ide/serverworks.c 2011-11-11 15:19:27.000000000 -0500
28788+++ linux-3.1.4/drivers/ide/serverworks.c 2011-11-16 18:39:07.000000000 -0500
28789@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
28790 .cable_detect = svwks_cable_detect,
28791 };
28792
28793-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
28794+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
28795 { /* 0: OSB4 */
28796 .name = DRV_NAME,
28797 .init_chipset = init_chipset_svwks,
28798diff -urNp linux-3.1.4/drivers/ide/setup-pci.c linux-3.1.4/drivers/ide/setup-pci.c
28799--- linux-3.1.4/drivers/ide/setup-pci.c 2011-11-11 15:19:27.000000000 -0500
28800+++ linux-3.1.4/drivers/ide/setup-pci.c 2011-11-16 18:40:10.000000000 -0500
28801@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
28802 int ret, i, n_ports = dev2 ? 4 : 2;
28803 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
28804
28805+ pax_track_stack();
28806+
28807 for (i = 0; i < n_ports / 2; i++) {
28808 ret = ide_setup_pci_controller(pdev[i], d, !i);
28809 if (ret < 0)
28810diff -urNp linux-3.1.4/drivers/ide/siimage.c linux-3.1.4/drivers/ide/siimage.c
28811--- linux-3.1.4/drivers/ide/siimage.c 2011-11-11 15:19:27.000000000 -0500
28812+++ linux-3.1.4/drivers/ide/siimage.c 2011-11-16 18:39:07.000000000 -0500
28813@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
28814 .udma_mask = ATA_UDMA6, \
28815 }
28816
28817-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
28818+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
28819 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
28820 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
28821 };
28822diff -urNp linux-3.1.4/drivers/ide/sis5513.c linux-3.1.4/drivers/ide/sis5513.c
28823--- linux-3.1.4/drivers/ide/sis5513.c 2011-11-11 15:19:27.000000000 -0500
28824+++ linux-3.1.4/drivers/ide/sis5513.c 2011-11-16 18:39:07.000000000 -0500
28825@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
28826 .cable_detect = sis_cable_detect,
28827 };
28828
28829-static const struct ide_port_info sis5513_chipset __devinitdata = {
28830+static const struct ide_port_info sis5513_chipset __devinitconst = {
28831 .name = DRV_NAME,
28832 .init_chipset = init_chipset_sis5513,
28833 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
28834diff -urNp linux-3.1.4/drivers/ide/sl82c105.c linux-3.1.4/drivers/ide/sl82c105.c
28835--- linux-3.1.4/drivers/ide/sl82c105.c 2011-11-11 15:19:27.000000000 -0500
28836+++ linux-3.1.4/drivers/ide/sl82c105.c 2011-11-16 18:39:07.000000000 -0500
28837@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
28838 .dma_sff_read_status = ide_dma_sff_read_status,
28839 };
28840
28841-static const struct ide_port_info sl82c105_chipset __devinitdata = {
28842+static const struct ide_port_info sl82c105_chipset __devinitconst = {
28843 .name = DRV_NAME,
28844 .init_chipset = init_chipset_sl82c105,
28845 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
28846diff -urNp linux-3.1.4/drivers/ide/slc90e66.c linux-3.1.4/drivers/ide/slc90e66.c
28847--- linux-3.1.4/drivers/ide/slc90e66.c 2011-11-11 15:19:27.000000000 -0500
28848+++ linux-3.1.4/drivers/ide/slc90e66.c 2011-11-16 18:39:07.000000000 -0500
28849@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
28850 .cable_detect = slc90e66_cable_detect,
28851 };
28852
28853-static const struct ide_port_info slc90e66_chipset __devinitdata = {
28854+static const struct ide_port_info slc90e66_chipset __devinitconst = {
28855 .name = DRV_NAME,
28856 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
28857 .port_ops = &slc90e66_port_ops,
28858diff -urNp linux-3.1.4/drivers/ide/tc86c001.c linux-3.1.4/drivers/ide/tc86c001.c
28859--- linux-3.1.4/drivers/ide/tc86c001.c 2011-11-11 15:19:27.000000000 -0500
28860+++ linux-3.1.4/drivers/ide/tc86c001.c 2011-11-16 18:39:07.000000000 -0500
28861@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001
28862 .dma_sff_read_status = ide_dma_sff_read_status,
28863 };
28864
28865-static const struct ide_port_info tc86c001_chipset __devinitdata = {
28866+static const struct ide_port_info tc86c001_chipset __devinitconst = {
28867 .name = DRV_NAME,
28868 .init_hwif = init_hwif_tc86c001,
28869 .port_ops = &tc86c001_port_ops,
28870diff -urNp linux-3.1.4/drivers/ide/triflex.c linux-3.1.4/drivers/ide/triflex.c
28871--- linux-3.1.4/drivers/ide/triflex.c 2011-11-11 15:19:27.000000000 -0500
28872+++ linux-3.1.4/drivers/ide/triflex.c 2011-11-16 18:39:07.000000000 -0500
28873@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
28874 .set_dma_mode = triflex_set_mode,
28875 };
28876
28877-static const struct ide_port_info triflex_device __devinitdata = {
28878+static const struct ide_port_info triflex_device __devinitconst = {
28879 .name = DRV_NAME,
28880 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
28881 .port_ops = &triflex_port_ops,
28882diff -urNp linux-3.1.4/drivers/ide/trm290.c linux-3.1.4/drivers/ide/trm290.c
28883--- linux-3.1.4/drivers/ide/trm290.c 2011-11-11 15:19:27.000000000 -0500
28884+++ linux-3.1.4/drivers/ide/trm290.c 2011-11-16 18:39:07.000000000 -0500
28885@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
28886 .dma_check = trm290_dma_check,
28887 };
28888
28889-static const struct ide_port_info trm290_chipset __devinitdata = {
28890+static const struct ide_port_info trm290_chipset __devinitconst = {
28891 .name = DRV_NAME,
28892 .init_hwif = init_hwif_trm290,
28893 .tp_ops = &trm290_tp_ops,
28894diff -urNp linux-3.1.4/drivers/ide/via82cxxx.c linux-3.1.4/drivers/ide/via82cxxx.c
28895--- linux-3.1.4/drivers/ide/via82cxxx.c 2011-11-11 15:19:27.000000000 -0500
28896+++ linux-3.1.4/drivers/ide/via82cxxx.c 2011-11-16 18:39:07.000000000 -0500
28897@@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
28898 .cable_detect = via82cxxx_cable_detect,
28899 };
28900
28901-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
28902+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
28903 .name = DRV_NAME,
28904 .init_chipset = init_chipset_via82cxxx,
28905 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
28906diff -urNp linux-3.1.4/drivers/infiniband/core/cm.c linux-3.1.4/drivers/infiniband/core/cm.c
28907--- linux-3.1.4/drivers/infiniband/core/cm.c 2011-11-11 15:19:27.000000000 -0500
28908+++ linux-3.1.4/drivers/infiniband/core/cm.c 2011-11-16 18:39:07.000000000 -0500
28909@@ -113,7 +113,7 @@ static char const counter_group_names[CM
28910
28911 struct cm_counter_group {
28912 struct kobject obj;
28913- atomic_long_t counter[CM_ATTR_COUNT];
28914+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
28915 };
28916
28917 struct cm_counter_attribute {
28918@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
28919 struct ib_mad_send_buf *msg = NULL;
28920 int ret;
28921
28922- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28923+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28924 counter[CM_REQ_COUNTER]);
28925
28926 /* Quick state check to discard duplicate REQs. */
28927@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
28928 if (!cm_id_priv)
28929 return;
28930
28931- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28932+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28933 counter[CM_REP_COUNTER]);
28934 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
28935 if (ret)
28936@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
28937 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
28938 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
28939 spin_unlock_irq(&cm_id_priv->lock);
28940- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28941+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28942 counter[CM_RTU_COUNTER]);
28943 goto out;
28944 }
28945@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
28946 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
28947 dreq_msg->local_comm_id);
28948 if (!cm_id_priv) {
28949- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28950+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28951 counter[CM_DREQ_COUNTER]);
28952 cm_issue_drep(work->port, work->mad_recv_wc);
28953 return -EINVAL;
28954@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
28955 case IB_CM_MRA_REP_RCVD:
28956 break;
28957 case IB_CM_TIMEWAIT:
28958- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28959+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28960 counter[CM_DREQ_COUNTER]);
28961 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28962 goto unlock;
28963@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
28964 cm_free_msg(msg);
28965 goto deref;
28966 case IB_CM_DREQ_RCVD:
28967- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28968+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28969 counter[CM_DREQ_COUNTER]);
28970 goto unlock;
28971 default:
28972@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
28973 ib_modify_mad(cm_id_priv->av.port->mad_agent,
28974 cm_id_priv->msg, timeout)) {
28975 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
28976- atomic_long_inc(&work->port->
28977+ atomic_long_inc_unchecked(&work->port->
28978 counter_group[CM_RECV_DUPLICATES].
28979 counter[CM_MRA_COUNTER]);
28980 goto out;
28981@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
28982 break;
28983 case IB_CM_MRA_REQ_RCVD:
28984 case IB_CM_MRA_REP_RCVD:
28985- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28986+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28987 counter[CM_MRA_COUNTER]);
28988 /* fall through */
28989 default:
28990@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
28991 case IB_CM_LAP_IDLE:
28992 break;
28993 case IB_CM_MRA_LAP_SENT:
28994- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28995+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28996 counter[CM_LAP_COUNTER]);
28997 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28998 goto unlock;
28999@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
29000 cm_free_msg(msg);
29001 goto deref;
29002 case IB_CM_LAP_RCVD:
29003- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29004+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29005 counter[CM_LAP_COUNTER]);
29006 goto unlock;
29007 default:
29008@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
29009 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29010 if (cur_cm_id_priv) {
29011 spin_unlock_irq(&cm.lock);
29012- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29013+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29014 counter[CM_SIDR_REQ_COUNTER]);
29015 goto out; /* Duplicate message. */
29016 }
29017@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
29018 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29019 msg->retries = 1;
29020
29021- atomic_long_add(1 + msg->retries,
29022+ atomic_long_add_unchecked(1 + msg->retries,
29023 &port->counter_group[CM_XMIT].counter[attr_index]);
29024 if (msg->retries)
29025- atomic_long_add(msg->retries,
29026+ atomic_long_add_unchecked(msg->retries,
29027 &port->counter_group[CM_XMIT_RETRIES].
29028 counter[attr_index]);
29029
29030@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
29031 }
29032
29033 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29034- atomic_long_inc(&port->counter_group[CM_RECV].
29035+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29036 counter[attr_id - CM_ATTR_ID_OFFSET]);
29037
29038 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29039@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
29040 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29041
29042 return sprintf(buf, "%ld\n",
29043- atomic_long_read(&group->counter[cm_attr->index]));
29044+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29045 }
29046
29047 static const struct sysfs_ops cm_counter_ops = {
29048diff -urNp linux-3.1.4/drivers/infiniband/core/fmr_pool.c linux-3.1.4/drivers/infiniband/core/fmr_pool.c
29049--- linux-3.1.4/drivers/infiniband/core/fmr_pool.c 2011-11-11 15:19:27.000000000 -0500
29050+++ linux-3.1.4/drivers/infiniband/core/fmr_pool.c 2011-11-16 18:39:07.000000000 -0500
29051@@ -97,8 +97,8 @@ struct ib_fmr_pool {
29052
29053 struct task_struct *thread;
29054
29055- atomic_t req_ser;
29056- atomic_t flush_ser;
29057+ atomic_unchecked_t req_ser;
29058+ atomic_unchecked_t flush_ser;
29059
29060 wait_queue_head_t force_wait;
29061 };
29062@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29063 struct ib_fmr_pool *pool = pool_ptr;
29064
29065 do {
29066- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29067+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29068 ib_fmr_batch_release(pool);
29069
29070- atomic_inc(&pool->flush_ser);
29071+ atomic_inc_unchecked(&pool->flush_ser);
29072 wake_up_interruptible(&pool->force_wait);
29073
29074 if (pool->flush_function)
29075@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
29076 }
29077
29078 set_current_state(TASK_INTERRUPTIBLE);
29079- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29080+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29081 !kthread_should_stop())
29082 schedule();
29083 __set_current_state(TASK_RUNNING);
29084@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
29085 pool->dirty_watermark = params->dirty_watermark;
29086 pool->dirty_len = 0;
29087 spin_lock_init(&pool->pool_lock);
29088- atomic_set(&pool->req_ser, 0);
29089- atomic_set(&pool->flush_ser, 0);
29090+ atomic_set_unchecked(&pool->req_ser, 0);
29091+ atomic_set_unchecked(&pool->flush_ser, 0);
29092 init_waitqueue_head(&pool->force_wait);
29093
29094 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29095@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
29096 }
29097 spin_unlock_irq(&pool->pool_lock);
29098
29099- serial = atomic_inc_return(&pool->req_ser);
29100+ serial = atomic_inc_return_unchecked(&pool->req_ser);
29101 wake_up_process(pool->thread);
29102
29103 if (wait_event_interruptible(pool->force_wait,
29104- atomic_read(&pool->flush_ser) - serial >= 0))
29105+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29106 return -EINTR;
29107
29108 return 0;
29109@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
29110 } else {
29111 list_add_tail(&fmr->list, &pool->dirty_list);
29112 if (++pool->dirty_len >= pool->dirty_watermark) {
29113- atomic_inc(&pool->req_ser);
29114+ atomic_inc_unchecked(&pool->req_ser);
29115 wake_up_process(pool->thread);
29116 }
29117 }
29118diff -urNp linux-3.1.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.1.4/drivers/infiniband/hw/cxgb4/mem.c
29119--- linux-3.1.4/drivers/infiniband/hw/cxgb4/mem.c 2011-11-11 15:19:27.000000000 -0500
29120+++ linux-3.1.4/drivers/infiniband/hw/cxgb4/mem.c 2011-11-16 18:39:07.000000000 -0500
29121@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
29122 int err;
29123 struct fw_ri_tpte tpt;
29124 u32 stag_idx;
29125- static atomic_t key;
29126+ static atomic_unchecked_t key;
29127
29128 if (c4iw_fatal_error(rdev))
29129 return -EIO;
29130@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
29131 &rdev->resource.tpt_fifo_lock);
29132 if (!stag_idx)
29133 return -ENOMEM;
29134- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
29135+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
29136 }
29137 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
29138 __func__, stag_state, type, pdid, stag_idx);
29139diff -urNp linux-3.1.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.1.4/drivers/infiniband/hw/ipath/ipath_fs.c
29140--- linux-3.1.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-11 15:19:27.000000000 -0500
29141+++ linux-3.1.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-16 18:40:10.000000000 -0500
29142@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
29143 struct infinipath_counters counters;
29144 struct ipath_devdata *dd;
29145
29146+ pax_track_stack();
29147+
29148 dd = file->f_path.dentry->d_inode->i_private;
29149 dd->ipath_f_read_counters(dd, &counters);
29150
29151diff -urNp linux-3.1.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.1.4/drivers/infiniband/hw/ipath/ipath_rc.c
29152--- linux-3.1.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-11-11 15:19:27.000000000 -0500
29153+++ linux-3.1.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-11-16 18:39:07.000000000 -0500
29154@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
29155 struct ib_atomic_eth *ateth;
29156 struct ipath_ack_entry *e;
29157 u64 vaddr;
29158- atomic64_t *maddr;
29159+ atomic64_unchecked_t *maddr;
29160 u64 sdata;
29161 u32 rkey;
29162 u8 next;
29163@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
29164 IB_ACCESS_REMOTE_ATOMIC)))
29165 goto nack_acc_unlck;
29166 /* Perform atomic OP and save result. */
29167- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29168+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29169 sdata = be64_to_cpu(ateth->swap_data);
29170 e = &qp->s_ack_queue[qp->r_head_ack_queue];
29171 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
29172- (u64) atomic64_add_return(sdata, maddr) - sdata :
29173+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
29174 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
29175 be64_to_cpu(ateth->compare_data),
29176 sdata);
29177diff -urNp linux-3.1.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.1.4/drivers/infiniband/hw/ipath/ipath_ruc.c
29178--- linux-3.1.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-11-11 15:19:27.000000000 -0500
29179+++ linux-3.1.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-11-16 18:39:07.000000000 -0500
29180@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
29181 unsigned long flags;
29182 struct ib_wc wc;
29183 u64 sdata;
29184- atomic64_t *maddr;
29185+ atomic64_unchecked_t *maddr;
29186 enum ib_wc_status send_status;
29187
29188 /*
29189@@ -382,11 +382,11 @@ again:
29190 IB_ACCESS_REMOTE_ATOMIC)))
29191 goto acc_err;
29192 /* Perform atomic OP and save result. */
29193- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29194+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29195 sdata = wqe->wr.wr.atomic.compare_add;
29196 *(u64 *) sqp->s_sge.sge.vaddr =
29197 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
29198- (u64) atomic64_add_return(sdata, maddr) - sdata :
29199+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
29200 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
29201 sdata, wqe->wr.wr.atomic.swap);
29202 goto send_comp;
29203diff -urNp linux-3.1.4/drivers/infiniband/hw/nes/nes.c linux-3.1.4/drivers/infiniband/hw/nes/nes.c
29204--- linux-3.1.4/drivers/infiniband/hw/nes/nes.c 2011-11-11 15:19:27.000000000 -0500
29205+++ linux-3.1.4/drivers/infiniband/hw/nes/nes.c 2011-11-16 18:39:07.000000000 -0500
29206@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
29207 LIST_HEAD(nes_adapter_list);
29208 static LIST_HEAD(nes_dev_list);
29209
29210-atomic_t qps_destroyed;
29211+atomic_unchecked_t qps_destroyed;
29212
29213 static unsigned int ee_flsh_adapter;
29214 static unsigned int sysfs_nonidx_addr;
29215@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
29216 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
29217 struct nes_adapter *nesadapter = nesdev->nesadapter;
29218
29219- atomic_inc(&qps_destroyed);
29220+ atomic_inc_unchecked(&qps_destroyed);
29221
29222 /* Free the control structures */
29223
29224diff -urNp linux-3.1.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.1.4/drivers/infiniband/hw/nes/nes_cm.c
29225--- linux-3.1.4/drivers/infiniband/hw/nes/nes_cm.c 2011-11-11 15:19:27.000000000 -0500
29226+++ linux-3.1.4/drivers/infiniband/hw/nes/nes_cm.c 2011-11-16 18:39:07.000000000 -0500
29227@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
29228 u32 cm_packets_retrans;
29229 u32 cm_packets_created;
29230 u32 cm_packets_received;
29231-atomic_t cm_listens_created;
29232-atomic_t cm_listens_destroyed;
29233+atomic_unchecked_t cm_listens_created;
29234+atomic_unchecked_t cm_listens_destroyed;
29235 u32 cm_backlog_drops;
29236-atomic_t cm_loopbacks;
29237-atomic_t cm_nodes_created;
29238-atomic_t cm_nodes_destroyed;
29239-atomic_t cm_accel_dropped_pkts;
29240-atomic_t cm_resets_recvd;
29241+atomic_unchecked_t cm_loopbacks;
29242+atomic_unchecked_t cm_nodes_created;
29243+atomic_unchecked_t cm_nodes_destroyed;
29244+atomic_unchecked_t cm_accel_dropped_pkts;
29245+atomic_unchecked_t cm_resets_recvd;
29246
29247 static inline int mini_cm_accelerated(struct nes_cm_core *,
29248 struct nes_cm_node *);
29249@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
29250
29251 static struct nes_cm_core *g_cm_core;
29252
29253-atomic_t cm_connects;
29254-atomic_t cm_accepts;
29255-atomic_t cm_disconnects;
29256-atomic_t cm_closes;
29257-atomic_t cm_connecteds;
29258-atomic_t cm_connect_reqs;
29259-atomic_t cm_rejects;
29260+atomic_unchecked_t cm_connects;
29261+atomic_unchecked_t cm_accepts;
29262+atomic_unchecked_t cm_disconnects;
29263+atomic_unchecked_t cm_closes;
29264+atomic_unchecked_t cm_connecteds;
29265+atomic_unchecked_t cm_connect_reqs;
29266+atomic_unchecked_t cm_rejects;
29267
29268
29269 /**
29270@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
29271 kfree(listener);
29272 listener = NULL;
29273 ret = 0;
29274- atomic_inc(&cm_listens_destroyed);
29275+ atomic_inc_unchecked(&cm_listens_destroyed);
29276 } else {
29277 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
29278 }
29279@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
29280 cm_node->rem_mac);
29281
29282 add_hte_node(cm_core, cm_node);
29283- atomic_inc(&cm_nodes_created);
29284+ atomic_inc_unchecked(&cm_nodes_created);
29285
29286 return cm_node;
29287 }
29288@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
29289 }
29290
29291 atomic_dec(&cm_core->node_cnt);
29292- atomic_inc(&cm_nodes_destroyed);
29293+ atomic_inc_unchecked(&cm_nodes_destroyed);
29294 nesqp = cm_node->nesqp;
29295 if (nesqp) {
29296 nesqp->cm_node = NULL;
29297@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
29298
29299 static void drop_packet(struct sk_buff *skb)
29300 {
29301- atomic_inc(&cm_accel_dropped_pkts);
29302+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
29303 dev_kfree_skb_any(skb);
29304 }
29305
29306@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
29307 {
29308
29309 int reset = 0; /* whether to send reset in case of err.. */
29310- atomic_inc(&cm_resets_recvd);
29311+ atomic_inc_unchecked(&cm_resets_recvd);
29312 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
29313 " refcnt=%d\n", cm_node, cm_node->state,
29314 atomic_read(&cm_node->ref_count));
29315@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
29316 rem_ref_cm_node(cm_node->cm_core, cm_node);
29317 return NULL;
29318 }
29319- atomic_inc(&cm_loopbacks);
29320+ atomic_inc_unchecked(&cm_loopbacks);
29321 loopbackremotenode->loopbackpartner = cm_node;
29322 loopbackremotenode->tcp_cntxt.rcv_wscale =
29323 NES_CM_DEFAULT_RCV_WND_SCALE;
29324@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
29325 add_ref_cm_node(cm_node);
29326 } else if (cm_node->state == NES_CM_STATE_TSA) {
29327 rem_ref_cm_node(cm_core, cm_node);
29328- atomic_inc(&cm_accel_dropped_pkts);
29329+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
29330 dev_kfree_skb_any(skb);
29331 break;
29332 }
29333@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
29334
29335 if ((cm_id) && (cm_id->event_handler)) {
29336 if (issue_disconn) {
29337- atomic_inc(&cm_disconnects);
29338+ atomic_inc_unchecked(&cm_disconnects);
29339 cm_event.event = IW_CM_EVENT_DISCONNECT;
29340 cm_event.status = disconn_status;
29341 cm_event.local_addr = cm_id->local_addr;
29342@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
29343 }
29344
29345 if (issue_close) {
29346- atomic_inc(&cm_closes);
29347+ atomic_inc_unchecked(&cm_closes);
29348 nes_disconnect(nesqp, 1);
29349
29350 cm_id->provider_data = nesqp;
29351@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
29352
29353 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
29354 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
29355- atomic_inc(&cm_accepts);
29356+ atomic_inc_unchecked(&cm_accepts);
29357
29358 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
29359 netdev_refcnt_read(nesvnic->netdev));
29360@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
29361
29362 struct nes_cm_core *cm_core;
29363
29364- atomic_inc(&cm_rejects);
29365+ atomic_inc_unchecked(&cm_rejects);
29366 cm_node = (struct nes_cm_node *) cm_id->provider_data;
29367 loopback = cm_node->loopbackpartner;
29368 cm_core = cm_node->cm_core;
29369@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
29370 ntohl(cm_id->local_addr.sin_addr.s_addr),
29371 ntohs(cm_id->local_addr.sin_port));
29372
29373- atomic_inc(&cm_connects);
29374+ atomic_inc_unchecked(&cm_connects);
29375 nesqp->active_conn = 1;
29376
29377 /* cache the cm_id in the qp */
29378@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
29379 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
29380 return err;
29381 }
29382- atomic_inc(&cm_listens_created);
29383+ atomic_inc_unchecked(&cm_listens_created);
29384 }
29385
29386 cm_id->add_ref(cm_id);
29387@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
29388 if (nesqp->destroyed) {
29389 return;
29390 }
29391- atomic_inc(&cm_connecteds);
29392+ atomic_inc_unchecked(&cm_connecteds);
29393 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
29394 " local port 0x%04X. jiffies = %lu.\n",
29395 nesqp->hwqp.qp_id,
29396@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
29397
29398 cm_id->add_ref(cm_id);
29399 ret = cm_id->event_handler(cm_id, &cm_event);
29400- atomic_inc(&cm_closes);
29401+ atomic_inc_unchecked(&cm_closes);
29402 cm_event.event = IW_CM_EVENT_CLOSE;
29403 cm_event.status = 0;
29404 cm_event.provider_data = cm_id->provider_data;
29405@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
29406 return;
29407 cm_id = cm_node->cm_id;
29408
29409- atomic_inc(&cm_connect_reqs);
29410+ atomic_inc_unchecked(&cm_connect_reqs);
29411 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29412 cm_node, cm_id, jiffies);
29413
29414@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
29415 return;
29416 cm_id = cm_node->cm_id;
29417
29418- atomic_inc(&cm_connect_reqs);
29419+ atomic_inc_unchecked(&cm_connect_reqs);
29420 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29421 cm_node, cm_id, jiffies);
29422
29423diff -urNp linux-3.1.4/drivers/infiniband/hw/nes/nes.h linux-3.1.4/drivers/infiniband/hw/nes/nes.h
29424--- linux-3.1.4/drivers/infiniband/hw/nes/nes.h 2011-11-11 15:19:27.000000000 -0500
29425+++ linux-3.1.4/drivers/infiniband/hw/nes/nes.h 2011-11-16 18:39:07.000000000 -0500
29426@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
29427 extern unsigned int wqm_quanta;
29428 extern struct list_head nes_adapter_list;
29429
29430-extern atomic_t cm_connects;
29431-extern atomic_t cm_accepts;
29432-extern atomic_t cm_disconnects;
29433-extern atomic_t cm_closes;
29434-extern atomic_t cm_connecteds;
29435-extern atomic_t cm_connect_reqs;
29436-extern atomic_t cm_rejects;
29437-extern atomic_t mod_qp_timouts;
29438-extern atomic_t qps_created;
29439-extern atomic_t qps_destroyed;
29440-extern atomic_t sw_qps_destroyed;
29441+extern atomic_unchecked_t cm_connects;
29442+extern atomic_unchecked_t cm_accepts;
29443+extern atomic_unchecked_t cm_disconnects;
29444+extern atomic_unchecked_t cm_closes;
29445+extern atomic_unchecked_t cm_connecteds;
29446+extern atomic_unchecked_t cm_connect_reqs;
29447+extern atomic_unchecked_t cm_rejects;
29448+extern atomic_unchecked_t mod_qp_timouts;
29449+extern atomic_unchecked_t qps_created;
29450+extern atomic_unchecked_t qps_destroyed;
29451+extern atomic_unchecked_t sw_qps_destroyed;
29452 extern u32 mh_detected;
29453 extern u32 mh_pauses_sent;
29454 extern u32 cm_packets_sent;
29455@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
29456 extern u32 cm_packets_received;
29457 extern u32 cm_packets_dropped;
29458 extern u32 cm_packets_retrans;
29459-extern atomic_t cm_listens_created;
29460-extern atomic_t cm_listens_destroyed;
29461+extern atomic_unchecked_t cm_listens_created;
29462+extern atomic_unchecked_t cm_listens_destroyed;
29463 extern u32 cm_backlog_drops;
29464-extern atomic_t cm_loopbacks;
29465-extern atomic_t cm_nodes_created;
29466-extern atomic_t cm_nodes_destroyed;
29467-extern atomic_t cm_accel_dropped_pkts;
29468-extern atomic_t cm_resets_recvd;
29469+extern atomic_unchecked_t cm_loopbacks;
29470+extern atomic_unchecked_t cm_nodes_created;
29471+extern atomic_unchecked_t cm_nodes_destroyed;
29472+extern atomic_unchecked_t cm_accel_dropped_pkts;
29473+extern atomic_unchecked_t cm_resets_recvd;
29474
29475 extern u32 int_mod_timer_init;
29476 extern u32 int_mod_cq_depth_256;
29477diff -urNp linux-3.1.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.1.4/drivers/infiniband/hw/nes/nes_nic.c
29478--- linux-3.1.4/drivers/infiniband/hw/nes/nes_nic.c 2011-11-11 15:19:27.000000000 -0500
29479+++ linux-3.1.4/drivers/infiniband/hw/nes/nes_nic.c 2011-11-16 18:39:07.000000000 -0500
29480@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
29481 target_stat_values[++index] = mh_detected;
29482 target_stat_values[++index] = mh_pauses_sent;
29483 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
29484- target_stat_values[++index] = atomic_read(&cm_connects);
29485- target_stat_values[++index] = atomic_read(&cm_accepts);
29486- target_stat_values[++index] = atomic_read(&cm_disconnects);
29487- target_stat_values[++index] = atomic_read(&cm_connecteds);
29488- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
29489- target_stat_values[++index] = atomic_read(&cm_rejects);
29490- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
29491- target_stat_values[++index] = atomic_read(&qps_created);
29492- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
29493- target_stat_values[++index] = atomic_read(&qps_destroyed);
29494- target_stat_values[++index] = atomic_read(&cm_closes);
29495+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
29496+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
29497+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
29498+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
29499+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
29500+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
29501+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
29502+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
29503+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
29504+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
29505+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
29506 target_stat_values[++index] = cm_packets_sent;
29507 target_stat_values[++index] = cm_packets_bounced;
29508 target_stat_values[++index] = cm_packets_created;
29509 target_stat_values[++index] = cm_packets_received;
29510 target_stat_values[++index] = cm_packets_dropped;
29511 target_stat_values[++index] = cm_packets_retrans;
29512- target_stat_values[++index] = atomic_read(&cm_listens_created);
29513- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
29514+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
29515+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
29516 target_stat_values[++index] = cm_backlog_drops;
29517- target_stat_values[++index] = atomic_read(&cm_loopbacks);
29518- target_stat_values[++index] = atomic_read(&cm_nodes_created);
29519- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
29520- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
29521- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
29522+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
29523+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
29524+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
29525+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
29526+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
29527 target_stat_values[++index] = nesadapter->free_4kpbl;
29528 target_stat_values[++index] = nesadapter->free_256pbl;
29529 target_stat_values[++index] = int_mod_timer_init;
29530diff -urNp linux-3.1.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.1.4/drivers/infiniband/hw/nes/nes_verbs.c
29531--- linux-3.1.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-11 15:19:27.000000000 -0500
29532+++ linux-3.1.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-16 18:39:07.000000000 -0500
29533@@ -46,9 +46,9 @@
29534
29535 #include <rdma/ib_umem.h>
29536
29537-atomic_t mod_qp_timouts;
29538-atomic_t qps_created;
29539-atomic_t sw_qps_destroyed;
29540+atomic_unchecked_t mod_qp_timouts;
29541+atomic_unchecked_t qps_created;
29542+atomic_unchecked_t sw_qps_destroyed;
29543
29544 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
29545
29546@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struc
29547 if (init_attr->create_flags)
29548 return ERR_PTR(-EINVAL);
29549
29550- atomic_inc(&qps_created);
29551+ atomic_inc_unchecked(&qps_created);
29552 switch (init_attr->qp_type) {
29553 case IB_QPT_RC:
29554 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
29555@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *
29556 struct iw_cm_event cm_event;
29557 int ret;
29558
29559- atomic_inc(&sw_qps_destroyed);
29560+ atomic_inc_unchecked(&sw_qps_destroyed);
29561 nesqp->destroyed = 1;
29562
29563 /* Blow away the connection if it exists. */
29564diff -urNp linux-3.1.4/drivers/infiniband/hw/qib/qib.h linux-3.1.4/drivers/infiniband/hw/qib/qib.h
29565--- linux-3.1.4/drivers/infiniband/hw/qib/qib.h 2011-11-11 15:19:27.000000000 -0500
29566+++ linux-3.1.4/drivers/infiniband/hw/qib/qib.h 2011-11-16 18:39:07.000000000 -0500
29567@@ -51,6 +51,7 @@
29568 #include <linux/completion.h>
29569 #include <linux/kref.h>
29570 #include <linux/sched.h>
29571+#include <linux/slab.h>
29572
29573 #include "qib_common.h"
29574 #include "qib_verbs.h"
29575diff -urNp linux-3.1.4/drivers/input/gameport/gameport.c linux-3.1.4/drivers/input/gameport/gameport.c
29576--- linux-3.1.4/drivers/input/gameport/gameport.c 2011-11-11 15:19:27.000000000 -0500
29577+++ linux-3.1.4/drivers/input/gameport/gameport.c 2011-11-16 18:39:07.000000000 -0500
29578@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
29579 */
29580 static void gameport_init_port(struct gameport *gameport)
29581 {
29582- static atomic_t gameport_no = ATOMIC_INIT(0);
29583+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
29584
29585 __module_get(THIS_MODULE);
29586
29587 mutex_init(&gameport->drv_mutex);
29588 device_initialize(&gameport->dev);
29589 dev_set_name(&gameport->dev, "gameport%lu",
29590- (unsigned long)atomic_inc_return(&gameport_no) - 1);
29591+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
29592 gameport->dev.bus = &gameport_bus;
29593 gameport->dev.release = gameport_release_port;
29594 if (gameport->parent)
29595diff -urNp linux-3.1.4/drivers/input/input.c linux-3.1.4/drivers/input/input.c
29596--- linux-3.1.4/drivers/input/input.c 2011-11-11 15:19:27.000000000 -0500
29597+++ linux-3.1.4/drivers/input/input.c 2011-11-16 18:39:07.000000000 -0500
29598@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
29599 */
29600 int input_register_device(struct input_dev *dev)
29601 {
29602- static atomic_t input_no = ATOMIC_INIT(0);
29603+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
29604 struct input_handler *handler;
29605 const char *path;
29606 int error;
29607@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
29608 dev->setkeycode = input_default_setkeycode;
29609
29610 dev_set_name(&dev->dev, "input%ld",
29611- (unsigned long) atomic_inc_return(&input_no) - 1);
29612+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
29613
29614 error = device_add(&dev->dev);
29615 if (error)
29616diff -urNp linux-3.1.4/drivers/input/joystick/sidewinder.c linux-3.1.4/drivers/input/joystick/sidewinder.c
29617--- linux-3.1.4/drivers/input/joystick/sidewinder.c 2011-11-11 15:19:27.000000000 -0500
29618+++ linux-3.1.4/drivers/input/joystick/sidewinder.c 2011-11-16 18:40:10.000000000 -0500
29619@@ -30,6 +30,7 @@
29620 #include <linux/kernel.h>
29621 #include <linux/module.h>
29622 #include <linux/slab.h>
29623+#include <linux/sched.h>
29624 #include <linux/init.h>
29625 #include <linux/input.h>
29626 #include <linux/gameport.h>
29627@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
29628 unsigned char buf[SW_LENGTH];
29629 int i;
29630
29631+ pax_track_stack();
29632+
29633 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
29634
29635 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
29636diff -urNp linux-3.1.4/drivers/input/joystick/xpad.c linux-3.1.4/drivers/input/joystick/xpad.c
29637--- linux-3.1.4/drivers/input/joystick/xpad.c 2011-11-11 15:19:27.000000000 -0500
29638+++ linux-3.1.4/drivers/input/joystick/xpad.c 2011-11-16 18:39:07.000000000 -0500
29639@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_clas
29640
29641 static int xpad_led_probe(struct usb_xpad *xpad)
29642 {
29643- static atomic_t led_seq = ATOMIC_INIT(0);
29644+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
29645 long led_no;
29646 struct xpad_led *led;
29647 struct led_classdev *led_cdev;
29648@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpa
29649 if (!led)
29650 return -ENOMEM;
29651
29652- led_no = (long)atomic_inc_return(&led_seq) - 1;
29653+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
29654
29655 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
29656 led->xpad = xpad;
29657diff -urNp linux-3.1.4/drivers/input/mousedev.c linux-3.1.4/drivers/input/mousedev.c
29658--- linux-3.1.4/drivers/input/mousedev.c 2011-11-11 15:19:27.000000000 -0500
29659+++ linux-3.1.4/drivers/input/mousedev.c 2011-11-16 18:39:07.000000000 -0500
29660@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
29661
29662 spin_unlock_irq(&client->packet_lock);
29663
29664- if (copy_to_user(buffer, data, count))
29665+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
29666 return -EFAULT;
29667
29668 return count;
29669diff -urNp linux-3.1.4/drivers/input/serio/serio.c linux-3.1.4/drivers/input/serio/serio.c
29670--- linux-3.1.4/drivers/input/serio/serio.c 2011-11-11 15:19:27.000000000 -0500
29671+++ linux-3.1.4/drivers/input/serio/serio.c 2011-11-16 18:39:07.000000000 -0500
29672@@ -497,7 +497,7 @@ static void serio_release_port(struct de
29673 */
29674 static void serio_init_port(struct serio *serio)
29675 {
29676- static atomic_t serio_no = ATOMIC_INIT(0);
29677+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
29678
29679 __module_get(THIS_MODULE);
29680
29681@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
29682 mutex_init(&serio->drv_mutex);
29683 device_initialize(&serio->dev);
29684 dev_set_name(&serio->dev, "serio%ld",
29685- (long)atomic_inc_return(&serio_no) - 1);
29686+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
29687 serio->dev.bus = &serio_bus;
29688 serio->dev.release = serio_release_port;
29689 serio->dev.groups = serio_device_attr_groups;
29690diff -urNp linux-3.1.4/drivers/isdn/capi/capi.c linux-3.1.4/drivers/isdn/capi/capi.c
29691--- linux-3.1.4/drivers/isdn/capi/capi.c 2011-11-11 15:19:27.000000000 -0500
29692+++ linux-3.1.4/drivers/isdn/capi/capi.c 2011-11-16 18:39:07.000000000 -0500
29693@@ -83,8 +83,8 @@ struct capiminor {
29694
29695 struct capi20_appl *ap;
29696 u32 ncci;
29697- atomic_t datahandle;
29698- atomic_t msgid;
29699+ atomic_unchecked_t datahandle;
29700+ atomic_unchecked_t msgid;
29701
29702 struct tty_port port;
29703 int ttyinstop;
29704@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
29705 capimsg_setu16(s, 2, mp->ap->applid);
29706 capimsg_setu8 (s, 4, CAPI_DATA_B3);
29707 capimsg_setu8 (s, 5, CAPI_RESP);
29708- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
29709+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
29710 capimsg_setu32(s, 8, mp->ncci);
29711 capimsg_setu16(s, 12, datahandle);
29712 }
29713@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
29714 mp->outbytes -= len;
29715 spin_unlock_bh(&mp->outlock);
29716
29717- datahandle = atomic_inc_return(&mp->datahandle);
29718+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
29719 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
29720 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
29721 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
29722 capimsg_setu16(skb->data, 2, mp->ap->applid);
29723 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
29724 capimsg_setu8 (skb->data, 5, CAPI_REQ);
29725- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
29726+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
29727 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
29728 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
29729 capimsg_setu16(skb->data, 16, len); /* Data length */
29730diff -urNp linux-3.1.4/drivers/isdn/gigaset/common.c linux-3.1.4/drivers/isdn/gigaset/common.c
29731--- linux-3.1.4/drivers/isdn/gigaset/common.c 2011-11-11 15:19:27.000000000 -0500
29732+++ linux-3.1.4/drivers/isdn/gigaset/common.c 2011-11-16 18:39:07.000000000 -0500
29733@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
29734 cs->commands_pending = 0;
29735 cs->cur_at_seq = 0;
29736 cs->gotfwver = -1;
29737- cs->open_count = 0;
29738+ local_set(&cs->open_count, 0);
29739 cs->dev = NULL;
29740 cs->tty = NULL;
29741 cs->tty_dev = NULL;
29742diff -urNp linux-3.1.4/drivers/isdn/gigaset/gigaset.h linux-3.1.4/drivers/isdn/gigaset/gigaset.h
29743--- linux-3.1.4/drivers/isdn/gigaset/gigaset.h 2011-11-11 15:19:27.000000000 -0500
29744+++ linux-3.1.4/drivers/isdn/gigaset/gigaset.h 2011-11-16 18:39:07.000000000 -0500
29745@@ -35,6 +35,7 @@
29746 #include <linux/tty_driver.h>
29747 #include <linux/list.h>
29748 #include <linux/atomic.h>
29749+#include <asm/local.h>
29750
29751 #define GIG_VERSION {0, 5, 0, 0}
29752 #define GIG_COMPAT {0, 4, 0, 0}
29753@@ -433,7 +434,7 @@ struct cardstate {
29754 spinlock_t cmdlock;
29755 unsigned curlen, cmdbytes;
29756
29757- unsigned open_count;
29758+ local_t open_count;
29759 struct tty_struct *tty;
29760 struct tasklet_struct if_wake_tasklet;
29761 unsigned control_state;
29762diff -urNp linux-3.1.4/drivers/isdn/gigaset/interface.c linux-3.1.4/drivers/isdn/gigaset/interface.c
29763--- linux-3.1.4/drivers/isdn/gigaset/interface.c 2011-11-11 15:19:27.000000000 -0500
29764+++ linux-3.1.4/drivers/isdn/gigaset/interface.c 2011-11-16 18:39:07.000000000 -0500
29765@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
29766 }
29767 tty->driver_data = cs;
29768
29769- ++cs->open_count;
29770-
29771- if (cs->open_count == 1) {
29772+ if (local_inc_return(&cs->open_count) == 1) {
29773 spin_lock_irqsave(&cs->lock, flags);
29774 cs->tty = tty;
29775 spin_unlock_irqrestore(&cs->lock, flags);
29776@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
29777
29778 if (!cs->connected)
29779 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29780- else if (!cs->open_count)
29781+ else if (!local_read(&cs->open_count))
29782 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29783 else {
29784- if (!--cs->open_count) {
29785+ if (!local_dec_return(&cs->open_count)) {
29786 spin_lock_irqsave(&cs->lock, flags);
29787 cs->tty = NULL;
29788 spin_unlock_irqrestore(&cs->lock, flags);
29789@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
29790 if (!cs->connected) {
29791 gig_dbg(DEBUG_IF, "not connected");
29792 retval = -ENODEV;
29793- } else if (!cs->open_count)
29794+ } else if (!local_read(&cs->open_count))
29795 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29796 else {
29797 retval = 0;
29798@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
29799 retval = -ENODEV;
29800 goto done;
29801 }
29802- if (!cs->open_count) {
29803+ if (!local_read(&cs->open_count)) {
29804 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29805 retval = -ENODEV;
29806 goto done;
29807@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
29808 if (!cs->connected) {
29809 gig_dbg(DEBUG_IF, "not connected");
29810 retval = -ENODEV;
29811- } else if (!cs->open_count)
29812+ } else if (!local_read(&cs->open_count))
29813 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29814 else if (cs->mstate != MS_LOCKED) {
29815 dev_warn(cs->dev, "can't write to unlocked device\n");
29816@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
29817
29818 if (!cs->connected)
29819 gig_dbg(DEBUG_IF, "not connected");
29820- else if (!cs->open_count)
29821+ else if (!local_read(&cs->open_count))
29822 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29823 else if (cs->mstate != MS_LOCKED)
29824 dev_warn(cs->dev, "can't write to unlocked device\n");
29825@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
29826
29827 if (!cs->connected)
29828 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29829- else if (!cs->open_count)
29830+ else if (!local_read(&cs->open_count))
29831 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29832 else
29833 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29834@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
29835
29836 if (!cs->connected)
29837 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29838- else if (!cs->open_count)
29839+ else if (!local_read(&cs->open_count))
29840 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29841 else
29842 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29843@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
29844 goto out;
29845 }
29846
29847- if (!cs->open_count) {
29848+ if (!local_read(&cs->open_count)) {
29849 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29850 goto out;
29851 }
29852diff -urNp linux-3.1.4/drivers/isdn/hardware/avm/b1.c linux-3.1.4/drivers/isdn/hardware/avm/b1.c
29853--- linux-3.1.4/drivers/isdn/hardware/avm/b1.c 2011-11-11 15:19:27.000000000 -0500
29854+++ linux-3.1.4/drivers/isdn/hardware/avm/b1.c 2011-11-16 18:39:07.000000000 -0500
29855@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
29856 }
29857 if (left) {
29858 if (t4file->user) {
29859- if (copy_from_user(buf, dp, left))
29860+ if (left > sizeof buf || copy_from_user(buf, dp, left))
29861 return -EFAULT;
29862 } else {
29863 memcpy(buf, dp, left);
29864@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
29865 }
29866 if (left) {
29867 if (config->user) {
29868- if (copy_from_user(buf, dp, left))
29869+ if (left > sizeof buf || copy_from_user(buf, dp, left))
29870 return -EFAULT;
29871 } else {
29872 memcpy(buf, dp, left);
29873diff -urNp linux-3.1.4/drivers/isdn/hardware/eicon/capidtmf.c linux-3.1.4/drivers/isdn/hardware/eicon/capidtmf.c
29874--- linux-3.1.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-11 15:19:27.000000000 -0500
29875+++ linux-3.1.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-16 18:40:10.000000000 -0500
29876@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
29877 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
29878 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
29879
29880+ pax_track_stack();
29881
29882 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
29883 {
29884diff -urNp linux-3.1.4/drivers/isdn/hardware/eicon/capifunc.c linux-3.1.4/drivers/isdn/hardware/eicon/capifunc.c
29885--- linux-3.1.4/drivers/isdn/hardware/eicon/capifunc.c 2011-11-11 15:19:27.000000000 -0500
29886+++ linux-3.1.4/drivers/isdn/hardware/eicon/capifunc.c 2011-11-16 18:40:10.000000000 -0500
29887@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
29888 IDI_SYNC_REQ req;
29889 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29890
29891+ pax_track_stack();
29892+
29893 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29894
29895 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29896diff -urNp linux-3.1.4/drivers/isdn/hardware/eicon/diddfunc.c linux-3.1.4/drivers/isdn/hardware/eicon/diddfunc.c
29897--- linux-3.1.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-11 15:19:27.000000000 -0500
29898+++ linux-3.1.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-16 18:40:10.000000000 -0500
29899@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
29900 IDI_SYNC_REQ req;
29901 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29902
29903+ pax_track_stack();
29904+
29905 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29906
29907 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29908diff -urNp linux-3.1.4/drivers/isdn/hardware/eicon/divasfunc.c linux-3.1.4/drivers/isdn/hardware/eicon/divasfunc.c
29909--- linux-3.1.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-11 15:19:27.000000000 -0500
29910+++ linux-3.1.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-16 18:40:10.000000000 -0500
29911@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
29912 IDI_SYNC_REQ req;
29913 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29914
29915+ pax_track_stack();
29916+
29917 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29918
29919 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29920diff -urNp linux-3.1.4/drivers/isdn/hardware/eicon/divasync.h linux-3.1.4/drivers/isdn/hardware/eicon/divasync.h
29921--- linux-3.1.4/drivers/isdn/hardware/eicon/divasync.h 2011-11-11 15:19:27.000000000 -0500
29922+++ linux-3.1.4/drivers/isdn/hardware/eicon/divasync.h 2011-11-16 18:39:07.000000000 -0500
29923@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
29924 } diva_didd_add_adapter_t;
29925 typedef struct _diva_didd_remove_adapter {
29926 IDI_CALL p_request;
29927-} diva_didd_remove_adapter_t;
29928+} __no_const diva_didd_remove_adapter_t;
29929 typedef struct _diva_didd_read_adapter_array {
29930 void * buffer;
29931 dword length;
29932diff -urNp linux-3.1.4/drivers/isdn/hardware/eicon/idifunc.c linux-3.1.4/drivers/isdn/hardware/eicon/idifunc.c
29933--- linux-3.1.4/drivers/isdn/hardware/eicon/idifunc.c 2011-11-11 15:19:27.000000000 -0500
29934+++ linux-3.1.4/drivers/isdn/hardware/eicon/idifunc.c 2011-11-16 18:40:10.000000000 -0500
29935@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
29936 IDI_SYNC_REQ req;
29937 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29938
29939+ pax_track_stack();
29940+
29941 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29942
29943 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29944diff -urNp linux-3.1.4/drivers/isdn/hardware/eicon/message.c linux-3.1.4/drivers/isdn/hardware/eicon/message.c
29945--- linux-3.1.4/drivers/isdn/hardware/eicon/message.c 2011-11-11 15:19:27.000000000 -0500
29946+++ linux-3.1.4/drivers/isdn/hardware/eicon/message.c 2011-11-16 18:40:10.000000000 -0500
29947@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
29948 dword d;
29949 word w;
29950
29951+ pax_track_stack();
29952+
29953 a = plci->adapter;
29954 Id = ((word)plci->Id<<8)|a->Id;
29955 PUT_WORD(&SS_Ind[4],0x0000);
29956@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
29957 word j, n, w;
29958 dword d;
29959
29960+ pax_track_stack();
29961+
29962
29963 for(i=0;i<8;i++) bp_parms[i].length = 0;
29964 for(i=0;i<2;i++) global_config[i].length = 0;
29965@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
29966 const byte llc3[] = {4,3,2,2,6,6,0};
29967 const byte header[] = {0,2,3,3,0,0,0};
29968
29969+ pax_track_stack();
29970+
29971 for(i=0;i<8;i++) bp_parms[i].length = 0;
29972 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
29973 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
29974@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
29975 word appl_number_group_type[MAX_APPL];
29976 PLCI *auxplci;
29977
29978+ pax_track_stack();
29979+
29980 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
29981
29982 if(!a->group_optimization_enabled)
29983diff -urNp linux-3.1.4/drivers/isdn/hardware/eicon/mntfunc.c linux-3.1.4/drivers/isdn/hardware/eicon/mntfunc.c
29984--- linux-3.1.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-11 15:19:27.000000000 -0500
29985+++ linux-3.1.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-16 18:40:10.000000000 -0500
29986@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
29987 IDI_SYNC_REQ req;
29988 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29989
29990+ pax_track_stack();
29991+
29992 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29993
29994 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29995diff -urNp linux-3.1.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.1.4/drivers/isdn/hardware/eicon/xdi_adapter.h
29996--- linux-3.1.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-11 15:19:27.000000000 -0500
29997+++ linux-3.1.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-16 18:39:07.000000000 -0500
29998@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
29999 typedef struct _diva_os_idi_adapter_interface {
30000 diva_init_card_proc_t cleanup_adapter_proc;
30001 diva_cmd_card_proc_t cmd_proc;
30002-} diva_os_idi_adapter_interface_t;
30003+} __no_const diva_os_idi_adapter_interface_t;
30004
30005 typedef struct _diva_os_xdi_adapter {
30006 struct list_head link;
30007diff -urNp linux-3.1.4/drivers/isdn/i4l/isdn_common.c linux-3.1.4/drivers/isdn/i4l/isdn_common.c
30008--- linux-3.1.4/drivers/isdn/i4l/isdn_common.c 2011-11-11 15:19:27.000000000 -0500
30009+++ linux-3.1.4/drivers/isdn/i4l/isdn_common.c 2011-11-16 18:40:10.000000000 -0500
30010@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
30011 } iocpar;
30012 void __user *argp = (void __user *)arg;
30013
30014+ pax_track_stack();
30015+
30016 #define name iocpar.name
30017 #define bname iocpar.bname
30018 #define iocts iocpar.iocts
30019diff -urNp linux-3.1.4/drivers/isdn/icn/icn.c linux-3.1.4/drivers/isdn/icn/icn.c
30020--- linux-3.1.4/drivers/isdn/icn/icn.c 2011-11-11 15:19:27.000000000 -0500
30021+++ linux-3.1.4/drivers/isdn/icn/icn.c 2011-11-16 18:39:07.000000000 -0500
30022@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
30023 if (count > len)
30024 count = len;
30025 if (user) {
30026- if (copy_from_user(msg, buf, count))
30027+ if (count > sizeof msg || copy_from_user(msg, buf, count))
30028 return -EFAULT;
30029 } else
30030 memcpy(msg, buf, count);
30031diff -urNp linux-3.1.4/drivers/lguest/core.c linux-3.1.4/drivers/lguest/core.c
30032--- linux-3.1.4/drivers/lguest/core.c 2011-11-11 15:19:27.000000000 -0500
30033+++ linux-3.1.4/drivers/lguest/core.c 2011-11-16 18:39:07.000000000 -0500
30034@@ -92,9 +92,17 @@ static __init int map_switcher(void)
30035 * it's worked so far. The end address needs +1 because __get_vm_area
30036 * allocates an extra guard page, so we need space for that.
30037 */
30038+
30039+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30040+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30041+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30042+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30043+#else
30044 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30045 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30046 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30047+#endif
30048+
30049 if (!switcher_vma) {
30050 err = -ENOMEM;
30051 printk("lguest: could not map switcher pages high\n");
30052@@ -119,7 +127,7 @@ static __init int map_switcher(void)
30053 * Now the Switcher is mapped at the right address, we can't fail!
30054 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30055 */
30056- memcpy(switcher_vma->addr, start_switcher_text,
30057+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30058 end_switcher_text - start_switcher_text);
30059
30060 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30061diff -urNp linux-3.1.4/drivers/lguest/x86/core.c linux-3.1.4/drivers/lguest/x86/core.c
30062--- linux-3.1.4/drivers/lguest/x86/core.c 2011-11-11 15:19:27.000000000 -0500
30063+++ linux-3.1.4/drivers/lguest/x86/core.c 2011-11-16 18:39:07.000000000 -0500
30064@@ -59,7 +59,7 @@ static struct {
30065 /* Offset from where switcher.S was compiled to where we've copied it */
30066 static unsigned long switcher_offset(void)
30067 {
30068- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30069+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30070 }
30071
30072 /* This cpu's struct lguest_pages. */
30073@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30074 * These copies are pretty cheap, so we do them unconditionally: */
30075 /* Save the current Host top-level page directory.
30076 */
30077+
30078+#ifdef CONFIG_PAX_PER_CPU_PGD
30079+ pages->state.host_cr3 = read_cr3();
30080+#else
30081 pages->state.host_cr3 = __pa(current->mm->pgd);
30082+#endif
30083+
30084 /*
30085 * Set up the Guest's page tables to see this CPU's pages (and no
30086 * other CPU's pages).
30087@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
30088 * compiled-in switcher code and the high-mapped copy we just made.
30089 */
30090 for (i = 0; i < IDT_ENTRIES; i++)
30091- default_idt_entries[i] += switcher_offset();
30092+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30093
30094 /*
30095 * Set up the Switcher's per-cpu areas.
30096@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
30097 * it will be undisturbed when we switch. To change %cs and jump we
30098 * need this structure to feed to Intel's "lcall" instruction.
30099 */
30100- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30101+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30102 lguest_entry.segment = LGUEST_CS;
30103
30104 /*
30105diff -urNp linux-3.1.4/drivers/lguest/x86/switcher_32.S linux-3.1.4/drivers/lguest/x86/switcher_32.S
30106--- linux-3.1.4/drivers/lguest/x86/switcher_32.S 2011-11-11 15:19:27.000000000 -0500
30107+++ linux-3.1.4/drivers/lguest/x86/switcher_32.S 2011-11-16 18:39:07.000000000 -0500
30108@@ -87,6 +87,7 @@
30109 #include <asm/page.h>
30110 #include <asm/segment.h>
30111 #include <asm/lguest.h>
30112+#include <asm/processor-flags.h>
30113
30114 // We mark the start of the code to copy
30115 // It's placed in .text tho it's never run here
30116@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30117 // Changes type when we load it: damn Intel!
30118 // For after we switch over our page tables
30119 // That entry will be read-only: we'd crash.
30120+
30121+#ifdef CONFIG_PAX_KERNEXEC
30122+ mov %cr0, %edx
30123+ xor $X86_CR0_WP, %edx
30124+ mov %edx, %cr0
30125+#endif
30126+
30127 movl $(GDT_ENTRY_TSS*8), %edx
30128 ltr %dx
30129
30130@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30131 // Let's clear it again for our return.
30132 // The GDT descriptor of the Host
30133 // Points to the table after two "size" bytes
30134- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30135+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30136 // Clear "used" from type field (byte 5, bit 2)
30137- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30138+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30139+
30140+#ifdef CONFIG_PAX_KERNEXEC
30141+ mov %cr0, %eax
30142+ xor $X86_CR0_WP, %eax
30143+ mov %eax, %cr0
30144+#endif
30145
30146 // Once our page table's switched, the Guest is live!
30147 // The Host fades as we run this final step.
30148@@ -295,13 +309,12 @@ deliver_to_host:
30149 // I consulted gcc, and it gave
30150 // These instructions, which I gladly credit:
30151 leal (%edx,%ebx,8), %eax
30152- movzwl (%eax),%edx
30153- movl 4(%eax), %eax
30154- xorw %ax, %ax
30155- orl %eax, %edx
30156+ movl 4(%eax), %edx
30157+ movw (%eax), %dx
30158 // Now the address of the handler's in %edx
30159 // We call it now: its "iret" drops us home.
30160- jmp *%edx
30161+ ljmp $__KERNEL_CS, $1f
30162+1: jmp *%edx
30163
30164 // Every interrupt can come to us here
30165 // But we must truly tell each apart.
30166diff -urNp linux-3.1.4/drivers/macintosh/macio_asic.c linux-3.1.4/drivers/macintosh/macio_asic.c
30167--- linux-3.1.4/drivers/macintosh/macio_asic.c 2011-11-11 15:19:27.000000000 -0500
30168+++ linux-3.1.4/drivers/macintosh/macio_asic.c 2011-11-16 18:39:07.000000000 -0500
30169@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
30170 * MacIO is matched against any Apple ID, it's probe() function
30171 * will then decide wether it applies or not
30172 */
30173-static const struct pci_device_id __devinitdata pci_ids [] = { {
30174+static const struct pci_device_id __devinitconst pci_ids [] = { {
30175 .vendor = PCI_VENDOR_ID_APPLE,
30176 .device = PCI_ANY_ID,
30177 .subvendor = PCI_ANY_ID,
30178diff -urNp linux-3.1.4/drivers/md/dm.c linux-3.1.4/drivers/md/dm.c
30179--- linux-3.1.4/drivers/md/dm.c 2011-11-11 15:19:27.000000000 -0500
30180+++ linux-3.1.4/drivers/md/dm.c 2011-11-16 18:39:07.000000000 -0500
30181@@ -165,9 +165,9 @@ struct mapped_device {
30182 /*
30183 * Event handling.
30184 */
30185- atomic_t event_nr;
30186+ atomic_unchecked_t event_nr;
30187 wait_queue_head_t eventq;
30188- atomic_t uevent_seq;
30189+ atomic_unchecked_t uevent_seq;
30190 struct list_head uevent_list;
30191 spinlock_t uevent_lock; /* Protect access to uevent_list */
30192
30193@@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(i
30194 rwlock_init(&md->map_lock);
30195 atomic_set(&md->holders, 1);
30196 atomic_set(&md->open_count, 0);
30197- atomic_set(&md->event_nr, 0);
30198- atomic_set(&md->uevent_seq, 0);
30199+ atomic_set_unchecked(&md->event_nr, 0);
30200+ atomic_set_unchecked(&md->uevent_seq, 0);
30201 INIT_LIST_HEAD(&md->uevent_list);
30202 spin_lock_init(&md->uevent_lock);
30203
30204@@ -1978,7 +1978,7 @@ static void event_callback(void *context
30205
30206 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
30207
30208- atomic_inc(&md->event_nr);
30209+ atomic_inc_unchecked(&md->event_nr);
30210 wake_up(&md->eventq);
30211 }
30212
30213@@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_devi
30214
30215 uint32_t dm_next_uevent_seq(struct mapped_device *md)
30216 {
30217- return atomic_add_return(1, &md->uevent_seq);
30218+ return atomic_add_return_unchecked(1, &md->uevent_seq);
30219 }
30220
30221 uint32_t dm_get_event_nr(struct mapped_device *md)
30222 {
30223- return atomic_read(&md->event_nr);
30224+ return atomic_read_unchecked(&md->event_nr);
30225 }
30226
30227 int dm_wait_event(struct mapped_device *md, int event_nr)
30228 {
30229 return wait_event_interruptible(md->eventq,
30230- (event_nr != atomic_read(&md->event_nr)));
30231+ (event_nr != atomic_read_unchecked(&md->event_nr)));
30232 }
30233
30234 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
30235diff -urNp linux-3.1.4/drivers/md/dm-ioctl.c linux-3.1.4/drivers/md/dm-ioctl.c
30236--- linux-3.1.4/drivers/md/dm-ioctl.c 2011-11-11 15:19:27.000000000 -0500
30237+++ linux-3.1.4/drivers/md/dm-ioctl.c 2011-11-16 18:39:07.000000000 -0500
30238@@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, str
30239 cmd == DM_LIST_VERSIONS_CMD)
30240 return 0;
30241
30242- if ((cmd == DM_DEV_CREATE_CMD)) {
30243+ if (cmd == DM_DEV_CREATE_CMD) {
30244 if (!*param->name) {
30245 DMWARN("name not supplied when creating device");
30246 return -EINVAL;
30247diff -urNp linux-3.1.4/drivers/md/dm-raid1.c linux-3.1.4/drivers/md/dm-raid1.c
30248--- linux-3.1.4/drivers/md/dm-raid1.c 2011-11-11 15:19:27.000000000 -0500
30249+++ linux-3.1.4/drivers/md/dm-raid1.c 2011-11-16 18:39:07.000000000 -0500
30250@@ -40,7 +40,7 @@ enum dm_raid1_error {
30251
30252 struct mirror {
30253 struct mirror_set *ms;
30254- atomic_t error_count;
30255+ atomic_unchecked_t error_count;
30256 unsigned long error_type;
30257 struct dm_dev *dev;
30258 sector_t offset;
30259@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
30260 struct mirror *m;
30261
30262 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
30263- if (!atomic_read(&m->error_count))
30264+ if (!atomic_read_unchecked(&m->error_count))
30265 return m;
30266
30267 return NULL;
30268@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
30269 * simple way to tell if a device has encountered
30270 * errors.
30271 */
30272- atomic_inc(&m->error_count);
30273+ atomic_inc_unchecked(&m->error_count);
30274
30275 if (test_and_set_bit(error_type, &m->error_type))
30276 return;
30277@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
30278 struct mirror *m = get_default_mirror(ms);
30279
30280 do {
30281- if (likely(!atomic_read(&m->error_count)))
30282+ if (likely(!atomic_read_unchecked(&m->error_count)))
30283 return m;
30284
30285 if (m-- == ms->mirror)
30286@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
30287 {
30288 struct mirror *default_mirror = get_default_mirror(m->ms);
30289
30290- return !atomic_read(&default_mirror->error_count);
30291+ return !atomic_read_unchecked(&default_mirror->error_count);
30292 }
30293
30294 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30295@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
30296 */
30297 if (likely(region_in_sync(ms, region, 1)))
30298 m = choose_mirror(ms, bio->bi_sector);
30299- else if (m && atomic_read(&m->error_count))
30300+ else if (m && atomic_read_unchecked(&m->error_count))
30301 m = NULL;
30302
30303 if (likely(m))
30304@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
30305 }
30306
30307 ms->mirror[mirror].ms = ms;
30308- atomic_set(&(ms->mirror[mirror].error_count), 0);
30309+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30310 ms->mirror[mirror].error_type = 0;
30311 ms->mirror[mirror].offset = offset;
30312
30313@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
30314 */
30315 static char device_status_char(struct mirror *m)
30316 {
30317- if (!atomic_read(&(m->error_count)))
30318+ if (!atomic_read_unchecked(&(m->error_count)))
30319 return 'A';
30320
30321 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
30322diff -urNp linux-3.1.4/drivers/md/dm-stripe.c linux-3.1.4/drivers/md/dm-stripe.c
30323--- linux-3.1.4/drivers/md/dm-stripe.c 2011-11-11 15:19:27.000000000 -0500
30324+++ linux-3.1.4/drivers/md/dm-stripe.c 2011-11-16 18:39:07.000000000 -0500
30325@@ -20,7 +20,7 @@ struct stripe {
30326 struct dm_dev *dev;
30327 sector_t physical_start;
30328
30329- atomic_t error_count;
30330+ atomic_unchecked_t error_count;
30331 };
30332
30333 struct stripe_c {
30334@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
30335 kfree(sc);
30336 return r;
30337 }
30338- atomic_set(&(sc->stripe[i].error_count), 0);
30339+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
30340 }
30341
30342 ti->private = sc;
30343@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
30344 DMEMIT("%d ", sc->stripes);
30345 for (i = 0; i < sc->stripes; i++) {
30346 DMEMIT("%s ", sc->stripe[i].dev->name);
30347- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
30348+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
30349 'D' : 'A';
30350 }
30351 buffer[i] = '\0';
30352@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
30353 */
30354 for (i = 0; i < sc->stripes; i++)
30355 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
30356- atomic_inc(&(sc->stripe[i].error_count));
30357- if (atomic_read(&(sc->stripe[i].error_count)) <
30358+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
30359+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
30360 DM_IO_ERROR_THRESHOLD)
30361 schedule_work(&sc->trigger_event);
30362 }
30363diff -urNp linux-3.1.4/drivers/md/dm-table.c linux-3.1.4/drivers/md/dm-table.c
30364--- linux-3.1.4/drivers/md/dm-table.c 2011-11-11 15:19:27.000000000 -0500
30365+++ linux-3.1.4/drivers/md/dm-table.c 2011-11-16 18:39:07.000000000 -0500
30366@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct
30367 if (!dev_size)
30368 return 0;
30369
30370- if ((start >= dev_size) || (start + len > dev_size)) {
30371+ if ((start >= dev_size) || (len > dev_size - start)) {
30372 DMWARN("%s: %s too small for target: "
30373 "start=%llu, len=%llu, dev_size=%llu",
30374 dm_device_name(ti->table->md), bdevname(bdev, b),
30375diff -urNp linux-3.1.4/drivers/md/md.c linux-3.1.4/drivers/md/md.c
30376--- linux-3.1.4/drivers/md/md.c 2011-11-11 15:19:27.000000000 -0500
30377+++ linux-3.1.4/drivers/md/md.c 2011-11-16 18:39:07.000000000 -0500
30378@@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
30379 * start build, activate spare
30380 */
30381 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
30382-static atomic_t md_event_count;
30383+static atomic_unchecked_t md_event_count;
30384 void md_new_event(mddev_t *mddev)
30385 {
30386- atomic_inc(&md_event_count);
30387+ atomic_inc_unchecked(&md_event_count);
30388 wake_up(&md_event_waiters);
30389 }
30390 EXPORT_SYMBOL_GPL(md_new_event);
30391@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
30392 */
30393 static void md_new_event_inintr(mddev_t *mddev)
30394 {
30395- atomic_inc(&md_event_count);
30396+ atomic_inc_unchecked(&md_event_count);
30397 wake_up(&md_event_waiters);
30398 }
30399
30400@@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev
30401
30402 rdev->preferred_minor = 0xffff;
30403 rdev->data_offset = le64_to_cpu(sb->data_offset);
30404- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30405+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30406
30407 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
30408 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
30409@@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev,
30410 else
30411 sb->resync_offset = cpu_to_le64(0);
30412
30413- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
30414+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
30415
30416 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
30417 sb->size = cpu_to_le64(mddev->dev_sectors);
30418@@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
30419 static ssize_t
30420 errors_show(mdk_rdev_t *rdev, char *page)
30421 {
30422- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
30423+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
30424 }
30425
30426 static ssize_t
30427@@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const cha
30428 char *e;
30429 unsigned long n = simple_strtoul(buf, &e, 10);
30430 if (*buf && (*e == 0 || *e == '\n')) {
30431- atomic_set(&rdev->corrected_errors, n);
30432+ atomic_set_unchecked(&rdev->corrected_errors, n);
30433 return len;
30434 }
30435 return -EINVAL;
30436@@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
30437 rdev->sb_loaded = 0;
30438 rdev->bb_page = NULL;
30439 atomic_set(&rdev->nr_pending, 0);
30440- atomic_set(&rdev->read_errors, 0);
30441- atomic_set(&rdev->corrected_errors, 0);
30442+ atomic_set_unchecked(&rdev->read_errors, 0);
30443+ atomic_set_unchecked(&rdev->corrected_errors, 0);
30444
30445 INIT_LIST_HEAD(&rdev->same_set);
30446 init_waitqueue_head(&rdev->blocked_wait);
30447@@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *
30448
30449 spin_unlock(&pers_lock);
30450 seq_printf(seq, "\n");
30451- seq->poll_event = atomic_read(&md_event_count);
30452+ seq->poll_event = atomic_read_unchecked(&md_event_count);
30453 return 0;
30454 }
30455 if (v == (void*)2) {
30456@@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *
30457 chunk_kb ? "KB" : "B");
30458 if (bitmap->file) {
30459 seq_printf(seq, ", file: ");
30460- seq_path(seq, &bitmap->file->f_path, " \t\n");
30461+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
30462 }
30463
30464 seq_printf(seq, "\n");
30465@@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *ino
30466 return error;
30467
30468 seq = file->private_data;
30469- seq->poll_event = atomic_read(&md_event_count);
30470+ seq->poll_event = atomic_read_unchecked(&md_event_count);
30471 return error;
30472 }
30473
30474@@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct f
30475 /* always allow read */
30476 mask = POLLIN | POLLRDNORM;
30477
30478- if (seq->poll_event != atomic_read(&md_event_count))
30479+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
30480 mask |= POLLERR | POLLPRI;
30481 return mask;
30482 }
30483@@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev,
30484 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
30485 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
30486 (int)part_stat_read(&disk->part0, sectors[1]) -
30487- atomic_read(&disk->sync_io);
30488+ atomic_read_unchecked(&disk->sync_io);
30489 /* sync IO will cause sync_io to increase before the disk_stats
30490 * as sync_io is counted when a request starts, and
30491 * disk_stats is counted when it completes.
30492diff -urNp linux-3.1.4/drivers/md/md.h linux-3.1.4/drivers/md/md.h
30493--- linux-3.1.4/drivers/md/md.h 2011-11-11 15:19:27.000000000 -0500
30494+++ linux-3.1.4/drivers/md/md.h 2011-11-16 18:39:07.000000000 -0500
30495@@ -124,13 +124,13 @@ struct mdk_rdev_s
30496 * only maintained for arrays that
30497 * support hot removal
30498 */
30499- atomic_t read_errors; /* number of consecutive read errors that
30500+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
30501 * we have tried to ignore.
30502 */
30503 struct timespec last_read_error; /* monotonic time since our
30504 * last read error
30505 */
30506- atomic_t corrected_errors; /* number of corrected read errors,
30507+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
30508 * for reporting to userspace and storing
30509 * in superblock.
30510 */
30511@@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_
30512
30513 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
30514 {
30515- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30516+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30517 }
30518
30519 struct mdk_personality
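
The atomic_t to atomic_unchecked_t conversions in the md hunks above follow the PaX REFCOUNT convention: when reference-count overflow protection is enabled, plain atomic_t operations are instrumented so they cannot silently wrap, and counters that are purely statistical (event counts, read/corrected error totals, sync_io accounting) are moved to the _unchecked variants, which keep ordinary wrapping behaviour. A rough single-threaded userspace model of the distinction only, not the PaX implementation:

#include <limits.h>
#include <stdio.h>

/* Model of a protected counter: refuses to wrap (the real mechanism */
/* detects the overflow and reports it; this only shows the intent). */
static void checked_inc(unsigned int *v)
{
        if (*v != UINT_MAX)
                (*v)++;
}

/* Model of atomic_inc_unchecked(): plain wrapping increment. */
static void unchecked_inc(unsigned int *v)
{
        (*v)++;                 /* wraps to 0; harmless for statistics */
}

int main(void)
{
        unsigned int refcount = UINT_MAX, events = UINT_MAX;

        checked_inc(&refcount);         /* stays saturated at UINT_MAX */
        unchecked_inc(&events);         /* wraps to 0 */
        printf("refcount=%u events=%u\n", refcount, events);
        return 0;
}
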
30520diff -urNp linux-3.1.4/drivers/md/raid10.c linux-3.1.4/drivers/md/raid10.c
30521--- linux-3.1.4/drivers/md/raid10.c 2011-11-11 15:19:27.000000000 -0500
30522+++ linux-3.1.4/drivers/md/raid10.c 2011-11-16 18:39:07.000000000 -0500
30523@@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bi
30524 /* The write handler will notice the lack of
30525 * R10BIO_Uptodate and record any errors etc
30526 */
30527- atomic_add(r10_bio->sectors,
30528+ atomic_add_unchecked(r10_bio->sectors,
30529 &conf->mirrors[d].rdev->corrected_errors);
30530
30531 /* for reconstruct, we always reschedule after a read.
30532@@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mdde
30533 {
30534 struct timespec cur_time_mon;
30535 unsigned long hours_since_last;
30536- unsigned int read_errors = atomic_read(&rdev->read_errors);
30537+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
30538
30539 ktime_get_ts(&cur_time_mon);
30540
30541@@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mdde
30542 * overflowing the shift of read_errors by hours_since_last.
30543 */
30544 if (hours_since_last >= 8 * sizeof(read_errors))
30545- atomic_set(&rdev->read_errors, 0);
30546+ atomic_set_unchecked(&rdev->read_errors, 0);
30547 else
30548- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
30549+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
30550 }
30551
30552 static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
30553@@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf,
30554 return;
30555
30556 check_decay_read_errors(mddev, rdev);
30557- atomic_inc(&rdev->read_errors);
30558- if (atomic_read(&rdev->read_errors) > max_read_errors) {
30559+ atomic_inc_unchecked(&rdev->read_errors);
30560+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
30561 char b[BDEVNAME_SIZE];
30562 bdevname(rdev->bdev, b);
30563
30564@@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf,
30565 "md/raid10:%s: %s: Raid device exceeded "
30566 "read_error threshold [cur %d:max %d]\n",
30567 mdname(mddev), b,
30568- atomic_read(&rdev->read_errors), max_read_errors);
30569+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
30570 printk(KERN_NOTICE
30571 "md/raid10:%s: %s: Failing raid device\n",
30572 mdname(mddev), b);
30573@@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf,
30574 (unsigned long long)(
30575 sect + rdev->data_offset),
30576 bdevname(rdev->bdev, b));
30577- atomic_add(s, &rdev->corrected_errors);
30578+ atomic_add_unchecked(s, &rdev->corrected_errors);
30579 }
30580
30581 rdev_dec_pending(rdev, mddev);
30582diff -urNp linux-3.1.4/drivers/md/raid1.c linux-3.1.4/drivers/md/raid1.c
30583--- linux-3.1.4/drivers/md/raid1.c 2011-11-11 15:19:27.000000000 -0500
30584+++ linux-3.1.4/drivers/md/raid1.c 2011-11-16 18:39:07.000000000 -0500
30585@@ -1541,7 +1541,7 @@ static int fix_sync_read_error(r1bio_t *
30586 if (r1_sync_page_io(rdev, sect, s,
30587 bio->bi_io_vec[idx].bv_page,
30588 READ) != 0)
30589- atomic_add(s, &rdev->corrected_errors);
30590+ atomic_add_unchecked(s, &rdev->corrected_errors);
30591 }
30592 sectors -= s;
30593 sect += s;
30594@@ -1754,7 +1754,7 @@ static void fix_read_error(conf_t *conf,
30595 test_bit(In_sync, &rdev->flags)) {
30596 if (r1_sync_page_io(rdev, sect, s,
30597 conf->tmppage, READ)) {
30598- atomic_add(s, &rdev->corrected_errors);
30599+ atomic_add_unchecked(s, &rdev->corrected_errors);
30600 printk(KERN_INFO
30601 "md/raid1:%s: read error corrected "
30602 "(%d sectors at %llu on %s)\n",
30603diff -urNp linux-3.1.4/drivers/md/raid5.c linux-3.1.4/drivers/md/raid5.c
30604--- linux-3.1.4/drivers/md/raid5.c 2011-11-11 15:19:27.000000000 -0500
30605+++ linux-3.1.4/drivers/md/raid5.c 2011-11-16 18:40:10.000000000 -0500
30606@@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struc
30607 (unsigned long long)(sh->sector
30608 + rdev->data_offset),
30609 bdevname(rdev->bdev, b));
30610- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
30611+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
30612 clear_bit(R5_ReadError, &sh->dev[i].flags);
30613 clear_bit(R5_ReWrite, &sh->dev[i].flags);
30614 }
30615- if (atomic_read(&conf->disks[i].rdev->read_errors))
30616- atomic_set(&conf->disks[i].rdev->read_errors, 0);
30617+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
30618+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
30619 } else {
30620 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
30621 int retry = 0;
30622 rdev = conf->disks[i].rdev;
30623
30624 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
30625- atomic_inc(&rdev->read_errors);
30626+ atomic_inc_unchecked(&rdev->read_errors);
30627 if (conf->mddev->degraded >= conf->max_degraded)
30628 printk_ratelimited(
30629 KERN_WARNING
30630@@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struc
30631 (unsigned long long)(sh->sector
30632 + rdev->data_offset),
30633 bdn);
30634- else if (atomic_read(&rdev->read_errors)
30635+ else if (atomic_read_unchecked(&rdev->read_errors)
30636 > conf->max_nr_stripes)
30637 printk(KERN_WARNING
30638 "md/raid:%s: Too many read errors, failing device %s.\n",
30639@@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct s
30640 sector_t r_sector;
30641 struct stripe_head sh2;
30642
30643+ pax_track_stack();
30644
30645 chunk_offset = sector_div(new_sector, sectors_per_chunk);
30646 stripe = new_sector;
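
The pax_track_stack() calls added here (and in many of the media, mtd and i2o hunks below) sit at the top of functions with unusually large stack frames; under PaX's stack-tracking/clearing instrumentation they let the kernel record how deep the stack actually grew so the used region can be sanitized afterwards. A crude userspace illustration of the tracking idea only, assuming a downward-growing stack (this is a model, not the PaX code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uintptr_t lowest_sp = UINTPTR_MAX;

/* Remember the lowest stack address observed so far. */
#define track_stack() do {                              \
        volatile char probe;                            \
        if ((uintptr_t)&probe < lowest_sp)              \
                lowest_sp = (uintptr_t)&probe;          \
} while (0)

static void deep_frame(void)
{
        char buf[4096];                 /* large frame, like the patched drivers */

        track_stack();
        memset(buf, 0, sizeof(buf));    /* pretend to use the buffer */
}

int main(void)
{
        volatile char top;

        deep_frame();
        printf("approx. stack used: %lu bytes\n",
               (unsigned long)((uintptr_t)&top - lowest_sp));
        return 0;
}
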
30647diff -urNp linux-3.1.4/drivers/media/common/saa7146_hlp.c linux-3.1.4/drivers/media/common/saa7146_hlp.c
30648--- linux-3.1.4/drivers/media/common/saa7146_hlp.c 2011-11-11 15:19:27.000000000 -0500
30649+++ linux-3.1.4/drivers/media/common/saa7146_hlp.c 2011-11-16 18:40:10.000000000 -0500
30650@@ -353,6 +353,8 @@ static void calculate_clipping_registers
30651
30652 int x[32], y[32], w[32], h[32];
30653
30654+ pax_track_stack();
30655+
30656 /* clear out memory */
30657 memset(&line_list[0], 0x00, sizeof(u32)*32);
30658 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
30659diff -urNp linux-3.1.4/drivers/media/dvb/ddbridge/ddbridge-core.c linux-3.1.4/drivers/media/dvb/ddbridge/ddbridge-core.c
30660--- linux-3.1.4/drivers/media/dvb/ddbridge/ddbridge-core.c 2011-11-11 15:19:27.000000000 -0500
30661+++ linux-3.1.4/drivers/media/dvb/ddbridge/ddbridge-core.c 2011-11-16 18:39:07.000000000 -0500
30662@@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = {
30663 .subvendor = _subvend, .subdevice = _subdev, \
30664 .driver_data = (unsigned long)&_driverdata }
30665
30666-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
30667+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
30668 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
30669 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
30670 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
30671diff -urNp linux-3.1.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.1.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
30672--- linux-3.1.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-11 15:19:27.000000000 -0500
30673+++ linux-3.1.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-16 18:40:10.000000000 -0500
30674@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
30675 u8 buf[HOST_LINK_BUF_SIZE];
30676 int i;
30677
30678+ pax_track_stack();
30679+
30680 dprintk("%s\n", __func__);
30681
30682 /* check if we have space for a link buf in the rx_buffer */
30683@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
30684 unsigned long timeout;
30685 int written;
30686
30687+ pax_track_stack();
30688+
30689 dprintk("%s\n", __func__);
30690
30691 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
30692diff -urNp linux-3.1.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.1.4/drivers/media/dvb/dvb-core/dvb_demux.h
30693--- linux-3.1.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-11 15:19:27.000000000 -0500
30694+++ linux-3.1.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-17 18:34:32.000000000 -0500
30695@@ -73,7 +73,7 @@ struct dvb_demux_feed {
30696 union {
30697 dmx_ts_cb ts;
30698 dmx_section_cb sec;
30699- } cb;
30700+ } __no_const cb;
30701
30702 struct dvb_demux *demux;
30703 void *priv;
30704diff -urNp linux-3.1.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.1.4/drivers/media/dvb/dvb-core/dvbdev.c
30705--- linux-3.1.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-11 15:19:27.000000000 -0500
30706+++ linux-3.1.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-16 18:39:07.000000000 -0500
30707@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
30708 const struct dvb_device *template, void *priv, int type)
30709 {
30710 struct dvb_device *dvbdev;
30711- struct file_operations *dvbdevfops;
30712+ file_operations_no_const *dvbdevfops;
30713 struct device *clsdev;
30714 int minor;
30715 int id;
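
The __no_const markers and the *_no_const typedefs (file_operations_no_const above, i2c_algorithm_no_const and others below) exist because grsecurity's constification pass makes structures made of function pointers read-only; objects that really are filled in at run time, such as the per-device fops copy built in dvb_register_device(), have to be exempted or the writes would no longer be possible. A minimal non-kernel illustration of why a runtime-populated ops structure cannot simply be const:

#include <stdio.h>

struct ops {
        int (*do_io)(int);
};

static int fast_io(int x)  { return x + 1; }
static int debug_io(int x) { return x + 1000; }

int main(int argc, char **argv)
{
        struct ops dev_ops;     /* analogous to the dvbdev fops copy: the  */
                                /* member is chosen per device at runtime, */
                                /* so a const qualifier here would reject  */
                                /* the assignment below                    */

        (void)argv;
        dev_ops.do_io = (argc > 1) ? debug_io : fast_io;
        printf("%d\n", dev_ops.do_io(1));
        return 0;
}
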
30716diff -urNp linux-3.1.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.1.4/drivers/media/dvb/dvb-usb/cxusb.c
30717--- linux-3.1.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-11 15:19:27.000000000 -0500
30718+++ linux-3.1.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-16 18:39:07.000000000 -0500
30719@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
30720 struct dib0700_adapter_state {
30721 int (*set_param_save) (struct dvb_frontend *,
30722 struct dvb_frontend_parameters *);
30723-};
30724+} __no_const;
30725
30726 static int dib7070_set_param_override(struct dvb_frontend *fe,
30727 struct dvb_frontend_parameters *fep)
30728diff -urNp linux-3.1.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.1.4/drivers/media/dvb/dvb-usb/dib0700_core.c
30729--- linux-3.1.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-11 15:19:27.000000000 -0500
30730+++ linux-3.1.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-16 18:40:10.000000000 -0500
30731@@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb
30732 if (!buf)
30733 return -ENOMEM;
30734
30735+ pax_track_stack();
30736+
30737 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
30738 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
30739 hx.addr, hx.len, hx.chk);
30740diff -urNp linux-3.1.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.1.4/drivers/media/dvb/dvb-usb/dw2102.c
30741--- linux-3.1.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-11-11 15:19:27.000000000 -0500
30742+++ linux-3.1.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-11-16 18:39:07.000000000 -0500
30743@@ -95,7 +95,7 @@ struct su3000_state {
30744
30745 struct s6x0_state {
30746 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
30747-};
30748+} __no_const;
30749
30750 /* debug */
30751 static int dvb_usb_dw2102_debug;
30752diff -urNp linux-3.1.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.1.4/drivers/media/dvb/dvb-usb/lmedm04.c
30753--- linux-3.1.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-11-11 15:19:27.000000000 -0500
30754+++ linux-3.1.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-11-16 18:40:10.000000000 -0500
30755@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
30756 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
30757 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
30758
30759+ pax_track_stack();
30760
30761 data[0] = 0x8a;
30762 len_in = 1;
30763@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
30764 int ret = 0, len_in;
30765 u8 data[512] = {0};
30766
30767+ pax_track_stack();
30768+
30769 data[0] = 0x0a;
30770 len_in = 1;
30771 info("FRM Firmware Cold Reset");
30772diff -urNp linux-3.1.4/drivers/media/dvb/frontends/dib3000.h linux-3.1.4/drivers/media/dvb/frontends/dib3000.h
30773--- linux-3.1.4/drivers/media/dvb/frontends/dib3000.h 2011-11-11 15:19:27.000000000 -0500
30774+++ linux-3.1.4/drivers/media/dvb/frontends/dib3000.h 2011-11-17 18:38:05.000000000 -0500
30775@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
30776 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
30777 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
30778 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
30779-};
30780+} __no_const;
30781
30782 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
30783 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
30784diff -urNp linux-3.1.4/drivers/media/dvb/frontends/mb86a16.c linux-3.1.4/drivers/media/dvb/frontends/mb86a16.c
30785--- linux-3.1.4/drivers/media/dvb/frontends/mb86a16.c 2011-11-11 15:19:27.000000000 -0500
30786+++ linux-3.1.4/drivers/media/dvb/frontends/mb86a16.c 2011-11-16 18:40:10.000000000 -0500
30787@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
30788 int ret = -1;
30789 int sync;
30790
30791+ pax_track_stack();
30792+
30793 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
30794
30795 fcp = 3000;
30796diff -urNp linux-3.1.4/drivers/media/dvb/frontends/or51211.c linux-3.1.4/drivers/media/dvb/frontends/or51211.c
30797--- linux-3.1.4/drivers/media/dvb/frontends/or51211.c 2011-11-11 15:19:27.000000000 -0500
30798+++ linux-3.1.4/drivers/media/dvb/frontends/or51211.c 2011-11-16 18:40:10.000000000 -0500
30799@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30800 u8 tudata[585];
30801 int i;
30802
30803+ pax_track_stack();
30804+
30805 dprintk("Firmware is %zd bytes\n",fw->size);
30806
30807 /* Get eprom data */
30808diff -urNp linux-3.1.4/drivers/media/dvb/ngene/ngene-cards.c linux-3.1.4/drivers/media/dvb/ngene/ngene-cards.c
30809--- linux-3.1.4/drivers/media/dvb/ngene/ngene-cards.c 2011-11-11 15:19:27.000000000 -0500
30810+++ linux-3.1.4/drivers/media/dvb/ngene/ngene-cards.c 2011-11-16 18:39:07.000000000 -0500
30811@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780
30812
30813 /****************************************************************************/
30814
30815-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
30816+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
30817 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
30818 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
30819 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
30820diff -urNp linux-3.1.4/drivers/media/radio/radio-cadet.c linux-3.1.4/drivers/media/radio/radio-cadet.c
30821--- linux-3.1.4/drivers/media/radio/radio-cadet.c 2011-11-11 15:19:27.000000000 -0500
30822+++ linux-3.1.4/drivers/media/radio/radio-cadet.c 2011-11-16 18:39:07.000000000 -0500
30823@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *f
30824 unsigned char readbuf[RDS_BUFFER];
30825 int i = 0;
30826
30827+ if (count > RDS_BUFFER)
30828+ return -EFAULT;
30829 mutex_lock(&dev->lock);
30830 if (dev->rdsstat == 0) {
30831 dev->rdsstat = 1;
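
The added test in cadet_read() rejects any request larger than the on-stack RDS_BUFFER before data is copied into readbuf, so a caller-chosen count can no longer overrun the local buffer. A small userspace sketch of the same guard; the buffer size and data are hypothetical stand-ins:

#include <stdio.h>
#include <string.h>

#define RDS_BUFFER 256          /* stand-in for the driver's constant */

/* Copy at most RDS_BUFFER bytes; refuse oversized requests up front. */
static long bounded_read(char *dst, size_t count, const char *src, size_t avail)
{
        if (count > RDS_BUFFER)
                return -1;      /* the driver returns an errno here */
        if (count > avail)
                count = avail;
        memcpy(dst, src, count);
        return (long)count;
}

int main(void)
{
        char out[RDS_BUFFER];
        const char data[] = "rds-block";

        printf("%ld\n", bounded_read(out, sizeof(out), data, sizeof(data)));
        printf("%ld\n", bounded_read(out, 100000, data, sizeof(data)));
        return 0;
}
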
30832diff -urNp linux-3.1.4/drivers/media/video/au0828/au0828.h linux-3.1.4/drivers/media/video/au0828/au0828.h
30833--- linux-3.1.4/drivers/media/video/au0828/au0828.h 2011-11-11 15:19:27.000000000 -0500
30834+++ linux-3.1.4/drivers/media/video/au0828/au0828.h 2011-11-16 18:39:07.000000000 -0500
30835@@ -191,7 +191,7 @@ struct au0828_dev {
30836
30837 /* I2C */
30838 struct i2c_adapter i2c_adap;
30839- struct i2c_algorithm i2c_algo;
30840+ i2c_algorithm_no_const i2c_algo;
30841 struct i2c_client i2c_client;
30842 u32 i2c_rc;
30843
30844diff -urNp linux-3.1.4/drivers/media/video/cx18/cx18-driver.c linux-3.1.4/drivers/media/video/cx18/cx18-driver.c
30845--- linux-3.1.4/drivers/media/video/cx18/cx18-driver.c 2011-11-11 15:19:27.000000000 -0500
30846+++ linux-3.1.4/drivers/media/video/cx18/cx18-driver.c 2011-11-16 18:40:10.000000000 -0500
30847@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30848 struct i2c_client c;
30849 u8 eedata[256];
30850
30851+ pax_track_stack();
30852+
30853 memset(&c, 0, sizeof(c));
30854 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30855 c.adapter = &cx->i2c_adap[0];
30856diff -urNp linux-3.1.4/drivers/media/video/cx23885/cx23885-input.c linux-3.1.4/drivers/media/video/cx23885/cx23885-input.c
30857--- linux-3.1.4/drivers/media/video/cx23885/cx23885-input.c 2011-11-11 15:19:27.000000000 -0500
30858+++ linux-3.1.4/drivers/media/video/cx23885/cx23885-input.c 2011-11-16 18:40:10.000000000 -0500
30859@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
30860 bool handle = false;
30861 struct ir_raw_event ir_core_event[64];
30862
30863+ pax_track_stack();
30864+
30865 do {
30866 num = 0;
30867 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
30868diff -urNp linux-3.1.4/drivers/media/video/cx88/cx88-alsa.c linux-3.1.4/drivers/media/video/cx88/cx88-alsa.c
30869--- linux-3.1.4/drivers/media/video/cx88/cx88-alsa.c 2011-11-11 15:19:27.000000000 -0500
30870+++ linux-3.1.4/drivers/media/video/cx88/cx88-alsa.c 2011-11-16 18:39:07.000000000 -0500
30871@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_
30872 * Only boards with eeprom and byte 1 at eeprom=1 have it
30873 */
30874
30875-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
30876+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
30877 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30878 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30879 {0, }
30880diff -urNp linux-3.1.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.1.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30881--- linux-3.1.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-11 15:19:27.000000000 -0500
30882+++ linux-3.1.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-16 18:40:10.000000000 -0500
30883@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30884 u8 *eeprom;
30885 struct tveeprom tvdata;
30886
30887+ pax_track_stack();
30888+
30889 memset(&tvdata,0,sizeof(tvdata));
30890
30891 eeprom = pvr2_eeprom_fetch(hdw);
30892diff -urNp linux-3.1.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-3.1.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
30893--- linux-3.1.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-11 15:19:27.000000000 -0500
30894+++ linux-3.1.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-16 18:39:07.000000000 -0500
30895@@ -196,7 +196,7 @@ struct pvr2_hdw {
30896
30897 /* I2C stuff */
30898 struct i2c_adapter i2c_adap;
30899- struct i2c_algorithm i2c_algo;
30900+ i2c_algorithm_no_const i2c_algo;
30901 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
30902 int i2c_cx25840_hack_state;
30903 int i2c_linked;
30904diff -urNp linux-3.1.4/drivers/media/video/saa7134/saa6752hs.c linux-3.1.4/drivers/media/video/saa7134/saa6752hs.c
30905--- linux-3.1.4/drivers/media/video/saa7134/saa6752hs.c 2011-11-11 15:19:27.000000000 -0500
30906+++ linux-3.1.4/drivers/media/video/saa7134/saa6752hs.c 2011-11-16 18:40:10.000000000 -0500
30907@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
30908 unsigned char localPAT[256];
30909 unsigned char localPMT[256];
30910
30911+ pax_track_stack();
30912+
30913 /* Set video format - must be done first as it resets other settings */
30914 set_reg8(client, 0x41, h->video_format);
30915
30916diff -urNp linux-3.1.4/drivers/media/video/saa7164/saa7164-cmd.c linux-3.1.4/drivers/media/video/saa7164/saa7164-cmd.c
30917--- linux-3.1.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-11 15:19:27.000000000 -0500
30918+++ linux-3.1.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-16 18:40:10.000000000 -0500
30919@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
30920 u8 tmp[512];
30921 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30922
30923+ pax_track_stack();
30924+
30925 /* While any outstand message on the bus exists... */
30926 do {
30927
30928@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
30929 u8 tmp[512];
30930 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30931
30932+ pax_track_stack();
30933+
30934 while (loop) {
30935
30936 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
30937diff -urNp linux-3.1.4/drivers/media/video/timblogiw.c linux-3.1.4/drivers/media/video/timblogiw.c
30938--- linux-3.1.4/drivers/media/video/timblogiw.c 2011-11-11 15:19:27.000000000 -0500
30939+++ linux-3.1.4/drivers/media/video/timblogiw.c 2011-11-17 18:36:32.000000000 -0500
30940@@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *f
30941
30942 /* Platform device functions */
30943
30944-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
30945+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
30946 .vidioc_querycap = timblogiw_querycap,
30947 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
30948 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
30949@@ -766,7 +766,7 @@ static __devinitconst struct v4l2_ioctl_
30950 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
30951 };
30952
30953-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
30954+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
30955 .owner = THIS_MODULE,
30956 .open = timblogiw_open,
30957 .release = timblogiw_close,
30958diff -urNp linux-3.1.4/drivers/media/video/usbvision/usbvision-core.c linux-3.1.4/drivers/media/video/usbvision/usbvision-core.c
30959--- linux-3.1.4/drivers/media/video/usbvision/usbvision-core.c 2011-11-11 15:19:27.000000000 -0500
30960+++ linux-3.1.4/drivers/media/video/usbvision/usbvision-core.c 2011-11-16 18:40:10.000000000 -0500
30961@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
30962 unsigned char rv, gv, bv;
30963 static unsigned char *Y, *U, *V;
30964
30965+ pax_track_stack();
30966+
30967 frame = usbvision->cur_frame;
30968 image_size = frame->frmwidth * frame->frmheight;
30969 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
30970diff -urNp linux-3.1.4/drivers/media/video/videobuf-dma-sg.c linux-3.1.4/drivers/media/video/videobuf-dma-sg.c
30971--- linux-3.1.4/drivers/media/video/videobuf-dma-sg.c 2011-11-11 15:19:27.000000000 -0500
30972+++ linux-3.1.4/drivers/media/video/videobuf-dma-sg.c 2011-11-16 18:40:10.000000000 -0500
30973@@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size)
30974 {
30975 struct videobuf_queue q;
30976
30977+ pax_track_stack();
30978+
30979 /* Required to make generic handler to call __videobuf_alloc */
30980 q.int_ops = &sg_ops;
30981
30982diff -urNp linux-3.1.4/drivers/message/fusion/mptbase.c linux-3.1.4/drivers/message/fusion/mptbase.c
30983--- linux-3.1.4/drivers/message/fusion/mptbase.c 2011-11-11 15:19:27.000000000 -0500
30984+++ linux-3.1.4/drivers/message/fusion/mptbase.c 2011-11-16 18:40:10.000000000 -0500
30985@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
30986 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
30987 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
30988
30989+#ifdef CONFIG_GRKERNSEC_HIDESYM
30990+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
30991+#else
30992 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30993 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
30994+#endif
30995+
30996 /*
30997 * Rounding UP to nearest 4-kB boundary here...
30998 */
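
The CONFIG_GRKERNSEC_HIDESYM branch above prints NULL in place of the real request-frame addresses, so this /proc interface stops leaking kernel pointers to readers. A tiny userspace analogue of that masking decision; the hide flag is a hypothetical stand-in for the config option:

#include <stdio.h>

static void show_frames(void *frames, int hide_pointers)
{
        if (hide_pointers)
                printf("  RequestFrames @ %p\n", (void *)NULL);
        else
                printf("  RequestFrames @ %p\n", frames);
}

int main(void)
{
        int dummy;

        show_frames(&dummy, 0);         /* prints a real address */
        show_frames(&dummy, 1);         /* masked, as under HIDESYM */
        return 0;
}
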
30999diff -urNp linux-3.1.4/drivers/message/fusion/mptsas.c linux-3.1.4/drivers/message/fusion/mptsas.c
31000--- linux-3.1.4/drivers/message/fusion/mptsas.c 2011-11-11 15:19:27.000000000 -0500
31001+++ linux-3.1.4/drivers/message/fusion/mptsas.c 2011-11-16 18:39:07.000000000 -0500
31002@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
31003 return 0;
31004 }
31005
31006+static inline void
31007+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31008+{
31009+ if (phy_info->port_details) {
31010+ phy_info->port_details->rphy = rphy;
31011+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31012+ ioc->name, rphy));
31013+ }
31014+
31015+ if (rphy) {
31016+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31017+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31018+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31019+ ioc->name, rphy, rphy->dev.release));
31020+ }
31021+}
31022+
31023 /* no mutex */
31024 static void
31025 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31026@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
31027 return NULL;
31028 }
31029
31030-static inline void
31031-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31032-{
31033- if (phy_info->port_details) {
31034- phy_info->port_details->rphy = rphy;
31035- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31036- ioc->name, rphy));
31037- }
31038-
31039- if (rphy) {
31040- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31041- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31042- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31043- ioc->name, rphy, rphy->dev.release));
31044- }
31045-}
31046-
31047 static inline struct sas_port *
31048 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31049 {
31050diff -urNp linux-3.1.4/drivers/message/fusion/mptscsih.c linux-3.1.4/drivers/message/fusion/mptscsih.c
31051--- linux-3.1.4/drivers/message/fusion/mptscsih.c 2011-11-11 15:19:27.000000000 -0500
31052+++ linux-3.1.4/drivers/message/fusion/mptscsih.c 2011-11-16 18:39:07.000000000 -0500
31053@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31054
31055 h = shost_priv(SChost);
31056
31057- if (h) {
31058- if (h->info_kbuf == NULL)
31059- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31060- return h->info_kbuf;
31061- h->info_kbuf[0] = '\0';
31062+ if (!h)
31063+ return NULL;
31064
31065- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31066- h->info_kbuf[size-1] = '\0';
31067- }
31068+ if (h->info_kbuf == NULL)
31069+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31070+ return h->info_kbuf;
31071+ h->info_kbuf[0] = '\0';
31072+
31073+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31074+ h->info_kbuf[size-1] = '\0';
31075
31076 return h->info_kbuf;
31077 }
31078diff -urNp linux-3.1.4/drivers/message/i2o/i2o_config.c linux-3.1.4/drivers/message/i2o/i2o_config.c
31079--- linux-3.1.4/drivers/message/i2o/i2o_config.c 2011-11-11 15:19:27.000000000 -0500
31080+++ linux-3.1.4/drivers/message/i2o/i2o_config.c 2011-11-16 18:40:10.000000000 -0500
31081@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
31082 struct i2o_message *msg;
31083 unsigned int iop;
31084
31085+ pax_track_stack();
31086+
31087 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31088 return -EFAULT;
31089
31090diff -urNp linux-3.1.4/drivers/message/i2o/i2o_proc.c linux-3.1.4/drivers/message/i2o/i2o_proc.c
31091--- linux-3.1.4/drivers/message/i2o/i2o_proc.c 2011-11-11 15:19:27.000000000 -0500
31092+++ linux-3.1.4/drivers/message/i2o/i2o_proc.c 2011-11-16 18:39:07.000000000 -0500
31093@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
31094 "Array Controller Device"
31095 };
31096
31097-static char *chtostr(u8 * chars, int n)
31098-{
31099- char tmp[256];
31100- tmp[0] = 0;
31101- return strncat(tmp, (char *)chars, n);
31102-}
31103-
31104 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31105 char *group)
31106 {
31107@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
31108
31109 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31110 seq_printf(seq, "%-#8x", ddm_table.module_id);
31111- seq_printf(seq, "%-29s",
31112- chtostr(ddm_table.module_name_version, 28));
31113+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31114 seq_printf(seq, "%9d ", ddm_table.data_size);
31115 seq_printf(seq, "%8d", ddm_table.code_size);
31116
31117@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
31118
31119 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31120 seq_printf(seq, "%-#8x", dst->module_id);
31121- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31122- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31123+ seq_printf(seq, "%-.28s", dst->module_name_version);
31124+ seq_printf(seq, "%-.8s", dst->date);
31125 seq_printf(seq, "%8d ", dst->module_size);
31126 seq_printf(seq, "%8d ", dst->mpb_size);
31127 seq_printf(seq, "0x%04x", dst->module_flags);
31128@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
31129 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31130 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31131 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31132- seq_printf(seq, "Vendor info : %s\n",
31133- chtostr((u8 *) (work32 + 2), 16));
31134- seq_printf(seq, "Product info : %s\n",
31135- chtostr((u8 *) (work32 + 6), 16));
31136- seq_printf(seq, "Description : %s\n",
31137- chtostr((u8 *) (work32 + 10), 16));
31138- seq_printf(seq, "Product rev. : %s\n",
31139- chtostr((u8 *) (work32 + 14), 8));
31140+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31141+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31142+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31143+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31144
31145 seq_printf(seq, "Serial number : ");
31146 print_serial_number(seq, (u8 *) (work32 + 16),
31147@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
31148 }
31149
31150 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31151- seq_printf(seq, "Module name : %s\n",
31152- chtostr(result.module_name, 24));
31153- seq_printf(seq, "Module revision : %s\n",
31154- chtostr(result.module_rev, 8));
31155+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
31156+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31157
31158 seq_printf(seq, "Serial number : ");
31159 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31160@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
31161 return 0;
31162 }
31163
31164- seq_printf(seq, "Device name : %s\n",
31165- chtostr(result.device_name, 64));
31166- seq_printf(seq, "Service name : %s\n",
31167- chtostr(result.service_name, 64));
31168- seq_printf(seq, "Physical name : %s\n",
31169- chtostr(result.physical_location, 64));
31170- seq_printf(seq, "Instance number : %s\n",
31171- chtostr(result.instance_number, 4));
31172+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
31173+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
31174+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31175+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31176
31177 return 0;
31178 }
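
The i2o_proc changes drop the chtostr() helper, which strncat'ed untrusted, possibly unterminated ID bytes into a 256-byte stack buffer and returned a pointer to that local storage, and instead print the fields directly with a precision such as %.28s, which emits at most that many bytes and never needs a terminating NUL. A standalone demonstration of the precision form, with made-up field contents:

#include <stdio.h>

int main(void)
{
        /* Fixed-size ID fields with no NUL terminator, as read from the tables. */
        char module_rev[8]   = { 'v', '1', '.', '2', '.', '3', '4', '5' };
        char vendor_info[16] = "ACME-CONTROLLERS";      /* exactly 16 chars */

        /* A ".N" precision stops after N bytes, so no intermediate */
        /* stack buffer (the old chtostr()) is needed.              */
        printf("Module revision : %.8s\n", module_rev);
        printf("Vendor info     : %.16s\n", vendor_info);
        return 0;
}
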
31179diff -urNp linux-3.1.4/drivers/message/i2o/iop.c linux-3.1.4/drivers/message/i2o/iop.c
31180--- linux-3.1.4/drivers/message/i2o/iop.c 2011-11-11 15:19:27.000000000 -0500
31181+++ linux-3.1.4/drivers/message/i2o/iop.c 2011-11-16 18:39:07.000000000 -0500
31182@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31183
31184 spin_lock_irqsave(&c->context_list_lock, flags);
31185
31186- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31187- atomic_inc(&c->context_list_counter);
31188+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31189+ atomic_inc_unchecked(&c->context_list_counter);
31190
31191- entry->context = atomic_read(&c->context_list_counter);
31192+ entry->context = atomic_read_unchecked(&c->context_list_counter);
31193
31194 list_add(&entry->list, &c->context_list);
31195
31196@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31197
31198 #if BITS_PER_LONG == 64
31199 spin_lock_init(&c->context_list_lock);
31200- atomic_set(&c->context_list_counter, 0);
31201+ atomic_set_unchecked(&c->context_list_counter, 0);
31202 INIT_LIST_HEAD(&c->context_list);
31203 #endif
31204
31205diff -urNp linux-3.1.4/drivers/mfd/ab3100-core.c linux-3.1.4/drivers/mfd/ab3100-core.c
31206--- linux-3.1.4/drivers/mfd/ab3100-core.c 2011-11-11 15:19:27.000000000 -0500
31207+++ linux-3.1.4/drivers/mfd/ab3100-core.c 2011-11-16 18:39:07.000000000 -0500
31208@@ -809,7 +809,7 @@ struct ab_family_id {
31209 char *name;
31210 };
31211
31212-static const struct ab_family_id ids[] __devinitdata = {
31213+static const struct ab_family_id ids[] __devinitconst = {
31214 /* AB3100 */
31215 {
31216 .id = 0xc0,
31217diff -urNp linux-3.1.4/drivers/mfd/abx500-core.c linux-3.1.4/drivers/mfd/abx500-core.c
31218--- linux-3.1.4/drivers/mfd/abx500-core.c 2011-11-11 15:19:27.000000000 -0500
31219+++ linux-3.1.4/drivers/mfd/abx500-core.c 2011-11-16 18:39:07.000000000 -0500
31220@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
31221
31222 struct abx500_device_entry {
31223 struct list_head list;
31224- struct abx500_ops ops;
31225+ abx500_ops_no_const ops;
31226 struct device *dev;
31227 };
31228
31229diff -urNp linux-3.1.4/drivers/mfd/janz-cmodio.c linux-3.1.4/drivers/mfd/janz-cmodio.c
31230--- linux-3.1.4/drivers/mfd/janz-cmodio.c 2011-11-11 15:19:27.000000000 -0500
31231+++ linux-3.1.4/drivers/mfd/janz-cmodio.c 2011-11-16 18:39:07.000000000 -0500
31232@@ -13,6 +13,7 @@
31233
31234 #include <linux/kernel.h>
31235 #include <linux/module.h>
31236+#include <linux/slab.h>
31237 #include <linux/init.h>
31238 #include <linux/pci.h>
31239 #include <linux/interrupt.h>
31240diff -urNp linux-3.1.4/drivers/mfd/wm8350-i2c.c linux-3.1.4/drivers/mfd/wm8350-i2c.c
31241--- linux-3.1.4/drivers/mfd/wm8350-i2c.c 2011-11-11 15:19:27.000000000 -0500
31242+++ linux-3.1.4/drivers/mfd/wm8350-i2c.c 2011-11-16 18:40:10.000000000 -0500
31243@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
31244 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31245 int ret;
31246
31247+ pax_track_stack();
31248+
31249 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31250 return -EINVAL;
31251
31252diff -urNp linux-3.1.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.1.4/drivers/misc/lis3lv02d/lis3lv02d.c
31253--- linux-3.1.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-11-11 15:19:27.000000000 -0500
31254+++ linux-3.1.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-11-16 18:39:07.000000000 -0500
31255@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(in
31256 * the lid is closed. This leads to interrupts as soon as a little move
31257 * is done.
31258 */
31259- atomic_inc(&lis3_dev.count);
31260+ atomic_inc_unchecked(&lis3_dev.count);
31261
31262 wake_up_interruptible(&lis3_dev.misc_wait);
31263 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
31264@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct in
31265 if (lis3_dev.pm_dev)
31266 pm_runtime_get_sync(lis3_dev.pm_dev);
31267
31268- atomic_set(&lis3_dev.count, 0);
31269+ atomic_set_unchecked(&lis3_dev.count, 0);
31270 return 0;
31271 }
31272
31273@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struc
31274 add_wait_queue(&lis3_dev.misc_wait, &wait);
31275 while (true) {
31276 set_current_state(TASK_INTERRUPTIBLE);
31277- data = atomic_xchg(&lis3_dev.count, 0);
31278+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
31279 if (data)
31280 break;
31281
31282@@ -585,7 +585,7 @@ out:
31283 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
31284 {
31285 poll_wait(file, &lis3_dev.misc_wait, wait);
31286- if (atomic_read(&lis3_dev.count))
31287+ if (atomic_read_unchecked(&lis3_dev.count))
31288 return POLLIN | POLLRDNORM;
31289 return 0;
31290 }
31291diff -urNp linux-3.1.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.1.4/drivers/misc/lis3lv02d/lis3lv02d.h
31292--- linux-3.1.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-11-11 15:19:27.000000000 -0500
31293+++ linux-3.1.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-11-16 18:39:07.000000000 -0500
31294@@ -265,7 +265,7 @@ struct lis3lv02d {
31295 struct input_polled_dev *idev; /* input device */
31296 struct platform_device *pdev; /* platform device */
31297 struct regulator_bulk_data regulators[2];
31298- atomic_t count; /* interrupt count after last read */
31299+ atomic_unchecked_t count; /* interrupt count after last read */
31300 union axis_conversion ac; /* hw -> logical axis */
31301 int mapped_btns[3];
31302
31303diff -urNp linux-3.1.4/drivers/misc/sgi-gru/gruhandles.c linux-3.1.4/drivers/misc/sgi-gru/gruhandles.c
31304--- linux-3.1.4/drivers/misc/sgi-gru/gruhandles.c 2011-11-11 15:19:27.000000000 -0500
31305+++ linux-3.1.4/drivers/misc/sgi-gru/gruhandles.c 2011-11-16 18:39:07.000000000 -0500
31306@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
31307 unsigned long nsec;
31308
31309 nsec = CLKS2NSEC(clks);
31310- atomic_long_inc(&mcs_op_statistics[op].count);
31311- atomic_long_add(nsec, &mcs_op_statistics[op].total);
31312+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31313+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
31314 if (mcs_op_statistics[op].max < nsec)
31315 mcs_op_statistics[op].max = nsec;
31316 }
31317diff -urNp linux-3.1.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.1.4/drivers/misc/sgi-gru/gruprocfs.c
31318--- linux-3.1.4/drivers/misc/sgi-gru/gruprocfs.c 2011-11-11 15:19:27.000000000 -0500
31319+++ linux-3.1.4/drivers/misc/sgi-gru/gruprocfs.c 2011-11-16 18:39:07.000000000 -0500
31320@@ -32,9 +32,9 @@
31321
31322 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31323
31324-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31325+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31326 {
31327- unsigned long val = atomic_long_read(v);
31328+ unsigned long val = atomic_long_read_unchecked(v);
31329
31330 seq_printf(s, "%16lu %s\n", val, id);
31331 }
31332@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
31333
31334 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
31335 for (op = 0; op < mcsop_last; op++) {
31336- count = atomic_long_read(&mcs_op_statistics[op].count);
31337- total = atomic_long_read(&mcs_op_statistics[op].total);
31338+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31339+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31340 max = mcs_op_statistics[op].max;
31341 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31342 count ? total / count : 0, max);
31343diff -urNp linux-3.1.4/drivers/misc/sgi-gru/grutables.h linux-3.1.4/drivers/misc/sgi-gru/grutables.h
31344--- linux-3.1.4/drivers/misc/sgi-gru/grutables.h 2011-11-11 15:19:27.000000000 -0500
31345+++ linux-3.1.4/drivers/misc/sgi-gru/grutables.h 2011-11-16 18:39:07.000000000 -0500
31346@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
31347 * GRU statistics.
31348 */
31349 struct gru_stats_s {
31350- atomic_long_t vdata_alloc;
31351- atomic_long_t vdata_free;
31352- atomic_long_t gts_alloc;
31353- atomic_long_t gts_free;
31354- atomic_long_t gms_alloc;
31355- atomic_long_t gms_free;
31356- atomic_long_t gts_double_allocate;
31357- atomic_long_t assign_context;
31358- atomic_long_t assign_context_failed;
31359- atomic_long_t free_context;
31360- atomic_long_t load_user_context;
31361- atomic_long_t load_kernel_context;
31362- atomic_long_t lock_kernel_context;
31363- atomic_long_t unlock_kernel_context;
31364- atomic_long_t steal_user_context;
31365- atomic_long_t steal_kernel_context;
31366- atomic_long_t steal_context_failed;
31367- atomic_long_t nopfn;
31368- atomic_long_t asid_new;
31369- atomic_long_t asid_next;
31370- atomic_long_t asid_wrap;
31371- atomic_long_t asid_reuse;
31372- atomic_long_t intr;
31373- atomic_long_t intr_cbr;
31374- atomic_long_t intr_tfh;
31375- atomic_long_t intr_spurious;
31376- atomic_long_t intr_mm_lock_failed;
31377- atomic_long_t call_os;
31378- atomic_long_t call_os_wait_queue;
31379- atomic_long_t user_flush_tlb;
31380- atomic_long_t user_unload_context;
31381- atomic_long_t user_exception;
31382- atomic_long_t set_context_option;
31383- atomic_long_t check_context_retarget_intr;
31384- atomic_long_t check_context_unload;
31385- atomic_long_t tlb_dropin;
31386- atomic_long_t tlb_preload_page;
31387- atomic_long_t tlb_dropin_fail_no_asid;
31388- atomic_long_t tlb_dropin_fail_upm;
31389- atomic_long_t tlb_dropin_fail_invalid;
31390- atomic_long_t tlb_dropin_fail_range_active;
31391- atomic_long_t tlb_dropin_fail_idle;
31392- atomic_long_t tlb_dropin_fail_fmm;
31393- atomic_long_t tlb_dropin_fail_no_exception;
31394- atomic_long_t tfh_stale_on_fault;
31395- atomic_long_t mmu_invalidate_range;
31396- atomic_long_t mmu_invalidate_page;
31397- atomic_long_t flush_tlb;
31398- atomic_long_t flush_tlb_gru;
31399- atomic_long_t flush_tlb_gru_tgh;
31400- atomic_long_t flush_tlb_gru_zero_asid;
31401-
31402- atomic_long_t copy_gpa;
31403- atomic_long_t read_gpa;
31404-
31405- atomic_long_t mesq_receive;
31406- atomic_long_t mesq_receive_none;
31407- atomic_long_t mesq_send;
31408- atomic_long_t mesq_send_failed;
31409- atomic_long_t mesq_noop;
31410- atomic_long_t mesq_send_unexpected_error;
31411- atomic_long_t mesq_send_lb_overflow;
31412- atomic_long_t mesq_send_qlimit_reached;
31413- atomic_long_t mesq_send_amo_nacked;
31414- atomic_long_t mesq_send_put_nacked;
31415- atomic_long_t mesq_page_overflow;
31416- atomic_long_t mesq_qf_locked;
31417- atomic_long_t mesq_qf_noop_not_full;
31418- atomic_long_t mesq_qf_switch_head_failed;
31419- atomic_long_t mesq_qf_unexpected_error;
31420- atomic_long_t mesq_noop_unexpected_error;
31421- atomic_long_t mesq_noop_lb_overflow;
31422- atomic_long_t mesq_noop_qlimit_reached;
31423- atomic_long_t mesq_noop_amo_nacked;
31424- atomic_long_t mesq_noop_put_nacked;
31425- atomic_long_t mesq_noop_page_overflow;
31426+ atomic_long_unchecked_t vdata_alloc;
31427+ atomic_long_unchecked_t vdata_free;
31428+ atomic_long_unchecked_t gts_alloc;
31429+ atomic_long_unchecked_t gts_free;
31430+ atomic_long_unchecked_t gms_alloc;
31431+ atomic_long_unchecked_t gms_free;
31432+ atomic_long_unchecked_t gts_double_allocate;
31433+ atomic_long_unchecked_t assign_context;
31434+ atomic_long_unchecked_t assign_context_failed;
31435+ atomic_long_unchecked_t free_context;
31436+ atomic_long_unchecked_t load_user_context;
31437+ atomic_long_unchecked_t load_kernel_context;
31438+ atomic_long_unchecked_t lock_kernel_context;
31439+ atomic_long_unchecked_t unlock_kernel_context;
31440+ atomic_long_unchecked_t steal_user_context;
31441+ atomic_long_unchecked_t steal_kernel_context;
31442+ atomic_long_unchecked_t steal_context_failed;
31443+ atomic_long_unchecked_t nopfn;
31444+ atomic_long_unchecked_t asid_new;
31445+ atomic_long_unchecked_t asid_next;
31446+ atomic_long_unchecked_t asid_wrap;
31447+ atomic_long_unchecked_t asid_reuse;
31448+ atomic_long_unchecked_t intr;
31449+ atomic_long_unchecked_t intr_cbr;
31450+ atomic_long_unchecked_t intr_tfh;
31451+ atomic_long_unchecked_t intr_spurious;
31452+ atomic_long_unchecked_t intr_mm_lock_failed;
31453+ atomic_long_unchecked_t call_os;
31454+ atomic_long_unchecked_t call_os_wait_queue;
31455+ atomic_long_unchecked_t user_flush_tlb;
31456+ atomic_long_unchecked_t user_unload_context;
31457+ atomic_long_unchecked_t user_exception;
31458+ atomic_long_unchecked_t set_context_option;
31459+ atomic_long_unchecked_t check_context_retarget_intr;
31460+ atomic_long_unchecked_t check_context_unload;
31461+ atomic_long_unchecked_t tlb_dropin;
31462+ atomic_long_unchecked_t tlb_preload_page;
31463+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
31464+ atomic_long_unchecked_t tlb_dropin_fail_upm;
31465+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
31466+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
31467+ atomic_long_unchecked_t tlb_dropin_fail_idle;
31468+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
31469+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
31470+ atomic_long_unchecked_t tfh_stale_on_fault;
31471+ atomic_long_unchecked_t mmu_invalidate_range;
31472+ atomic_long_unchecked_t mmu_invalidate_page;
31473+ atomic_long_unchecked_t flush_tlb;
31474+ atomic_long_unchecked_t flush_tlb_gru;
31475+ atomic_long_unchecked_t flush_tlb_gru_tgh;
31476+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
31477+
31478+ atomic_long_unchecked_t copy_gpa;
31479+ atomic_long_unchecked_t read_gpa;
31480+
31481+ atomic_long_unchecked_t mesq_receive;
31482+ atomic_long_unchecked_t mesq_receive_none;
31483+ atomic_long_unchecked_t mesq_send;
31484+ atomic_long_unchecked_t mesq_send_failed;
31485+ atomic_long_unchecked_t mesq_noop;
31486+ atomic_long_unchecked_t mesq_send_unexpected_error;
31487+ atomic_long_unchecked_t mesq_send_lb_overflow;
31488+ atomic_long_unchecked_t mesq_send_qlimit_reached;
31489+ atomic_long_unchecked_t mesq_send_amo_nacked;
31490+ atomic_long_unchecked_t mesq_send_put_nacked;
31491+ atomic_long_unchecked_t mesq_page_overflow;
31492+ atomic_long_unchecked_t mesq_qf_locked;
31493+ atomic_long_unchecked_t mesq_qf_noop_not_full;
31494+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
31495+ atomic_long_unchecked_t mesq_qf_unexpected_error;
31496+ atomic_long_unchecked_t mesq_noop_unexpected_error;
31497+ atomic_long_unchecked_t mesq_noop_lb_overflow;
31498+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
31499+ atomic_long_unchecked_t mesq_noop_amo_nacked;
31500+ atomic_long_unchecked_t mesq_noop_put_nacked;
31501+ atomic_long_unchecked_t mesq_noop_page_overflow;
31502
31503 };
31504
31505@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
31506 tghop_invalidate, mcsop_last};
31507
31508 struct mcs_op_statistic {
31509- atomic_long_t count;
31510- atomic_long_t total;
31511+ atomic_long_unchecked_t count;
31512+ atomic_long_unchecked_t total;
31513 unsigned long max;
31514 };
31515
31516@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
31517
31518 #define STAT(id) do { \
31519 if (gru_options & OPT_STATS) \
31520- atomic_long_inc(&gru_stats.id); \
31521+ atomic_long_inc_unchecked(&gru_stats.id); \
31522 } while (0)
31523
31524 #ifdef CONFIG_SGI_GRU_DEBUG
31525diff -urNp linux-3.1.4/drivers/misc/sgi-xp/xpc.h linux-3.1.4/drivers/misc/sgi-xp/xpc.h
31526--- linux-3.1.4/drivers/misc/sgi-xp/xpc.h 2011-11-11 15:19:27.000000000 -0500
31527+++ linux-3.1.4/drivers/misc/sgi-xp/xpc.h 2011-11-16 18:39:07.000000000 -0500
31528@@ -835,6 +835,7 @@ struct xpc_arch_operations {
31529 void (*received_payload) (struct xpc_channel *, void *);
31530 void (*notify_senders_of_disconnect) (struct xpc_channel *);
31531 };
31532+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
31533
31534 /* struct xpc_partition act_state values (for XPC HB) */
31535
31536@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
31537 /* found in xpc_main.c */
31538 extern struct device *xpc_part;
31539 extern struct device *xpc_chan;
31540-extern struct xpc_arch_operations xpc_arch_ops;
31541+extern xpc_arch_operations_no_const xpc_arch_ops;
31542 extern int xpc_disengage_timelimit;
31543 extern int xpc_disengage_timedout;
31544 extern int xpc_activate_IRQ_rcvd;
31545diff -urNp linux-3.1.4/drivers/misc/sgi-xp/xpc_main.c linux-3.1.4/drivers/misc/sgi-xp/xpc_main.c
31546--- linux-3.1.4/drivers/misc/sgi-xp/xpc_main.c 2011-11-11 15:19:27.000000000 -0500
31547+++ linux-3.1.4/drivers/misc/sgi-xp/xpc_main.c 2011-11-16 18:39:07.000000000 -0500
31548@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
31549 .notifier_call = xpc_system_die,
31550 };
31551
31552-struct xpc_arch_operations xpc_arch_ops;
31553+xpc_arch_operations_no_const xpc_arch_ops;
31554
31555 /*
31556 * Timer function to enforce the timelimit on the partition disengage.
31557diff -urNp linux-3.1.4/drivers/misc/sgi-xp/xp.h linux-3.1.4/drivers/misc/sgi-xp/xp.h
31558--- linux-3.1.4/drivers/misc/sgi-xp/xp.h 2011-11-11 15:19:27.000000000 -0500
31559+++ linux-3.1.4/drivers/misc/sgi-xp/xp.h 2011-11-16 18:39:07.000000000 -0500
31560@@ -289,7 +289,7 @@ struct xpc_interface {
31561 xpc_notify_func, void *);
31562 void (*received) (short, int, void *);
31563 enum xp_retval (*partid_to_nasids) (short, void *);
31564-};
31565+} __no_const;
31566
31567 extern struct xpc_interface xpc_interface;
31568
31569diff -urNp linux-3.1.4/drivers/mmc/host/sdhci-pci.c linux-3.1.4/drivers/mmc/host/sdhci-pci.c
31570--- linux-3.1.4/drivers/mmc/host/sdhci-pci.c 2011-11-11 15:19:27.000000000 -0500
31571+++ linux-3.1.4/drivers/mmc/host/sdhci-pci.c 2011-11-16 18:39:07.000000000 -0500
31572@@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhc
31573 .probe = via_probe,
31574 };
31575
31576-static const struct pci_device_id pci_ids[] __devinitdata = {
31577+static const struct pci_device_id pci_ids[] __devinitconst = {
31578 {
31579 .vendor = PCI_VENDOR_ID_RICOH,
31580 .device = PCI_DEVICE_ID_RICOH_R5C822,
31581diff -urNp linux-3.1.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.1.4/drivers/mtd/chips/cfi_cmdset_0001.c
31582--- linux-3.1.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-11 15:19:27.000000000 -0500
31583+++ linux-3.1.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-16 18:40:10.000000000 -0500
31584@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
31585 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
31586 unsigned long timeo = jiffies + HZ;
31587
31588+ pax_track_stack();
31589+
31590 /* Prevent setting state FL_SYNCING for chip in suspended state. */
31591 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
31592 goto sleep;
31593@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
31594 unsigned long initial_adr;
31595 int initial_len = len;
31596
31597+ pax_track_stack();
31598+
31599 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
31600 adr += chip->start;
31601 initial_adr = adr;
31602@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
31603 int retries = 3;
31604 int ret;
31605
31606+ pax_track_stack();
31607+
31608 adr += chip->start;
31609
31610 retry:
31611diff -urNp linux-3.1.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.1.4/drivers/mtd/chips/cfi_cmdset_0020.c
31612--- linux-3.1.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-11 15:19:27.000000000 -0500
31613+++ linux-3.1.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-16 18:40:10.000000000 -0500
31614@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
31615 unsigned long cmd_addr;
31616 struct cfi_private *cfi = map->fldrv_priv;
31617
31618+ pax_track_stack();
31619+
31620 adr += chip->start;
31621
31622 /* Ensure cmd read/writes are aligned. */
31623@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
31624 DECLARE_WAITQUEUE(wait, current);
31625 int wbufsize, z;
31626
31627+ pax_track_stack();
31628+
31629 /* M58LW064A requires bus alignment for buffer wriets -- saw */
31630 if (adr & (map_bankwidth(map)-1))
31631 return -EINVAL;
31632@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
31633 DECLARE_WAITQUEUE(wait, current);
31634 int ret = 0;
31635
31636+ pax_track_stack();
31637+
31638 adr += chip->start;
31639
31640 /* Let's determine this according to the interleave only once */
31641@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
31642 unsigned long timeo = jiffies + HZ;
31643 DECLARE_WAITQUEUE(wait, current);
31644
31645+ pax_track_stack();
31646+
31647 adr += chip->start;
31648
31649 /* Let's determine this according to the interleave only once */
31650@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
31651 unsigned long timeo = jiffies + HZ;
31652 DECLARE_WAITQUEUE(wait, current);
31653
31654+ pax_track_stack();
31655+
31656 adr += chip->start;
31657
31658 /* Let's determine this according to the interleave only once */
31659diff -urNp linux-3.1.4/drivers/mtd/devices/doc2000.c linux-3.1.4/drivers/mtd/devices/doc2000.c
31660--- linux-3.1.4/drivers/mtd/devices/doc2000.c 2011-11-11 15:19:27.000000000 -0500
31661+++ linux-3.1.4/drivers/mtd/devices/doc2000.c 2011-11-16 18:39:07.000000000 -0500
31662@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
31663
31664 /* The ECC will not be calculated correctly if less than 512 is written */
31665 /* DBB-
31666- if (len != 0x200 && eccbuf)
31667+ if (len != 0x200)
31668 printk(KERN_WARNING
31669 "ECC needs a full sector write (adr: %lx size %lx)\n",
31670 (long) to, (long) len);
31671diff -urNp linux-3.1.4/drivers/mtd/devices/doc2001.c linux-3.1.4/drivers/mtd/devices/doc2001.c
31672--- linux-3.1.4/drivers/mtd/devices/doc2001.c 2011-11-11 15:19:27.000000000 -0500
31673+++ linux-3.1.4/drivers/mtd/devices/doc2001.c 2011-11-16 18:39:07.000000000 -0500
31674@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
31675 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
31676
31677 /* Don't allow read past end of device */
31678- if (from >= this->totlen)
31679+ if (from >= this->totlen || !len)
31680 return -EINVAL;
31681
31682 /* Don't allow a single read to cross a 512-byte block boundary */
31683diff -urNp linux-3.1.4/drivers/mtd/ftl.c linux-3.1.4/drivers/mtd/ftl.c
31684--- linux-3.1.4/drivers/mtd/ftl.c 2011-11-11 15:19:27.000000000 -0500
31685+++ linux-3.1.4/drivers/mtd/ftl.c 2011-11-16 18:40:10.000000000 -0500
31686@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
31687 loff_t offset;
31688 uint16_t srcunitswap = cpu_to_le16(srcunit);
31689
31690+ pax_track_stack();
31691+
31692 eun = &part->EUNInfo[srcunit];
31693 xfer = &part->XferInfo[xferunit];
31694 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
31695diff -urNp linux-3.1.4/drivers/mtd/inftlcore.c linux-3.1.4/drivers/mtd/inftlcore.c
31696--- linux-3.1.4/drivers/mtd/inftlcore.c 2011-11-11 15:19:27.000000000 -0500
31697+++ linux-3.1.4/drivers/mtd/inftlcore.c 2011-11-16 18:40:10.000000000 -0500
31698@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
31699 struct inftl_oob oob;
31700 size_t retlen;
31701
31702+ pax_track_stack();
31703+
31704 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
31705 "pending=%d)\n", inftl, thisVUC, pendingblock);
31706
31707diff -urNp linux-3.1.4/drivers/mtd/inftlmount.c linux-3.1.4/drivers/mtd/inftlmount.c
31708--- linux-3.1.4/drivers/mtd/inftlmount.c 2011-11-11 15:19:27.000000000 -0500
31709+++ linux-3.1.4/drivers/mtd/inftlmount.c 2011-11-16 18:40:10.000000000 -0500
31710@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
31711 struct INFTLPartition *ip;
31712 size_t retlen;
31713
31714+ pax_track_stack();
31715+
31716 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
31717
31718 /*
31719diff -urNp linux-3.1.4/drivers/mtd/lpddr/qinfo_probe.c linux-3.1.4/drivers/mtd/lpddr/qinfo_probe.c
31720--- linux-3.1.4/drivers/mtd/lpddr/qinfo_probe.c 2011-11-11 15:19:27.000000000 -0500
31721+++ linux-3.1.4/drivers/mtd/lpddr/qinfo_probe.c 2011-11-16 18:40:10.000000000 -0500
31722@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
31723 {
31724 map_word pfow_val[4];
31725
31726+ pax_track_stack();
31727+
31728 /* Check identification string */
31729 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
31730 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
31731diff -urNp linux-3.1.4/drivers/mtd/mtdchar.c linux-3.1.4/drivers/mtd/mtdchar.c
31732--- linux-3.1.4/drivers/mtd/mtdchar.c 2011-11-11 15:19:27.000000000 -0500
31733+++ linux-3.1.4/drivers/mtd/mtdchar.c 2011-11-16 18:40:10.000000000 -0500
31734@@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file,
31735 u_long size;
31736 struct mtd_info_user info;
31737
31738+ pax_track_stack();
31739+
31740 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
31741
31742 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
31743diff -urNp linux-3.1.4/drivers/mtd/nand/denali.c linux-3.1.4/drivers/mtd/nand/denali.c
31744--- linux-3.1.4/drivers/mtd/nand/denali.c 2011-11-11 15:19:27.000000000 -0500
31745+++ linux-3.1.4/drivers/mtd/nand/denali.c 2011-11-16 18:39:07.000000000 -0500
31746@@ -26,6 +26,7 @@
31747 #include <linux/pci.h>
31748 #include <linux/mtd/mtd.h>
31749 #include <linux/module.h>
31750+#include <linux/slab.h>
31751
31752 #include "denali.h"
31753
31754diff -urNp linux-3.1.4/drivers/mtd/nftlcore.c linux-3.1.4/drivers/mtd/nftlcore.c
31755--- linux-3.1.4/drivers/mtd/nftlcore.c 2011-11-11 15:19:27.000000000 -0500
31756+++ linux-3.1.4/drivers/mtd/nftlcore.c 2011-11-16 18:40:10.000000000 -0500
31757@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
31758 int inplace = 1;
31759 size_t retlen;
31760
31761+ pax_track_stack();
31762+
31763 memset(BlockMap, 0xff, sizeof(BlockMap));
31764 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31765
31766diff -urNp linux-3.1.4/drivers/mtd/nftlmount.c linux-3.1.4/drivers/mtd/nftlmount.c
31767--- linux-3.1.4/drivers/mtd/nftlmount.c 2011-11-11 15:19:27.000000000 -0500
31768+++ linux-3.1.4/drivers/mtd/nftlmount.c 2011-11-16 18:40:10.000000000 -0500
31769@@ -24,6 +24,7 @@
31770 #include <asm/errno.h>
31771 #include <linux/delay.h>
31772 #include <linux/slab.h>
31773+#include <linux/sched.h>
31774 #include <linux/mtd/mtd.h>
31775 #include <linux/mtd/nand.h>
31776 #include <linux/mtd/nftl.h>
31777@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
31778 struct mtd_info *mtd = nftl->mbd.mtd;
31779 unsigned int i;
31780
31781+ pax_track_stack();
31782+
31783 /* Assume logical EraseSize == physical erasesize for starting the scan.
31784 We'll sort it out later if we find a MediaHeader which says otherwise */
31785 /* Actually, we won't. The new DiskOnChip driver has already scanned
31786diff -urNp linux-3.1.4/drivers/mtd/ubi/build.c linux-3.1.4/drivers/mtd/ubi/build.c
31787--- linux-3.1.4/drivers/mtd/ubi/build.c 2011-11-11 15:19:27.000000000 -0500
31788+++ linux-3.1.4/drivers/mtd/ubi/build.c 2011-11-16 18:39:07.000000000 -0500
31789@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
31790 static int __init bytes_str_to_int(const char *str)
31791 {
31792 char *endp;
31793- unsigned long result;
31794+ unsigned long result, scale = 1;
31795
31796 result = simple_strtoul(str, &endp, 0);
31797 if (str == endp || result >= INT_MAX) {
31798@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const
31799
31800 switch (*endp) {
31801 case 'G':
31802- result *= 1024;
31803+ scale *= 1024;
31804 case 'M':
31805- result *= 1024;
31806+ scale *= 1024;
31807 case 'K':
31808- result *= 1024;
31809+ scale *= 1024;
31810 if (endp[1] == 'i' && endp[2] == 'B')
31811 endp += 2;
31812 case '\0':
31813@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const
31814 return -EINVAL;
31815 }
31816
31817- return result;
31818+ if ((intoverflow_t)result*scale >= INT_MAX) {
31819+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31820+ str);
31821+ return -EINVAL;
31822+ }
31823+
31824+ return result*scale;
31825 }
31826
31827 /**
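The bytes_str_to_int() change above accumulates the K/M/G suffix into a separate scale factor and rejects the value when result*scale would reach INT_MAX, instead of multiplying result in place where the overflow could go unnoticed. A standalone sketch of the same check follows; it is plain userspace C, not the UBI code, the "KiB"-style suffix handling is omitted, and 64-bit widening stands in for the patch's intoverflow_t.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse "<number>[K|M|G]" into bytes; return -1 on bad input or overflow. */
static long long bytes_str_to_ll(const char *str)
{
    char *endp;
    unsigned long long result = strtoull(str, &endp, 0);
    unsigned long long scale = 1;

    if (endp == str || result >= INT_MAX)
        return -1;

    switch (*endp) {
    case 'G': scale *= 1024;   /* deliberate fall-through, as in the patched switch */
    case 'M': scale *= 1024;
    case 'K': scale *= 1024;
    case '\0': break;
    default:  return -1;
    }

    /* Widen before multiplying so the comparison itself cannot overflow. */
    if (result * scale >= (unsigned long long)INT_MAX)
        return -1;

    return (long long)(result * scale);
}

int main(void)
{
    printf("%lld\n", bytes_str_to_ll("64K"));     /* 65536 */
    printf("%lld\n", bytes_str_to_ll("4096M"));   /* -1: would exceed INT_MAX */
    return 0;
}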
31828diff -urNp linux-3.1.4/drivers/net/atlx/atl2.c linux-3.1.4/drivers/net/atlx/atl2.c
31829--- linux-3.1.4/drivers/net/atlx/atl2.c 2011-11-11 15:19:27.000000000 -0500
31830+++ linux-3.1.4/drivers/net/atlx/atl2.c 2011-11-16 18:39:07.000000000 -0500
31831@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw
31832 */
31833
31834 #define ATL2_PARAM(X, desc) \
31835- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31836+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31837 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
31838 MODULE_PARM_DESC(X, desc);
31839 #else
31840diff -urNp linux-3.1.4/drivers/net/bna/bfa_ioc_ct.c linux-3.1.4/drivers/net/bna/bfa_ioc_ct.c
31841--- linux-3.1.4/drivers/net/bna/bfa_ioc_ct.c 2011-11-11 15:19:27.000000000 -0500
31842+++ linux-3.1.4/drivers/net/bna/bfa_ioc_ct.c 2011-11-16 18:39:07.000000000 -0500
31843@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
31844 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
31845 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
31846
31847-static struct bfa_ioc_hwif nw_hwif_ct;
31848+static struct bfa_ioc_hwif nw_hwif_ct = {
31849+ .ioc_pll_init = bfa_ioc_ct_pll_init,
31850+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
31851+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
31852+ .ioc_reg_init = bfa_ioc_ct_reg_init,
31853+ .ioc_map_port = bfa_ioc_ct_map_port,
31854+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
31855+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
31856+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
31857+ .ioc_sync_start = bfa_ioc_ct_sync_start,
31858+ .ioc_sync_join = bfa_ioc_ct_sync_join,
31859+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
31860+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
31861+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
31862+};
31863
31864 /**
31865 * Called from bfa_ioc_attach() to map asic specific calls.
31866@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
31867 void
31868 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
31869 {
31870- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
31871- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
31872- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
31873- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
31874- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
31875- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
31876- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
31877- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
31878- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
31879- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
31880- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
31881- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
31882- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
31883-
31884 ioc->ioc_hwif = &nw_hwif_ct;
31885 }
31886
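The bfa_ioc_ct.c hunk replaces field-by-field runtime assignment of nw_hwif_ct with a designated initializer, so the ops table is fully populated at compile time; elsewhere the patch relies on such tables being read-only at runtime. The following is a minimal standalone illustration of the pattern with made-up names (dev_ops, demo_init, demo_shutdown); the const here stands in for what the constify plugin does to ops tables in the patched kernel.

#include <stdio.h>

struct dev_ops {
    int  (*init)(void);
    void (*shutdown)(void);
};

static int  demo_init(void)     { puts("init");     return 0; }
static void demo_shutdown(void) { puts("shutdown"); }

/* Before: an empty object filled in field-by-field inside a setup function.
 * After (as in the hunk): populated once, at compile time. */
static const struct dev_ops demo_ops = {
    .init     = demo_init,
    .shutdown = demo_shutdown,
};

int main(void)
{
    const struct dev_ops *ops = &demo_ops;   /* the driver keeps only a pointer */

    ops->init();
    ops->shutdown();
    return 0;
}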
31887diff -urNp linux-3.1.4/drivers/net/bna/bnad.c linux-3.1.4/drivers/net/bna/bnad.c
31888--- linux-3.1.4/drivers/net/bna/bnad.c 2011-11-11 15:19:27.000000000 -0500
31889+++ linux-3.1.4/drivers/net/bna/bnad.c 2011-11-16 18:39:07.000000000 -0500
31890@@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31891 struct bna_intr_info *intr_info =
31892 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
31893 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
31894- struct bna_tx_event_cbfn tx_cbfn;
31895+ static struct bna_tx_event_cbfn tx_cbfn = {
31896+ /* Initialize the tx event handlers */
31897+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
31898+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
31899+ .tx_stall_cbfn = bnad_cb_tx_stall,
31900+ .tx_resume_cbfn = bnad_cb_tx_resume,
31901+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
31902+ };
31903 struct bna_tx *tx;
31904 unsigned long flags;
31905
31906@@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31907 tx_config->txq_depth = bnad->txq_depth;
31908 tx_config->tx_type = BNA_TX_T_REGULAR;
31909
31910- /* Initialize the tx event handlers */
31911- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
31912- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
31913- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
31914- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
31915- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
31916-
31917 /* Get BNA's resource requirement for one tx object */
31918 spin_lock_irqsave(&bnad->bna_lock, flags);
31919 bna_tx_res_req(bnad->num_txq_per_tx,
31920@@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
31921 struct bna_intr_info *intr_info =
31922 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
31923 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
31924- struct bna_rx_event_cbfn rx_cbfn;
31925+ static struct bna_rx_event_cbfn rx_cbfn = {
31926+ /* Initialize the Rx event handlers */
31927+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
31928+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
31929+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
31930+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
31931+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
31932+ .rx_post_cbfn = bnad_cb_rx_post
31933+ };
31934 struct bna_rx *rx;
31935 unsigned long flags;
31936
31937 /* Initialize the Rx object configuration */
31938 bnad_init_rx_config(bnad, rx_config);
31939
31940- /* Initialize the Rx event handlers */
31941- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
31942- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
31943- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
31944- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
31945- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
31946- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
31947-
31948 /* Get BNA's resource requirement for one Rx object */
31949 spin_lock_irqsave(&bnad->bna_lock, flags);
31950 bna_rx_res_req(rx_config, res_info);
31951diff -urNp linux-3.1.4/drivers/net/bnx2.c linux-3.1.4/drivers/net/bnx2.c
31952--- linux-3.1.4/drivers/net/bnx2.c 2011-11-11 15:19:27.000000000 -0500
31953+++ linux-3.1.4/drivers/net/bnx2.c 2011-11-16 18:40:11.000000000 -0500
31954@@ -5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31955 int rc = 0;
31956 u32 magic, csum;
31957
31958+ pax_track_stack();
31959+
31960 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31961 goto test_nvram_done;
31962
31963diff -urNp linux-3.1.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.1.4/drivers/net/bnx2x/bnx2x_ethtool.c
31964--- linux-3.1.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-11-11 15:19:27.000000000 -0500
31965+++ linux-3.1.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-11-16 18:40:11.000000000 -0500
31966@@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x
31967 int i, rc;
31968 u32 magic, crc;
31969
31970+ pax_track_stack();
31971+
31972 if (BP_NOMCP(bp))
31973 return 0;
31974
31975diff -urNp linux-3.1.4/drivers/net/bnx2x/bnx2x_sp.h linux-3.1.4/drivers/net/bnx2x/bnx2x_sp.h
31976--- linux-3.1.4/drivers/net/bnx2x/bnx2x_sp.h 2011-11-11 15:19:27.000000000 -0500
31977+++ linux-3.1.4/drivers/net/bnx2x/bnx2x_sp.h 2011-11-16 18:39:07.000000000 -0500
31978@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
31979
31980 int (*wait_comp)(struct bnx2x *bp,
31981 struct bnx2x_rx_mode_ramrod_params *p);
31982-};
31983+} __no_const;
31984
31985 /********************** Set multicast group ***********************************/
31986
31987diff -urNp linux-3.1.4/drivers/net/cxgb3/l2t.h linux-3.1.4/drivers/net/cxgb3/l2t.h
31988--- linux-3.1.4/drivers/net/cxgb3/l2t.h 2011-11-11 15:19:27.000000000 -0500
31989+++ linux-3.1.4/drivers/net/cxgb3/l2t.h 2011-11-16 18:39:07.000000000 -0500
31990@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
31991 */
31992 struct l2t_skb_cb {
31993 arp_failure_handler_func arp_failure_handler;
31994-};
31995+} __no_const;
31996
31997 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
31998
31999diff -urNp linux-3.1.4/drivers/net/cxgb4/cxgb4_main.c linux-3.1.4/drivers/net/cxgb4/cxgb4_main.c
32000--- linux-3.1.4/drivers/net/cxgb4/cxgb4_main.c 2011-11-11 15:19:27.000000000 -0500
32001+++ linux-3.1.4/drivers/net/cxgb4/cxgb4_main.c 2011-11-16 18:40:22.000000000 -0500
32002@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
32003 unsigned int nchan = adap->params.nports;
32004 struct msix_entry entries[MAX_INGQ + 1];
32005
32006+ pax_track_stack();
32007+
32008 for (i = 0; i < ARRAY_SIZE(entries); ++i)
32009 entries[i].entry = i;
32010
32011diff -urNp linux-3.1.4/drivers/net/cxgb4/t4_hw.c linux-3.1.4/drivers/net/cxgb4/t4_hw.c
32012--- linux-3.1.4/drivers/net/cxgb4/t4_hw.c 2011-11-11 15:19:27.000000000 -0500
32013+++ linux-3.1.4/drivers/net/cxgb4/t4_hw.c 2011-11-16 18:40:22.000000000 -0500
32014@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
32015 u8 vpd[VPD_LEN], csum;
32016 unsigned int vpdr_len, kw_offset, id_len;
32017
32018+ pax_track_stack();
32019+
32020 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
32021 if (ret < 0)
32022 return ret;
32023diff -urNp linux-3.1.4/drivers/net/e1000e/82571.c linux-3.1.4/drivers/net/e1000e/82571.c
32024--- linux-3.1.4/drivers/net/e1000e/82571.c 2011-11-11 15:19:27.000000000 -0500
32025+++ linux-3.1.4/drivers/net/e1000e/82571.c 2011-11-16 18:39:07.000000000 -0500
32026@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
32027 {
32028 struct e1000_hw *hw = &adapter->hw;
32029 struct e1000_mac_info *mac = &hw->mac;
32030- struct e1000_mac_operations *func = &mac->ops;
32031+ e1000_mac_operations_no_const *func = &mac->ops;
32032 u32 swsm = 0;
32033 u32 swsm2 = 0;
32034 bool force_clear_smbi = false;
32035diff -urNp linux-3.1.4/drivers/net/e1000e/es2lan.c linux-3.1.4/drivers/net/e1000e/es2lan.c
32036--- linux-3.1.4/drivers/net/e1000e/es2lan.c 2011-11-11 15:19:27.000000000 -0500
32037+++ linux-3.1.4/drivers/net/e1000e/es2lan.c 2011-11-16 18:39:07.000000000 -0500
32038@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
32039 {
32040 struct e1000_hw *hw = &adapter->hw;
32041 struct e1000_mac_info *mac = &hw->mac;
32042- struct e1000_mac_operations *func = &mac->ops;
32043+ e1000_mac_operations_no_const *func = &mac->ops;
32044
32045 /* Set media type */
32046 switch (adapter->pdev->device) {
32047diff -urNp linux-3.1.4/drivers/net/e1000e/hw.h linux-3.1.4/drivers/net/e1000e/hw.h
32048--- linux-3.1.4/drivers/net/e1000e/hw.h 2011-11-11 15:19:27.000000000 -0500
32049+++ linux-3.1.4/drivers/net/e1000e/hw.h 2011-11-16 18:39:07.000000000 -0500
32050@@ -778,6 +778,7 @@ struct e1000_mac_operations {
32051 void (*write_vfta)(struct e1000_hw *, u32, u32);
32052 s32 (*read_mac_addr)(struct e1000_hw *);
32053 };
32054+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32055
32056 /*
32057 * When to use various PHY register access functions:
32058@@ -818,6 +819,7 @@ struct e1000_phy_operations {
32059 void (*power_up)(struct e1000_hw *);
32060 void (*power_down)(struct e1000_hw *);
32061 };
32062+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32063
32064 /* Function pointers for the NVM. */
32065 struct e1000_nvm_operations {
32066@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32067 s32 (*validate)(struct e1000_hw *);
32068 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32069 };
32070+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32071
32072 struct e1000_mac_info {
32073- struct e1000_mac_operations ops;
32074+ e1000_mac_operations_no_const ops;
32075 u8 addr[ETH_ALEN];
32076 u8 perm_addr[ETH_ALEN];
32077
32078@@ -872,7 +875,7 @@ struct e1000_mac_info {
32079 };
32080
32081 struct e1000_phy_info {
32082- struct e1000_phy_operations ops;
32083+ e1000_phy_operations_no_const ops;
32084
32085 enum e1000_phy_type type;
32086
32087@@ -906,7 +909,7 @@ struct e1000_phy_info {
32088 };
32089
32090 struct e1000_nvm_info {
32091- struct e1000_nvm_operations ops;
32092+ e1000_nvm_operations_no_const ops;
32093
32094 enum e1000_nvm_type type;
32095 enum e1000_nvm_override override;
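The e1000e/hw.h hunk introduces typedefs such as e1000_mac_operations_no_const and switches the embedded ops members over to them. In the grsecurity patch, structures consisting purely of function pointers are normally made read-only by a compiler plugin; __no_const marks the few instances that must stay writable, here because each adapter patches individual callbacks in its per-device copy (as the 82571 and es2lan hunks above show). The sketch below only illustrates the shape of that arrangement: mac_ops, mac_info and the two read_mac_addr functions are invented, and __no_const is stubbed to nothing because the real attribute comes from the PaX constify plugin.

#include <stdio.h>

/* Stand-in: in the real patch __no_const is a plugin-provided attribute that
 * exempts a type from automatic constification. Here it is a no-op. */
#define __no_const

struct mac_ops {
    int (*read_mac_addr)(void);
};
typedef struct mac_ops __no_const mac_ops_no_const;

struct mac_info {
    mac_ops_no_const ops;      /* per-device copy: must remain writable */
    unsigned char addr[6];
};

static int generic_read_mac_addr(void) { puts("generic read_mac_addr"); return 0; }
static int chip_read_mac_addr(void)    { puts("chip-specific read_mac_addr"); return 0; }

int main(void)
{
    struct mac_info mac = { .ops = { .read_mac_addr = generic_read_mac_addr } };

    /* Init code overrides individual callbacks for the specific chip,
     * which is exactly why this copy cannot be constified. */
    mac.ops.read_mac_addr = chip_read_mac_addr;
    mac.ops.read_mac_addr();
    return 0;
}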
32096diff -urNp linux-3.1.4/drivers/net/fealnx.c linux-3.1.4/drivers/net/fealnx.c
32097--- linux-3.1.4/drivers/net/fealnx.c 2011-11-11 15:19:27.000000000 -0500
32098+++ linux-3.1.4/drivers/net/fealnx.c 2011-11-16 18:39:07.000000000 -0500
32099@@ -150,7 +150,7 @@ struct chip_info {
32100 int flags;
32101 };
32102
32103-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32104+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32105 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32106 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32107 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32108diff -urNp linux-3.1.4/drivers/net/hamradio/6pack.c linux-3.1.4/drivers/net/hamradio/6pack.c
32109--- linux-3.1.4/drivers/net/hamradio/6pack.c 2011-11-11 15:19:27.000000000 -0500
32110+++ linux-3.1.4/drivers/net/hamradio/6pack.c 2011-11-16 18:40:22.000000000 -0500
32111@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
32112 unsigned char buf[512];
32113 int count1;
32114
32115+ pax_track_stack();
32116+
32117 if (!count)
32118 return;
32119
32120diff -urNp linux-3.1.4/drivers/net/igb/e1000_hw.h linux-3.1.4/drivers/net/igb/e1000_hw.h
32121--- linux-3.1.4/drivers/net/igb/e1000_hw.h 2011-11-11 15:19:27.000000000 -0500
32122+++ linux-3.1.4/drivers/net/igb/e1000_hw.h 2011-11-16 18:39:07.000000000 -0500
32123@@ -314,6 +314,7 @@ struct e1000_mac_operations {
32124 s32 (*read_mac_addr)(struct e1000_hw *);
32125 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32126 };
32127+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32128
32129 struct e1000_phy_operations {
32130 s32 (*acquire)(struct e1000_hw *);
32131@@ -330,6 +331,7 @@ struct e1000_phy_operations {
32132 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32133 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32134 };
32135+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32136
32137 struct e1000_nvm_operations {
32138 s32 (*acquire)(struct e1000_hw *);
32139@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32140 s32 (*update)(struct e1000_hw *);
32141 s32 (*validate)(struct e1000_hw *);
32142 };
32143+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32144
32145 struct e1000_info {
32146 s32 (*get_invariants)(struct e1000_hw *);
32147@@ -350,7 +353,7 @@ struct e1000_info {
32148 extern const struct e1000_info e1000_82575_info;
32149
32150 struct e1000_mac_info {
32151- struct e1000_mac_operations ops;
32152+ e1000_mac_operations_no_const ops;
32153
32154 u8 addr[6];
32155 u8 perm_addr[6];
32156@@ -388,7 +391,7 @@ struct e1000_mac_info {
32157 };
32158
32159 struct e1000_phy_info {
32160- struct e1000_phy_operations ops;
32161+ e1000_phy_operations_no_const ops;
32162
32163 enum e1000_phy_type type;
32164
32165@@ -423,7 +426,7 @@ struct e1000_phy_info {
32166 };
32167
32168 struct e1000_nvm_info {
32169- struct e1000_nvm_operations ops;
32170+ e1000_nvm_operations_no_const ops;
32171 enum e1000_nvm_type type;
32172 enum e1000_nvm_override override;
32173
32174@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32175 s32 (*check_for_ack)(struct e1000_hw *, u16);
32176 s32 (*check_for_rst)(struct e1000_hw *, u16);
32177 };
32178+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32179
32180 struct e1000_mbx_stats {
32181 u32 msgs_tx;
32182@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32183 };
32184
32185 struct e1000_mbx_info {
32186- struct e1000_mbx_operations ops;
32187+ e1000_mbx_operations_no_const ops;
32188 struct e1000_mbx_stats stats;
32189 u32 timeout;
32190 u32 usec_delay;
32191diff -urNp linux-3.1.4/drivers/net/igbvf/vf.h linux-3.1.4/drivers/net/igbvf/vf.h
32192--- linux-3.1.4/drivers/net/igbvf/vf.h 2011-11-11 15:19:27.000000000 -0500
32193+++ linux-3.1.4/drivers/net/igbvf/vf.h 2011-11-16 18:39:07.000000000 -0500
32194@@ -189,9 +189,10 @@ struct e1000_mac_operations {
32195 s32 (*read_mac_addr)(struct e1000_hw *);
32196 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32197 };
32198+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32199
32200 struct e1000_mac_info {
32201- struct e1000_mac_operations ops;
32202+ e1000_mac_operations_no_const ops;
32203 u8 addr[6];
32204 u8 perm_addr[6];
32205
32206@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32207 s32 (*check_for_ack)(struct e1000_hw *);
32208 s32 (*check_for_rst)(struct e1000_hw *);
32209 };
32210+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32211
32212 struct e1000_mbx_stats {
32213 u32 msgs_tx;
32214@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32215 };
32216
32217 struct e1000_mbx_info {
32218- struct e1000_mbx_operations ops;
32219+ e1000_mbx_operations_no_const ops;
32220 struct e1000_mbx_stats stats;
32221 u32 timeout;
32222 u32 usec_delay;
32223diff -urNp linux-3.1.4/drivers/net/ixgb/ixgb_main.c linux-3.1.4/drivers/net/ixgb/ixgb_main.c
32224--- linux-3.1.4/drivers/net/ixgb/ixgb_main.c 2011-11-11 15:19:27.000000000 -0500
32225+++ linux-3.1.4/drivers/net/ixgb/ixgb_main.c 2011-11-16 18:40:22.000000000 -0500
32226@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
32227 u32 rctl;
32228 int i;
32229
32230+ pax_track_stack();
32231+
32232 /* Check for Promiscuous and All Multicast modes */
32233
32234 rctl = IXGB_READ_REG(hw, RCTL);
32235diff -urNp linux-3.1.4/drivers/net/ixgb/ixgb_param.c linux-3.1.4/drivers/net/ixgb/ixgb_param.c
32236--- linux-3.1.4/drivers/net/ixgb/ixgb_param.c 2011-11-11 15:19:27.000000000 -0500
32237+++ linux-3.1.4/drivers/net/ixgb/ixgb_param.c 2011-11-16 18:40:22.000000000 -0500
32238@@ -261,6 +261,9 @@ void __devinit
32239 ixgb_check_options(struct ixgb_adapter *adapter)
32240 {
32241 int bd = adapter->bd_number;
32242+
32243+ pax_track_stack();
32244+
32245 if (bd >= IXGB_MAX_NIC) {
32246 pr_notice("Warning: no configuration for board #%i\n", bd);
32247 pr_notice("Using defaults for all values\n");
32248diff -urNp linux-3.1.4/drivers/net/ixgbe/ixgbe_type.h linux-3.1.4/drivers/net/ixgbe/ixgbe_type.h
32249--- linux-3.1.4/drivers/net/ixgbe/ixgbe_type.h 2011-11-11 15:19:27.000000000 -0500
32250+++ linux-3.1.4/drivers/net/ixgbe/ixgbe_type.h 2011-11-16 18:39:07.000000000 -0500
32251@@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations {
32252 s32 (*update_checksum)(struct ixgbe_hw *);
32253 u16 (*calc_checksum)(struct ixgbe_hw *);
32254 };
32255+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32256
32257 struct ixgbe_mac_operations {
32258 s32 (*init_hw)(struct ixgbe_hw *);
32259@@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations {
32260 /* Manageability interface */
32261 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32262 };
32263+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32264
32265 struct ixgbe_phy_operations {
32266 s32 (*identify)(struct ixgbe_hw *);
32267@@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations {
32268 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32269 s32 (*check_overtemp)(struct ixgbe_hw *);
32270 };
32271+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32272
32273 struct ixgbe_eeprom_info {
32274- struct ixgbe_eeprom_operations ops;
32275+ ixgbe_eeprom_operations_no_const ops;
32276 enum ixgbe_eeprom_type type;
32277 u32 semaphore_delay;
32278 u16 word_size;
32279@@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info {
32280
32281 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32282 struct ixgbe_mac_info {
32283- struct ixgbe_mac_operations ops;
32284+ ixgbe_mac_operations_no_const ops;
32285 enum ixgbe_mac_type type;
32286 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32287 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32288@@ -2762,7 +2765,7 @@ struct ixgbe_mac_info {
32289 };
32290
32291 struct ixgbe_phy_info {
32292- struct ixgbe_phy_operations ops;
32293+ ixgbe_phy_operations_no_const ops;
32294 struct mdio_if_info mdio;
32295 enum ixgbe_phy_type type;
32296 u32 id;
32297@@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations {
32298 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32299 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32300 };
32301+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32302
32303 struct ixgbe_mbx_stats {
32304 u32 msgs_tx;
32305@@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats {
32306 };
32307
32308 struct ixgbe_mbx_info {
32309- struct ixgbe_mbx_operations ops;
32310+ ixgbe_mbx_operations_no_const ops;
32311 struct ixgbe_mbx_stats stats;
32312 u32 timeout;
32313 u32 usec_delay;
32314diff -urNp linux-3.1.4/drivers/net/ixgbevf/vf.h linux-3.1.4/drivers/net/ixgbevf/vf.h
32315--- linux-3.1.4/drivers/net/ixgbevf/vf.h 2011-11-11 15:19:27.000000000 -0500
32316+++ linux-3.1.4/drivers/net/ixgbevf/vf.h 2011-11-16 18:39:07.000000000 -0500
32317@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32318 s32 (*clear_vfta)(struct ixgbe_hw *);
32319 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32320 };
32321+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32322
32323 enum ixgbe_mac_type {
32324 ixgbe_mac_unknown = 0,
32325@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32326 };
32327
32328 struct ixgbe_mac_info {
32329- struct ixgbe_mac_operations ops;
32330+ ixgbe_mac_operations_no_const ops;
32331 u8 addr[6];
32332 u8 perm_addr[6];
32333
32334@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
32335 s32 (*check_for_ack)(struct ixgbe_hw *);
32336 s32 (*check_for_rst)(struct ixgbe_hw *);
32337 };
32338+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32339
32340 struct ixgbe_mbx_stats {
32341 u32 msgs_tx;
32342@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
32343 };
32344
32345 struct ixgbe_mbx_info {
32346- struct ixgbe_mbx_operations ops;
32347+ ixgbe_mbx_operations_no_const ops;
32348 struct ixgbe_mbx_stats stats;
32349 u32 timeout;
32350 u32 udelay;
32351diff -urNp linux-3.1.4/drivers/net/ksz884x.c linux-3.1.4/drivers/net/ksz884x.c
32352--- linux-3.1.4/drivers/net/ksz884x.c 2011-11-11 15:19:27.000000000 -0500
32353+++ linux-3.1.4/drivers/net/ksz884x.c 2011-11-16 18:40:22.000000000 -0500
32354@@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(str
32355 int rc;
32356 u64 counter[TOTAL_PORT_COUNTER_NUM];
32357
32358+ pax_track_stack();
32359+
32360 mutex_lock(&hw_priv->lock);
32361 n = SWITCH_PORT_NUM;
32362 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
32363diff -urNp linux-3.1.4/drivers/net/mlx4/main.c linux-3.1.4/drivers/net/mlx4/main.c
32364--- linux-3.1.4/drivers/net/mlx4/main.c 2011-11-11 15:19:27.000000000 -0500
32365+++ linux-3.1.4/drivers/net/mlx4/main.c 2011-11-16 18:40:22.000000000 -0500
32366@@ -40,6 +40,7 @@
32367 #include <linux/dma-mapping.h>
32368 #include <linux/slab.h>
32369 #include <linux/io-mapping.h>
32370+#include <linux/sched.h>
32371
32372 #include <linux/mlx4/device.h>
32373 #include <linux/mlx4/doorbell.h>
32374@@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev
32375 u64 icm_size;
32376 int err;
32377
32378+ pax_track_stack();
32379+
32380 err = mlx4_QUERY_FW(dev);
32381 if (err) {
32382 if (err == -EACCES)
32383diff -urNp linux-3.1.4/drivers/net/niu.c linux-3.1.4/drivers/net/niu.c
32384--- linux-3.1.4/drivers/net/niu.c 2011-11-11 15:19:27.000000000 -0500
32385+++ linux-3.1.4/drivers/net/niu.c 2011-11-16 18:40:22.000000000 -0500
32386@@ -9061,6 +9061,8 @@ static void __devinit niu_try_msix(struc
32387 int i, num_irqs, err;
32388 u8 first_ldg;
32389
32390+ pax_track_stack();
32391+
32392 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32393 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32394 ldg_num_map[i] = first_ldg + i;
32395diff -urNp linux-3.1.4/drivers/net/pcnet32.c linux-3.1.4/drivers/net/pcnet32.c
32396--- linux-3.1.4/drivers/net/pcnet32.c 2011-11-11 15:19:27.000000000 -0500
32397+++ linux-3.1.4/drivers/net/pcnet32.c 2011-11-16 18:39:07.000000000 -0500
32398@@ -270,7 +270,7 @@ struct pcnet32_private {
32399 struct sk_buff **rx_skbuff;
32400 dma_addr_t *tx_dma_addr;
32401 dma_addr_t *rx_dma_addr;
32402- struct pcnet32_access a;
32403+ struct pcnet32_access *a;
32404 spinlock_t lock; /* Guard lock */
32405 unsigned int cur_rx, cur_tx; /* The next free ring entry */
32406 unsigned int rx_ring_size; /* current rx ring size */
32407@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
32408 u16 val;
32409
32410 netif_wake_queue(dev);
32411- val = lp->a.read_csr(ioaddr, CSR3);
32412+ val = lp->a->read_csr(ioaddr, CSR3);
32413 val &= 0x00ff;
32414- lp->a.write_csr(ioaddr, CSR3, val);
32415+ lp->a->write_csr(ioaddr, CSR3, val);
32416 napi_enable(&lp->napi);
32417 }
32418
32419@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
32420 r = mii_link_ok(&lp->mii_if);
32421 } else if (lp->chip_version >= PCNET32_79C970A) {
32422 ulong ioaddr = dev->base_addr; /* card base I/O address */
32423- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
32424+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
32425 } else { /* can not detect link on really old chips */
32426 r = 1;
32427 }
32428@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
32429 pcnet32_netif_stop(dev);
32430
32431 spin_lock_irqsave(&lp->lock, flags);
32432- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
32433+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
32434
32435 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
32436
32437@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
32438 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
32439 {
32440 struct pcnet32_private *lp = netdev_priv(dev);
32441- struct pcnet32_access *a = &lp->a; /* access to registers */
32442+ struct pcnet32_access *a = lp->a; /* access to registers */
32443 ulong ioaddr = dev->base_addr; /* card base I/O address */
32444 struct sk_buff *skb; /* sk buff */
32445 int x, i; /* counters */
32446@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
32447 pcnet32_netif_stop(dev);
32448
32449 spin_lock_irqsave(&lp->lock, flags);
32450- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
32451+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
32452
32453 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
32454
32455 /* Reset the PCNET32 */
32456- lp->a.reset(ioaddr);
32457- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
32458+ lp->a->reset(ioaddr);
32459+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
32460
32461 /* switch pcnet32 to 32bit mode */
32462- lp->a.write_bcr(ioaddr, 20, 2);
32463+ lp->a->write_bcr(ioaddr, 20, 2);
32464
32465 /* purge & init rings but don't actually restart */
32466 pcnet32_restart(dev, 0x0000);
32467
32468- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
32469+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
32470
32471 /* Initialize Transmit buffers. */
32472 size = data_len + 15;
32473@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
32474
32475 /* set int loopback in CSR15 */
32476 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
32477- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
32478+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
32479
32480 teststatus = cpu_to_le16(0x8000);
32481- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
32482+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
32483
32484 /* Check status of descriptors */
32485 for (x = 0; x < numbuffs; x++) {
32486@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
32487 }
32488 }
32489
32490- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
32491+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
32492 wmb();
32493 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
32494 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
32495@@ -1015,7 +1015,7 @@ clean_up:
32496 pcnet32_restart(dev, CSR0_NORMAL);
32497 } else {
32498 pcnet32_purge_rx_ring(dev);
32499- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
32500+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
32501 }
32502 spin_unlock_irqrestore(&lp->lock, flags);
32503
32504@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
32505 enum ethtool_phys_id_state state)
32506 {
32507 struct pcnet32_private *lp = netdev_priv(dev);
32508- struct pcnet32_access *a = &lp->a;
32509+ struct pcnet32_access *a = lp->a;
32510 ulong ioaddr = dev->base_addr;
32511 unsigned long flags;
32512 int i;
32513@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
32514 {
32515 int csr5;
32516 struct pcnet32_private *lp = netdev_priv(dev);
32517- struct pcnet32_access *a = &lp->a;
32518+ struct pcnet32_access *a = lp->a;
32519 ulong ioaddr = dev->base_addr;
32520 int ticks;
32521
32522@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
32523 spin_lock_irqsave(&lp->lock, flags);
32524 if (pcnet32_tx(dev)) {
32525 /* reset the chip to clear the error condition, then restart */
32526- lp->a.reset(ioaddr);
32527- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
32528+ lp->a->reset(ioaddr);
32529+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
32530 pcnet32_restart(dev, CSR0_START);
32531 netif_wake_queue(dev);
32532 }
32533@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
32534 __napi_complete(napi);
32535
32536 /* clear interrupt masks */
32537- val = lp->a.read_csr(ioaddr, CSR3);
32538+ val = lp->a->read_csr(ioaddr, CSR3);
32539 val &= 0x00ff;
32540- lp->a.write_csr(ioaddr, CSR3, val);
32541+ lp->a->write_csr(ioaddr, CSR3, val);
32542
32543 /* Set interrupt enable. */
32544- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
32545+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
32546
32547 spin_unlock_irqrestore(&lp->lock, flags);
32548 }
32549@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
32550 int i, csr0;
32551 u16 *buff = ptr;
32552 struct pcnet32_private *lp = netdev_priv(dev);
32553- struct pcnet32_access *a = &lp->a;
32554+ struct pcnet32_access *a = lp->a;
32555 ulong ioaddr = dev->base_addr;
32556 unsigned long flags;
32557
32558@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
32559 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
32560 if (lp->phymask & (1 << j)) {
32561 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
32562- lp->a.write_bcr(ioaddr, 33,
32563+ lp->a->write_bcr(ioaddr, 33,
32564 (j << 5) | i);
32565- *buff++ = lp->a.read_bcr(ioaddr, 34);
32566+ *buff++ = lp->a->read_bcr(ioaddr, 34);
32567 }
32568 }
32569 }
32570@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
32571 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
32572 lp->options |= PCNET32_PORT_FD;
32573
32574- lp->a = *a;
32575+ lp->a = a;
32576
32577 /* prior to register_netdev, dev->name is not yet correct */
32578 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
32579@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
32580 if (lp->mii) {
32581 /* lp->phycount and lp->phymask are set to 0 by memset above */
32582
32583- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
32584+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
32585 /* scan for PHYs */
32586 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
32587 unsigned short id1, id2;
32588@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
32589 pr_info("Found PHY %04x:%04x at address %d\n",
32590 id1, id2, i);
32591 }
32592- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
32593+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
32594 if (lp->phycount > 1)
32595 lp->options |= PCNET32_PORT_MII;
32596 }
32597@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
32598 }
32599
32600 /* Reset the PCNET32 */
32601- lp->a.reset(ioaddr);
32602+ lp->a->reset(ioaddr);
32603
32604 /* switch pcnet32 to 32bit mode */
32605- lp->a.write_bcr(ioaddr, 20, 2);
32606+ lp->a->write_bcr(ioaddr, 20, 2);
32607
32608 netif_printk(lp, ifup, KERN_DEBUG, dev,
32609 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
32610@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
32611 (u32) (lp->init_dma_addr));
32612
32613 /* set/reset autoselect bit */
32614- val = lp->a.read_bcr(ioaddr, 2) & ~2;
32615+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
32616 if (lp->options & PCNET32_PORT_ASEL)
32617 val |= 2;
32618- lp->a.write_bcr(ioaddr, 2, val);
32619+ lp->a->write_bcr(ioaddr, 2, val);
32620
32621 /* handle full duplex setting */
32622 if (lp->mii_if.full_duplex) {
32623- val = lp->a.read_bcr(ioaddr, 9) & ~3;
32624+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
32625 if (lp->options & PCNET32_PORT_FD) {
32626 val |= 1;
32627 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
32628@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
32629 if (lp->chip_version == 0x2627)
32630 val |= 3;
32631 }
32632- lp->a.write_bcr(ioaddr, 9, val);
32633+ lp->a->write_bcr(ioaddr, 9, val);
32634 }
32635
32636 /* set/reset GPSI bit in test register */
32637- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
32638+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
32639 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
32640 val |= 0x10;
32641- lp->a.write_csr(ioaddr, 124, val);
32642+ lp->a->write_csr(ioaddr, 124, val);
32643
32644 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
32645 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
32646@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
32647 * duplex, and/or enable auto negotiation, and clear DANAS
32648 */
32649 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
32650- lp->a.write_bcr(ioaddr, 32,
32651- lp->a.read_bcr(ioaddr, 32) | 0x0080);
32652+ lp->a->write_bcr(ioaddr, 32,
32653+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
32654 /* disable Auto Negotiation, set 10Mpbs, HD */
32655- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
32656+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
32657 if (lp->options & PCNET32_PORT_FD)
32658 val |= 0x10;
32659 if (lp->options & PCNET32_PORT_100)
32660 val |= 0x08;
32661- lp->a.write_bcr(ioaddr, 32, val);
32662+ lp->a->write_bcr(ioaddr, 32, val);
32663 } else {
32664 if (lp->options & PCNET32_PORT_ASEL) {
32665- lp->a.write_bcr(ioaddr, 32,
32666- lp->a.read_bcr(ioaddr,
32667+ lp->a->write_bcr(ioaddr, 32,
32668+ lp->a->read_bcr(ioaddr,
32669 32) | 0x0080);
32670 /* enable auto negotiate, setup, disable fd */
32671- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
32672+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
32673 val |= 0x20;
32674- lp->a.write_bcr(ioaddr, 32, val);
32675+ lp->a->write_bcr(ioaddr, 32, val);
32676 }
32677 }
32678 } else {
32679@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
32680 * There is really no good other way to handle multiple PHYs
32681 * other than turning off all automatics
32682 */
32683- val = lp->a.read_bcr(ioaddr, 2);
32684- lp->a.write_bcr(ioaddr, 2, val & ~2);
32685- val = lp->a.read_bcr(ioaddr, 32);
32686- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
32687+ val = lp->a->read_bcr(ioaddr, 2);
32688+ lp->a->write_bcr(ioaddr, 2, val & ~2);
32689+ val = lp->a->read_bcr(ioaddr, 32);
32690+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
32691
32692 if (!(lp->options & PCNET32_PORT_ASEL)) {
32693 /* setup ecmd */
32694@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
32695 ethtool_cmd_speed_set(&ecmd,
32696 (lp->options & PCNET32_PORT_100) ?
32697 SPEED_100 : SPEED_10);
32698- bcr9 = lp->a.read_bcr(ioaddr, 9);
32699+ bcr9 = lp->a->read_bcr(ioaddr, 9);
32700
32701 if (lp->options & PCNET32_PORT_FD) {
32702 ecmd.duplex = DUPLEX_FULL;
32703@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
32704 ecmd.duplex = DUPLEX_HALF;
32705 bcr9 |= ~(1 << 0);
32706 }
32707- lp->a.write_bcr(ioaddr, 9, bcr9);
32708+ lp->a->write_bcr(ioaddr, 9, bcr9);
32709 }
32710
32711 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
32712@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
32713
32714 #ifdef DO_DXSUFLO
32715 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
32716- val = lp->a.read_csr(ioaddr, CSR3);
32717+ val = lp->a->read_csr(ioaddr, CSR3);
32718 val |= 0x40;
32719- lp->a.write_csr(ioaddr, CSR3, val);
32720+ lp->a->write_csr(ioaddr, CSR3, val);
32721 }
32722 #endif
32723
32724@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
32725 napi_enable(&lp->napi);
32726
32727 /* Re-initialize the PCNET32, and start it when done. */
32728- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
32729- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
32730+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
32731+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
32732
32733- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
32734- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
32735+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
32736+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
32737
32738 netif_start_queue(dev);
32739
32740@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
32741
32742 i = 0;
32743 while (i++ < 100)
32744- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
32745+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
32746 break;
32747 /*
32748 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
32749 * reports that doing so triggers a bug in the '974.
32750 */
32751- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
32752+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
32753
32754 netif_printk(lp, ifup, KERN_DEBUG, dev,
32755 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
32756 i,
32757 (u32) (lp->init_dma_addr),
32758- lp->a.read_csr(ioaddr, CSR0));
32759+ lp->a->read_csr(ioaddr, CSR0));
32760
32761 spin_unlock_irqrestore(&lp->lock, flags);
32762
32763@@ -2218,7 +2218,7 @@ err_free_ring:
32764 * Switch back to 16bit mode to avoid problems with dumb
32765 * DOS packet driver after a warm reboot
32766 */
32767- lp->a.write_bcr(ioaddr, 20, 4);
32768+ lp->a->write_bcr(ioaddr, 20, 4);
32769
32770 err_free_irq:
32771 spin_unlock_irqrestore(&lp->lock, flags);
32772@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
32773
32774 /* wait for stop */
32775 for (i = 0; i < 100; i++)
32776- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
32777+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
32778 break;
32779
32780 if (i >= 100)
32781@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
32782 return;
32783
32784 /* ReInit Ring */
32785- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
32786+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
32787 i = 0;
32788 while (i++ < 1000)
32789- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
32790+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
32791 break;
32792
32793- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
32794+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
32795 }
32796
32797 static void pcnet32_tx_timeout(struct net_device *dev)
32798@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
32799 /* Transmitter timeout, serious problems. */
32800 if (pcnet32_debug & NETIF_MSG_DRV)
32801 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
32802- dev->name, lp->a.read_csr(ioaddr, CSR0));
32803- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32804+ dev->name, lp->a->read_csr(ioaddr, CSR0));
32805+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32806 dev->stats.tx_errors++;
32807 if (netif_msg_tx_err(lp)) {
32808 int i;
32809@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32810
32811 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
32812 "%s() called, csr0 %4.4x\n",
32813- __func__, lp->a.read_csr(ioaddr, CSR0));
32814+ __func__, lp->a->read_csr(ioaddr, CSR0));
32815
32816 /* Default status -- will not enable Successful-TxDone
32817 * interrupt when that option is available to us.
32818@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32819 dev->stats.tx_bytes += skb->len;
32820
32821 /* Trigger an immediate send poll. */
32822- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32823+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32824
32825 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
32826 lp->tx_full = 1;
32827@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
32828
32829 spin_lock(&lp->lock);
32830
32831- csr0 = lp->a.read_csr(ioaddr, CSR0);
32832+ csr0 = lp->a->read_csr(ioaddr, CSR0);
32833 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
32834 if (csr0 == 0xffff)
32835 break; /* PCMCIA remove happened */
32836 /* Acknowledge all of the current interrupt sources ASAP. */
32837- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32838+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32839
32840 netif_printk(lp, intr, KERN_DEBUG, dev,
32841 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
32842- csr0, lp->a.read_csr(ioaddr, CSR0));
32843+ csr0, lp->a->read_csr(ioaddr, CSR0));
32844
32845 /* Log misc errors. */
32846 if (csr0 & 0x4000)
32847@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
32848 if (napi_schedule_prep(&lp->napi)) {
32849 u16 val;
32850 /* set interrupt masks */
32851- val = lp->a.read_csr(ioaddr, CSR3);
32852+ val = lp->a->read_csr(ioaddr, CSR3);
32853 val |= 0x5f00;
32854- lp->a.write_csr(ioaddr, CSR3, val);
32855+ lp->a->write_csr(ioaddr, CSR3, val);
32856
32857 __napi_schedule(&lp->napi);
32858 break;
32859 }
32860- csr0 = lp->a.read_csr(ioaddr, CSR0);
32861+ csr0 = lp->a->read_csr(ioaddr, CSR0);
32862 }
32863
32864 netif_printk(lp, intr, KERN_DEBUG, dev,
32865 "exiting interrupt, csr0=%#4.4x\n",
32866- lp->a.read_csr(ioaddr, CSR0));
32867+ lp->a->read_csr(ioaddr, CSR0));
32868
32869 spin_unlock(&lp->lock);
32870
32871@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
32872
32873 spin_lock_irqsave(&lp->lock, flags);
32874
32875- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32876+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32877
32878 netif_printk(lp, ifdown, KERN_DEBUG, dev,
32879 "Shutting down ethercard, status was %2.2x\n",
32880- lp->a.read_csr(ioaddr, CSR0));
32881+ lp->a->read_csr(ioaddr, CSR0));
32882
32883 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
32884- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32885+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32886
32887 /*
32888 * Switch back to 16bit mode to avoid problems with dumb
32889 * DOS packet driver after a warm reboot
32890 */
32891- lp->a.write_bcr(ioaddr, 20, 4);
32892+ lp->a->write_bcr(ioaddr, 20, 4);
32893
32894 spin_unlock_irqrestore(&lp->lock, flags);
32895
32896@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
32897 unsigned long flags;
32898
32899 spin_lock_irqsave(&lp->lock, flags);
32900- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32901+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32902 spin_unlock_irqrestore(&lp->lock, flags);
32903
32904 return &dev->stats;
32905@@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struc
32906 if (dev->flags & IFF_ALLMULTI) {
32907 ib->filter[0] = cpu_to_le32(~0U);
32908 ib->filter[1] = cpu_to_le32(~0U);
32909- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32910- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32911- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32912- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32913+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32914+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32915+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32916+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32917 return;
32918 }
32919 /* clear the multicast filter */
32920@@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struc
32921 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
32922 }
32923 for (i = 0; i < 4; i++)
32924- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
32925+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
32926 le16_to_cpu(mcast_table[i]));
32927 }
32928
32929@@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(s
32930
32931 spin_lock_irqsave(&lp->lock, flags);
32932 suspended = pcnet32_suspend(dev, &flags, 0);
32933- csr15 = lp->a.read_csr(ioaddr, CSR15);
32934+ csr15 = lp->a->read_csr(ioaddr, CSR15);
32935 if (dev->flags & IFF_PROMISC) {
32936 /* Log any net taps. */
32937 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
32938 lp->init_block->mode =
32939 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
32940 7);
32941- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
32942+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
32943 } else {
32944 lp->init_block->mode =
32945 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
32946- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32947+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32948 pcnet32_load_multicast(dev);
32949 }
32950
32951 if (suspended) {
32952 int csr5;
32953 /* clear SUSPEND (SPND) - CSR5 bit 0 */
32954- csr5 = lp->a.read_csr(ioaddr, CSR5);
32955- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32956+ csr5 = lp->a->read_csr(ioaddr, CSR5);
32957+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32958 } else {
32959- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32960+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32961 pcnet32_restart(dev, CSR0_NORMAL);
32962 netif_wake_queue(dev);
32963 }
32964@@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *
32965 if (!lp->mii)
32966 return 0;
32967
32968- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32969- val_out = lp->a.read_bcr(ioaddr, 34);
32970+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32971+ val_out = lp->a->read_bcr(ioaddr, 34);
32972
32973 return val_out;
32974 }
32975@@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device
32976 if (!lp->mii)
32977 return;
32978
32979- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32980- lp->a.write_bcr(ioaddr, 34, val);
32981+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32982+ lp->a->write_bcr(ioaddr, 34, val);
32983 }
32984
32985 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32986@@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct n
32987 curr_link = mii_link_ok(&lp->mii_if);
32988 } else {
32989 ulong ioaddr = dev->base_addr; /* card base I/O address */
32990- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
32991+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
32992 }
32993 if (!curr_link) {
32994 if (prev_link || verbose) {
32995@@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct n
32996 (ecmd.duplex == DUPLEX_FULL)
32997 ? "full" : "half");
32998 }
32999- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
33000+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
33001 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
33002 if (lp->mii_if.full_duplex)
33003 bcr9 |= (1 << 0);
33004 else
33005 bcr9 &= ~(1 << 0);
33006- lp->a.write_bcr(dev->base_addr, 9, bcr9);
33007+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
33008 }
33009 } else {
33010 netif_info(lp, link, dev, "link up\n");
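The pcnet32 hunk changes the private structure from holding a copy of the register-access table (struct pcnet32_access a) to holding a pointer to it, so every lp->a.read_csr(...) becomes lp->a->read_csr(...) and the probe code stores lp->a = a instead of lp->a = *a. The table can then live in one shared object rather than being duplicated per device. A small sketch of the before/after access pattern, with invented names (access_ops, demo_ops, priv) rather than the driver's own:

#include <stdio.h>

struct access_ops {
    unsigned (*read_csr)(unsigned long ioaddr, int reg);
    void     (*write_csr)(unsigned long ioaddr, int reg, unsigned val);
};

static unsigned demo_read_csr(unsigned long ioaddr, int reg)
{
    printf("read  csr%d @%#lx\n", reg, ioaddr);
    return 0;
}

static void demo_write_csr(unsigned long ioaddr, int reg, unsigned val)
{
    printf("write csr%d @%#lx = %#x\n", reg, ioaddr, val);
}

/* One shared access table, analogous to the driver's I/O method tables. */
static const struct access_ops demo_ops = {
    .read_csr  = demo_read_csr,
    .write_csr = demo_write_csr,
};

struct priv {
    const struct access_ops *a;   /* pointer, as after the patch (was an embedded struct) */
    unsigned long ioaddr;
};

int main(void)
{
    struct priv lp = { .a = &demo_ops, .ioaddr = 0x1000 };   /* probe: lp->a = a */
    unsigned val = lp.a->read_csr(lp.ioaddr, 3);             /* was lp->a.read_csr(...) */

    lp.a->write_csr(lp.ioaddr, 3, val & 0x00ff);
    return 0;
}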
33011diff -urNp linux-3.1.4/drivers/net/ppp_generic.c linux-3.1.4/drivers/net/ppp_generic.c
33012--- linux-3.1.4/drivers/net/ppp_generic.c 2011-11-11 15:19:27.000000000 -0500
33013+++ linux-3.1.4/drivers/net/ppp_generic.c 2011-11-16 18:39:07.000000000 -0500
33014@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
33015 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
33016 struct ppp_stats stats;
33017 struct ppp_comp_stats cstats;
33018- char *vers;
33019
33020 switch (cmd) {
33021 case SIOCGPPPSTATS:
33022@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
33023 break;
33024
33025 case SIOCGPPPVER:
33026- vers = PPP_VERSION;
33027- if (copy_to_user(addr, vers, strlen(vers) + 1))
33028+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
33029 break;
33030 err = 0;
33031 break;
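The ppp_generic.c change drops the intermediate vers pointer and copies sizeof(PPP_VERSION) bytes of the string literal directly: sizeof on a string literal already counts the terminating NUL, so it replaces the strlen(vers) + 1 computation and the extra variable. A two-assertion userspace illustration, where DEMO_VERSION stands in for PPP_VERSION and memcpy plays the role of copy_to_user:

#include <assert.h>
#include <string.h>

#define DEMO_VERSION "2.4.2"   /* stands in for PPP_VERSION */

int main(void)
{
    char out[sizeof(DEMO_VERSION)];

    /* sizeof on the literal includes the trailing '\0', so these are equal. */
    assert(sizeof(DEMO_VERSION) == strlen(DEMO_VERSION) + 1);

    memcpy(out, DEMO_VERSION, sizeof(DEMO_VERSION));   /* the patched form */
    assert(strcmp(out, DEMO_VERSION) == 0);
    return 0;
}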
33032diff -urNp linux-3.1.4/drivers/net/r8169.c linux-3.1.4/drivers/net/r8169.c
33033--- linux-3.1.4/drivers/net/r8169.c 2011-11-11 15:19:27.000000000 -0500
33034+++ linux-3.1.4/drivers/net/r8169.c 2011-11-16 18:39:07.000000000 -0500
33035@@ -663,12 +663,12 @@ struct rtl8169_private {
33036 struct mdio_ops {
33037 void (*write)(void __iomem *, int, int);
33038 int (*read)(void __iomem *, int);
33039- } mdio_ops;
33040+ } __no_const mdio_ops;
33041
33042 struct pll_power_ops {
33043 void (*down)(struct rtl8169_private *);
33044 void (*up)(struct rtl8169_private *);
33045- } pll_power_ops;
33046+ } __no_const pll_power_ops;
33047
33048 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
33049 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
33050diff -urNp linux-3.1.4/drivers/net/sis190.c linux-3.1.4/drivers/net/sis190.c
33051--- linux-3.1.4/drivers/net/sis190.c 2011-11-11 15:19:27.000000000 -0500
33052+++ linux-3.1.4/drivers/net/sis190.c 2011-11-16 18:39:07.000000000 -0500
33053@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr
33054 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
33055 struct net_device *dev)
33056 {
33057- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
33058+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
33059 struct sis190_private *tp = netdev_priv(dev);
33060 struct pci_dev *isa_bridge;
33061 u8 reg, tmp8;
33062diff -urNp linux-3.1.4/drivers/net/sundance.c linux-3.1.4/drivers/net/sundance.c
33063--- linux-3.1.4/drivers/net/sundance.c 2011-11-11 15:19:27.000000000 -0500
33064+++ linux-3.1.4/drivers/net/sundance.c 2011-11-16 18:39:07.000000000 -0500
33065@@ -218,7 +218,7 @@ enum {
33066 struct pci_id_info {
33067 const char *name;
33068 };
33069-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
33070+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
33071 {"D-Link DFE-550TX FAST Ethernet Adapter"},
33072 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
33073 {"D-Link DFE-580TX 4 port Server Adapter"},
33074diff -urNp linux-3.1.4/drivers/net/tg3.h linux-3.1.4/drivers/net/tg3.h
33075--- linux-3.1.4/drivers/net/tg3.h 2011-11-11 15:19:27.000000000 -0500
33076+++ linux-3.1.4/drivers/net/tg3.h 2011-11-16 18:39:07.000000000 -0500
33077@@ -134,6 +134,7 @@
33078 #define CHIPREV_ID_5750_A0 0x4000
33079 #define CHIPREV_ID_5750_A1 0x4001
33080 #define CHIPREV_ID_5750_A3 0x4003
33081+#define CHIPREV_ID_5750_C1 0x4201
33082 #define CHIPREV_ID_5750_C2 0x4202
33083 #define CHIPREV_ID_5752_A0_HW 0x5000
33084 #define CHIPREV_ID_5752_A0 0x6000
33085diff -urNp linux-3.1.4/drivers/net/tokenring/abyss.c linux-3.1.4/drivers/net/tokenring/abyss.c
33086--- linux-3.1.4/drivers/net/tokenring/abyss.c 2011-11-11 15:19:27.000000000 -0500
33087+++ linux-3.1.4/drivers/net/tokenring/abyss.c 2011-11-16 18:39:07.000000000 -0500
33088@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
33089
33090 static int __init abyss_init (void)
33091 {
33092- abyss_netdev_ops = tms380tr_netdev_ops;
33093+ pax_open_kernel();
33094+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33095
33096- abyss_netdev_ops.ndo_open = abyss_open;
33097- abyss_netdev_ops.ndo_stop = abyss_close;
33098+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
33099+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
33100+ pax_close_kernel();
33101
33102 return pci_register_driver(&abyss_driver);
33103 }
33104diff -urNp linux-3.1.4/drivers/net/tokenring/madgemc.c linux-3.1.4/drivers/net/tokenring/madgemc.c
33105--- linux-3.1.4/drivers/net/tokenring/madgemc.c 2011-11-11 15:19:27.000000000 -0500
33106+++ linux-3.1.4/drivers/net/tokenring/madgemc.c 2011-11-16 18:39:07.000000000 -0500
33107@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
33108
33109 static int __init madgemc_init (void)
33110 {
33111- madgemc_netdev_ops = tms380tr_netdev_ops;
33112- madgemc_netdev_ops.ndo_open = madgemc_open;
33113- madgemc_netdev_ops.ndo_stop = madgemc_close;
33114+ pax_open_kernel();
33115+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33116+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
33117+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
33118+ pax_close_kernel();
33119
33120 return mca_register_driver (&madgemc_driver);
33121 }
33122diff -urNp linux-3.1.4/drivers/net/tokenring/proteon.c linux-3.1.4/drivers/net/tokenring/proteon.c
33123--- linux-3.1.4/drivers/net/tokenring/proteon.c 2011-11-11 15:19:27.000000000 -0500
33124+++ linux-3.1.4/drivers/net/tokenring/proteon.c 2011-11-16 18:39:07.000000000 -0500
33125@@ -353,9 +353,11 @@ static int __init proteon_init(void)
33126 struct platform_device *pdev;
33127 int i, num = 0, err = 0;
33128
33129- proteon_netdev_ops = tms380tr_netdev_ops;
33130- proteon_netdev_ops.ndo_open = proteon_open;
33131- proteon_netdev_ops.ndo_stop = tms380tr_close;
33132+ pax_open_kernel();
33133+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33134+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
33135+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
33136+ pax_close_kernel();
33137
33138 err = platform_driver_register(&proteon_driver);
33139 if (err)
33140diff -urNp linux-3.1.4/drivers/net/tokenring/skisa.c linux-3.1.4/drivers/net/tokenring/skisa.c
33141--- linux-3.1.4/drivers/net/tokenring/skisa.c 2011-11-11 15:19:27.000000000 -0500
33142+++ linux-3.1.4/drivers/net/tokenring/skisa.c 2011-11-16 18:39:07.000000000 -0500
33143@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
33144 struct platform_device *pdev;
33145 int i, num = 0, err = 0;
33146
33147- sk_isa_netdev_ops = tms380tr_netdev_ops;
33148- sk_isa_netdev_ops.ndo_open = sk_isa_open;
33149- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33150+ pax_open_kernel();
33151+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33152+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
33153+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33154+ pax_close_kernel();
33155
33156 err = platform_driver_register(&sk_isa_driver);
33157 if (err)
33158diff -urNp linux-3.1.4/drivers/net/tulip/de2104x.c linux-3.1.4/drivers/net/tulip/de2104x.c
33159--- linux-3.1.4/drivers/net/tulip/de2104x.c 2011-11-11 15:19:27.000000000 -0500
33160+++ linux-3.1.4/drivers/net/tulip/de2104x.c 2011-11-16 18:40:22.000000000 -0500
33161@@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_i
33162 struct de_srom_info_leaf *il;
33163 void *bufp;
33164
33165+ pax_track_stack();
33166+
33167 /* download entire eeprom */
33168 for (i = 0; i < DE_EEPROM_WORDS; i++)
33169 ((__le16 *)ee_data)[i] =
33170diff -urNp linux-3.1.4/drivers/net/tulip/de4x5.c linux-3.1.4/drivers/net/tulip/de4x5.c
33171--- linux-3.1.4/drivers/net/tulip/de4x5.c 2011-11-11 15:19:27.000000000 -0500
33172+++ linux-3.1.4/drivers/net/tulip/de4x5.c 2011-11-16 18:39:07.000000000 -0500
33173@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, stru
33174 for (i=0; i<ETH_ALEN; i++) {
33175 tmp.addr[i] = dev->dev_addr[i];
33176 }
33177- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
33178+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
33179 break;
33180
33181 case DE4X5_SET_HWADDR: /* Set the hardware address */
33182@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, stru
33183 spin_lock_irqsave(&lp->lock, flags);
33184 memcpy(&statbuf, &lp->pktStats, ioc->len);
33185 spin_unlock_irqrestore(&lp->lock, flags);
33186- if (copy_to_user(ioc->data, &statbuf, ioc->len))
33187+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
33188 return -EFAULT;
33189 break;
33190 }
33191diff -urNp linux-3.1.4/drivers/net/tulip/eeprom.c linux-3.1.4/drivers/net/tulip/eeprom.c
33192--- linux-3.1.4/drivers/net/tulip/eeprom.c 2011-11-11 15:19:27.000000000 -0500
33193+++ linux-3.1.4/drivers/net/tulip/eeprom.c 2011-11-16 18:39:07.000000000 -0500
33194@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups
33195 {NULL}};
33196
33197
33198-static const char *block_name[] __devinitdata = {
33199+static const char *block_name[] __devinitconst = {
33200 "21140 non-MII",
33201 "21140 MII PHY",
33202 "21142 Serial PHY",
33203diff -urNp linux-3.1.4/drivers/net/tulip/winbond-840.c linux-3.1.4/drivers/net/tulip/winbond-840.c
33204--- linux-3.1.4/drivers/net/tulip/winbond-840.c 2011-11-11 15:19:27.000000000 -0500
33205+++ linux-3.1.4/drivers/net/tulip/winbond-840.c 2011-11-16 18:39:07.000000000 -0500
33206@@ -236,7 +236,7 @@ struct pci_id_info {
33207 int drv_flags; /* Driver use, intended as capability flags. */
33208 };
33209
33210-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
33211+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
33212 { /* Sometime a Level-One switch card. */
33213 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
33214 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
33215diff -urNp linux-3.1.4/drivers/net/usb/hso.c linux-3.1.4/drivers/net/usb/hso.c
33216--- linux-3.1.4/drivers/net/usb/hso.c 2011-11-11 15:19:27.000000000 -0500
33217+++ linux-3.1.4/drivers/net/usb/hso.c 2011-11-16 18:39:07.000000000 -0500
33218@@ -71,7 +71,7 @@
33219 #include <asm/byteorder.h>
33220 #include <linux/serial_core.h>
33221 #include <linux/serial.h>
33222-
33223+#include <asm/local.h>
33224
33225 #define MOD_AUTHOR "Option Wireless"
33226 #define MOD_DESCRIPTION "USB High Speed Option driver"
33227@@ -257,7 +257,7 @@ struct hso_serial {
33228
33229 /* from usb_serial_port */
33230 struct tty_struct *tty;
33231- int open_count;
33232+ local_t open_count;
33233 spinlock_t serial_lock;
33234
33235 int (*write_data) (struct hso_serial *serial);
33236@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
33237 struct urb *urb;
33238
33239 urb = serial->rx_urb[0];
33240- if (serial->open_count > 0) {
33241+ if (local_read(&serial->open_count) > 0) {
33242 count = put_rxbuf_data(urb, serial);
33243 if (count == -1)
33244 return;
33245@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
33246 DUMP1(urb->transfer_buffer, urb->actual_length);
33247
33248 /* Anyone listening? */
33249- if (serial->open_count == 0)
33250+ if (local_read(&serial->open_count) == 0)
33251 return;
33252
33253 if (status == 0) {
33254@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
33255 spin_unlock_irq(&serial->serial_lock);
33256
33257 /* check for port already opened, if not set the termios */
33258- serial->open_count++;
33259- if (serial->open_count == 1) {
33260+ if (local_inc_return(&serial->open_count) == 1) {
33261 serial->rx_state = RX_IDLE;
33262 /* Force default termio settings */
33263 _hso_serial_set_termios(tty, NULL);
33264@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
33265 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33266 if (result) {
33267 hso_stop_serial_device(serial->parent);
33268- serial->open_count--;
33269+ local_dec(&serial->open_count);
33270 kref_put(&serial->parent->ref, hso_serial_ref_free);
33271 }
33272 } else {
33273@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
33274
33275 /* reset the rts and dtr */
33276 /* do the actual close */
33277- serial->open_count--;
33278+ local_dec(&serial->open_count);
33279
33280- if (serial->open_count <= 0) {
33281- serial->open_count = 0;
33282+ if (local_read(&serial->open_count) <= 0) {
33283+ local_set(&serial->open_count, 0);
33284 spin_lock_irq(&serial->serial_lock);
33285 if (serial->tty == tty) {
33286 serial->tty->driver_data = NULL;
33287@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
33288
33289 /* the actual setup */
33290 spin_lock_irqsave(&serial->serial_lock, flags);
33291- if (serial->open_count)
33292+ if (local_read(&serial->open_count))
33293 _hso_serial_set_termios(tty, old);
33294 else
33295 tty->termios = old;
33296@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
33297 D1("Pending read interrupt on port %d\n", i);
33298 spin_lock(&serial->serial_lock);
33299 if (serial->rx_state == RX_IDLE &&
33300- serial->open_count > 0) {
33301+ local_read(&serial->open_count) > 0) {
33302 /* Setup and send a ctrl req read on
33303 * port i */
33304 if (!serial->rx_urb_filled[0]) {
33305@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
33306 /* Start all serial ports */
33307 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33308 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33309- if (dev2ser(serial_table[i])->open_count) {
33310+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
33311 result =
33312 hso_start_serial_device(serial_table[i], GFP_NOIO);
33313 hso_kick_transmit(dev2ser(serial_table[i]));
33314diff -urNp linux-3.1.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.1.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
33315--- linux-3.1.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-11-11 15:19:27.000000000 -0500
33316+++ linux-3.1.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-11-16 18:39:07.000000000 -0500
33317@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device
33318 * Return with error code if any of the queue indices
33319 * is out of range
33320 */
33321- if (p->ring_index[i] < 0 ||
33322- p->ring_index[i] >= adapter->num_rx_queues)
33323+ if (p->ring_index[i] >= adapter->num_rx_queues)
33324 return -EINVAL;
33325 }
33326
33327diff -urNp linux-3.1.4/drivers/net/vxge/vxge-config.h linux-3.1.4/drivers/net/vxge/vxge-config.h
33328--- linux-3.1.4/drivers/net/vxge/vxge-config.h 2011-11-11 15:19:27.000000000 -0500
33329+++ linux-3.1.4/drivers/net/vxge/vxge-config.h 2011-11-16 18:39:07.000000000 -0500
33330@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
33331 void (*link_down)(struct __vxge_hw_device *devh);
33332 void (*crit_err)(struct __vxge_hw_device *devh,
33333 enum vxge_hw_event type, u64 ext_data);
33334-};
33335+} __no_const;
33336
33337 /*
33338 * struct __vxge_hw_blockpool_entry - Block private data structure
33339diff -urNp linux-3.1.4/drivers/net/vxge/vxge-main.c linux-3.1.4/drivers/net/vxge/vxge-main.c
33340--- linux-3.1.4/drivers/net/vxge/vxge-main.c 2011-11-11 15:19:27.000000000 -0500
33341+++ linux-3.1.4/drivers/net/vxge/vxge-main.c 2011-11-16 18:40:22.000000000 -0500
33342@@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_T
33343 struct sk_buff *completed[NR_SKB_COMPLETED];
33344 int more;
33345
33346+ pax_track_stack();
33347+
33348 do {
33349 more = 0;
33350 skb_ptr = completed;
33351@@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_conf
33352 u8 mtable[256] = {0}; /* CPU to vpath mapping */
33353 int index;
33354
33355+ pax_track_stack();
33356+
33357 /*
33358 * Filling
33359 * - itable with bucket numbers
33360diff -urNp linux-3.1.4/drivers/net/vxge/vxge-traffic.h linux-3.1.4/drivers/net/vxge/vxge-traffic.h
33361--- linux-3.1.4/drivers/net/vxge/vxge-traffic.h 2011-11-11 15:19:27.000000000 -0500
33362+++ linux-3.1.4/drivers/net/vxge/vxge-traffic.h 2011-11-16 18:39:07.000000000 -0500
33363@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
33364 struct vxge_hw_mempool_dma *dma_object,
33365 u32 index,
33366 u32 is_last);
33367-};
33368+} __no_const;
33369
33370 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
33371 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
33372diff -urNp linux-3.1.4/drivers/net/wan/hdlc_x25.c linux-3.1.4/drivers/net/wan/hdlc_x25.c
33373--- linux-3.1.4/drivers/net/wan/hdlc_x25.c 2011-11-11 15:19:27.000000000 -0500
33374+++ linux-3.1.4/drivers/net/wan/hdlc_x25.c 2011-11-16 18:39:07.000000000 -0500
33375@@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
33376
33377 static int x25_open(struct net_device *dev)
33378 {
33379- struct lapb_register_struct cb;
33380+ static struct lapb_register_struct cb = {
33381+ .connect_confirmation = x25_connected,
33382+ .connect_indication = x25_connected,
33383+ .disconnect_confirmation = x25_disconnected,
33384+ .disconnect_indication = x25_disconnected,
33385+ .data_indication = x25_data_indication,
33386+ .data_transmit = x25_data_transmit
33387+ };
33388 int result;
33389
33390- cb.connect_confirmation = x25_connected;
33391- cb.connect_indication = x25_connected;
33392- cb.disconnect_confirmation = x25_disconnected;
33393- cb.disconnect_indication = x25_disconnected;
33394- cb.data_indication = x25_data_indication;
33395- cb.data_transmit = x25_data_transmit;
33396-
33397 result = lapb_register(dev, &cb);
33398 if (result != LAPB_OK)
33399 return result;
33400diff -urNp linux-3.1.4/drivers/net/wimax/i2400m/usb-fw.c linux-3.1.4/drivers/net/wimax/i2400m/usb-fw.c
33401--- linux-3.1.4/drivers/net/wimax/i2400m/usb-fw.c 2011-11-11 15:19:27.000000000 -0500
33402+++ linux-3.1.4/drivers/net/wimax/i2400m/usb-fw.c 2011-11-16 18:40:22.000000000 -0500
33403@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
33404 int do_autopm = 1;
33405 DECLARE_COMPLETION_ONSTACK(notif_completion);
33406
33407+ pax_track_stack();
33408+
33409 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
33410 i2400m, ack, ack_size);
33411 BUG_ON(_ack == i2400m->bm_ack_buf);
33412diff -urNp linux-3.1.4/drivers/net/wireless/airo.c linux-3.1.4/drivers/net/wireless/airo.c
33413--- linux-3.1.4/drivers/net/wireless/airo.c 2011-11-11 15:19:27.000000000 -0500
33414+++ linux-3.1.4/drivers/net/wireless/airo.c 2011-11-16 18:40:22.000000000 -0500
33415@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
33416 BSSListElement * loop_net;
33417 BSSListElement * tmp_net;
33418
33419+ pax_track_stack();
33420+
33421 /* Blow away current list of scan results */
33422 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
33423 list_move_tail (&loop_net->list, &ai->network_free_list);
33424@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
33425 WepKeyRid wkr;
33426 int rc;
33427
33428+ pax_track_stack();
33429+
33430 memset( &mySsid, 0, sizeof( mySsid ) );
33431 kfree (ai->flash);
33432 ai->flash = NULL;
33433@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
33434 __le32 *vals = stats.vals;
33435 int len;
33436
33437+ pax_track_stack();
33438+
33439 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
33440 return -ENOMEM;
33441 data = file->private_data;
33442@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
33443 /* If doLoseSync is not 1, we won't do a Lose Sync */
33444 int doLoseSync = -1;
33445
33446+ pax_track_stack();
33447+
33448 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
33449 return -ENOMEM;
33450 data = file->private_data;
33451@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
33452 int i;
33453 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
33454
33455+ pax_track_stack();
33456+
33457 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
33458 if (!qual)
33459 return -ENOMEM;
33460@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
33461 CapabilityRid cap_rid;
33462 __le32 *vals = stats_rid.vals;
33463
33464+ pax_track_stack();
33465+
33466 /* Get stats out of the card */
33467 clear_bit(JOB_WSTATS, &local->jobs);
33468 if (local->power.event) {
33469diff -urNp linux-3.1.4/drivers/net/wireless/ath/ath5k/debug.c linux-3.1.4/drivers/net/wireless/ath/ath5k/debug.c
33470--- linux-3.1.4/drivers/net/wireless/ath/ath5k/debug.c 2011-11-11 15:19:27.000000000 -0500
33471+++ linux-3.1.4/drivers/net/wireless/ath/ath5k/debug.c 2011-11-16 19:08:21.000000000 -0500
33472@@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct f
33473 unsigned int v;
33474 u64 tsf;
33475
33476+ pax_track_stack();
33477+
33478 v = ath5k_hw_reg_read(ah, AR5K_BEACON);
33479 len += snprintf(buf + len, sizeof(buf) - len,
33480 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
33481@@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct fi
33482 unsigned int len = 0;
33483 unsigned int i;
33484
33485+ pax_track_stack();
33486+
33487 len += snprintf(buf + len, sizeof(buf) - len,
33488 "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
33489
33490@@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct fil
33491 unsigned int len = 0;
33492 u32 filt = ath5k_hw_get_rx_filter(ah);
33493
33494+ pax_track_stack();
33495+
33496 len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
33497 ah->bssidmask);
33498 len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
33499@@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(str
33500 unsigned int len = 0;
33501 int i;
33502
33503+ pax_track_stack();
33504+
33505 len += snprintf(buf + len, sizeof(buf) - len,
33506 "RX\n---------------------\n");
33507 len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
33508@@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file
33509 char buf[700];
33510 unsigned int len = 0;
33511
33512+ pax_track_stack();
33513+
33514 len += snprintf(buf + len, sizeof(buf) - len,
33515 "HW has PHY error counters:\t%s\n",
33516 ah->ah_capabilities.cap_has_phyerr_counters ?
33517@@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct fi
33518 struct ath5k_buf *bf, *bf0;
33519 int i, n;
33520
33521+ pax_track_stack();
33522+
33523 len += snprintf(buf + len, sizeof(buf) - len,
33524 "available txbuffers: %d\n", ah->txbuf_len);
33525
33526diff -urNp linux-3.1.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.1.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
33527--- linux-3.1.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-11-11 15:19:27.000000000 -0500
33528+++ linux-3.1.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-11-16 18:40:22.000000000 -0500
33529@@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
33530 int i, im, j;
33531 int nmeasurement;
33532
33533+ pax_track_stack();
33534+
33535 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
33536 if (ah->txchainmask & (1 << i))
33537 num_chains++;
33538diff -urNp linux-3.1.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.1.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
33539--- linux-3.1.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-11-11 15:19:27.000000000 -0500
33540+++ linux-3.1.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-11-16 18:40:22.000000000 -0500
33541@@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L,
33542 int theta_low_bin = 0;
33543 int i;
33544
33545+ pax_track_stack();
33546+
33547 /* disregard any bin that contains <= 16 samples */
33548 thresh_accum_cnt = 16;
33549 scale_factor = 5;
33550diff -urNp linux-3.1.4/drivers/net/wireless/ath/ath9k/debug.c linux-3.1.4/drivers/net/wireless/ath/ath9k/debug.c
33551--- linux-3.1.4/drivers/net/wireless/ath/ath9k/debug.c 2011-11-11 15:19:27.000000000 -0500
33552+++ linux-3.1.4/drivers/net/wireless/ath/ath9k/debug.c 2011-11-16 18:40:22.000000000 -0500
33553@@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struc
33554 char buf[512];
33555 unsigned int len = 0;
33556
33557+ pax_track_stack();
33558+
33559 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
33560 len += snprintf(buf + len, sizeof(buf) - len,
33561 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
33562@@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct fi
33563 u8 addr[ETH_ALEN];
33564 u32 tmp;
33565
33566+ pax_track_stack();
33567+
33568 len += snprintf(buf + len, sizeof(buf) - len,
33569 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
33570 wiphy_name(sc->hw->wiphy),
33571diff -urNp linux-3.1.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.1.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
33572--- linux-3.1.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-11-11 15:19:27.000000000 -0500
33573+++ linux-3.1.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-11-16 18:40:22.000000000 -0500
33574@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
33575 unsigned int len = 0;
33576 int ret = 0;
33577
33578+ pax_track_stack();
33579+
33580 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
33581
33582 ath9k_htc_ps_wakeup(priv);
33583@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
33584 unsigned int len = 0;
33585 int ret = 0;
33586
33587+ pax_track_stack();
33588+
33589 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
33590
33591 ath9k_htc_ps_wakeup(priv);
33592@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
33593 unsigned int len = 0;
33594 int ret = 0;
33595
33596+ pax_track_stack();
33597+
33598 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
33599
33600 ath9k_htc_ps_wakeup(priv);
33601@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
33602 char buf[512];
33603 unsigned int len = 0;
33604
33605+ pax_track_stack();
33606+
33607 len += snprintf(buf + len, sizeof(buf) - len,
33608 "%20s : %10u\n", "Buffers queued",
33609 priv->debug.tx_stats.buf_queued);
33610@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
33611 char buf[512];
33612 unsigned int len = 0;
33613
33614+ pax_track_stack();
33615+
33616 spin_lock_bh(&priv->tx.tx_lock);
33617
33618 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
33619@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
33620 char buf[512];
33621 unsigned int len = 0;
33622
33623+ pax_track_stack();
33624+
33625 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
33626 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
33627
33628diff -urNp linux-3.1.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.1.4/drivers/net/wireless/ath/ath9k/hw.h
33629--- linux-3.1.4/drivers/net/wireless/ath/ath9k/hw.h 2011-11-11 15:19:27.000000000 -0500
33630+++ linux-3.1.4/drivers/net/wireless/ath/ath9k/hw.h 2011-11-16 18:39:07.000000000 -0500
33631@@ -588,7 +588,7 @@ struct ath_hw_private_ops {
33632
33633 /* ANI */
33634 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33635-};
33636+} __no_const;
33637
33638 /**
33639 * struct ath_hw_ops - callbacks used by hardware code and driver code
33640@@ -639,7 +639,7 @@ struct ath_hw_ops {
33641 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33642 struct ath_hw_antcomb_conf *antconf);
33643
33644-};
33645+} __no_const;
33646
33647 struct ath_nf_limits {
33648 s16 max;
33649@@ -652,7 +652,7 @@ struct ath_nf_limits {
33650 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
33651
33652 struct ath_hw {
33653- struct ath_ops reg_ops;
33654+ ath_ops_no_const reg_ops;
33655
33656 struct ieee80211_hw *hw;
33657 struct ath_common common;
33658diff -urNp linux-3.1.4/drivers/net/wireless/ath/ath.h linux-3.1.4/drivers/net/wireless/ath/ath.h
33659--- linux-3.1.4/drivers/net/wireless/ath/ath.h 2011-11-11 15:19:27.000000000 -0500
33660+++ linux-3.1.4/drivers/net/wireless/ath/ath.h 2011-11-16 18:39:07.000000000 -0500
33661@@ -121,6 +121,7 @@ struct ath_ops {
33662 void (*write_flush) (void *);
33663 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33664 };
33665+typedef struct ath_ops __no_const ath_ops_no_const;
33666
33667 struct ath_common;
33668 struct ath_bus_ops;
33669diff -urNp linux-3.1.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.1.4/drivers/net/wireless/ipw2x00/ipw2100.c
33670--- linux-3.1.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-11 15:19:27.000000000 -0500
33671+++ linux-3.1.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-16 18:40:22.000000000 -0500
33672@@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2
33673 int err;
33674 DECLARE_SSID_BUF(ssid);
33675
33676+ pax_track_stack();
33677+
33678 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
33679
33680 if (ssid_len)
33681@@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw210
33682 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
33683 int err;
33684
33685+ pax_track_stack();
33686+
33687 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
33688 idx, keylen, len);
33689
33690diff -urNp linux-3.1.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.1.4/drivers/net/wireless/ipw2x00/libipw_rx.c
33691--- linux-3.1.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-11 15:19:27.000000000 -0500
33692+++ linux-3.1.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-16 18:40:22.000000000 -0500
33693@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
33694 unsigned long flags;
33695 DECLARE_SSID_BUF(ssid);
33696
33697+ pax_track_stack();
33698+
33699 LIBIPW_DEBUG_SCAN("'%s' (%pM"
33700 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
33701 print_ssid(ssid, info_element->data, info_element->len),
33702diff -urNp linux-3.1.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.1.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
33703--- linux-3.1.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-11-11 15:19:27.000000000 -0500
33704+++ linux-3.1.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-11-16 18:39:07.000000000 -0500
33705@@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_
33706 */
33707 if (iwl3945_mod_params.disable_hw_scan) {
33708 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33709- iwl3945_hw_ops.hw_scan = NULL;
33710+ pax_open_kernel();
33711+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33712+ pax_close_kernel();
33713 }
33714
33715 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33716diff -urNp linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
33717--- linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-11 15:19:27.000000000 -0500
33718+++ linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-16 18:40:22.000000000 -0500
33719@@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, s
33720 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
33721 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
33722
33723+ pax_track_stack();
33724+
33725 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
33726
33727 /* Treat uninitialized rate scaling data same as non-existing. */
33728@@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_
33729 container_of(lq_sta, struct iwl_station_priv, lq_sta);
33730 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
33731
33732+ pax_track_stack();
33733+
33734 /* Override starting rate (index 0) if needed for debug purposes */
33735 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
33736
33737diff -urNp linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
33738--- linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-11 15:19:27.000000000 -0500
33739+++ linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-16 18:40:22.000000000 -0500
33740@@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(str
33741 int pos = 0;
33742 const size_t bufsz = sizeof(buf);
33743
33744+ pax_track_stack();
33745+
33746 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
33747 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
33748 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
33749@@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
33750 char buf[256 * NUM_IWL_RXON_CTX];
33751 const size_t bufsz = sizeof(buf);
33752
33753+ pax_track_stack();
33754+
33755 for_each_context(priv, ctx) {
33756 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
33757 ctx->ctxid);
33758diff -urNp linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-debug.h
33759--- linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-11 15:19:27.000000000 -0500
33760+++ linux-3.1.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-16 18:39:07.000000000 -0500
33761@@ -68,8 +68,8 @@ do {
33762 } while (0)
33763
33764 #else
33765-#define IWL_DEBUG(__priv, level, fmt, args...)
33766-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
33767+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
33768+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
33769 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
33770 const void *p, u32 len)
33771 {}
33772diff -urNp linux-3.1.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.1.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
33773--- linux-3.1.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-11 15:19:27.000000000 -0500
33774+++ linux-3.1.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-16 18:40:22.000000000 -0500
33775@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
33776 int buf_len = 512;
33777 size_t len = 0;
33778
33779+ pax_track_stack();
33780+
33781 if (*ppos != 0)
33782 return 0;
33783 if (count < sizeof(buf))
33784diff -urNp linux-3.1.4/drivers/net/wireless/mac80211_hwsim.c linux-3.1.4/drivers/net/wireless/mac80211_hwsim.c
33785--- linux-3.1.4/drivers/net/wireless/mac80211_hwsim.c 2011-11-11 15:19:27.000000000 -0500
33786+++ linux-3.1.4/drivers/net/wireless/mac80211_hwsim.c 2011-11-16 18:39:07.000000000 -0500
33787@@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(vo
33788 return -EINVAL;
33789
33790 if (fake_hw_scan) {
33791- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33792- mac80211_hwsim_ops.sw_scan_start = NULL;
33793- mac80211_hwsim_ops.sw_scan_complete = NULL;
33794+ pax_open_kernel();
33795+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33796+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33797+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33798+ pax_close_kernel();
33799 }
33800
33801 spin_lock_init(&hwsim_radio_lock);
33802diff -urNp linux-3.1.4/drivers/net/wireless/mwifiex/main.h linux-3.1.4/drivers/net/wireless/mwifiex/main.h
33803--- linux-3.1.4/drivers/net/wireless/mwifiex/main.h 2011-11-11 15:19:27.000000000 -0500
33804+++ linux-3.1.4/drivers/net/wireless/mwifiex/main.h 2011-11-16 18:39:07.000000000 -0500
33805@@ -560,7 +560,7 @@ struct mwifiex_if_ops {
33806
33807 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
33808 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33809-};
33810+} __no_const;
33811
33812 struct mwifiex_adapter {
33813 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
33814diff -urNp linux-3.1.4/drivers/net/wireless/rndis_wlan.c linux-3.1.4/drivers/net/wireless/rndis_wlan.c
33815--- linux-3.1.4/drivers/net/wireless/rndis_wlan.c 2011-11-11 15:19:27.000000000 -0500
33816+++ linux-3.1.4/drivers/net/wireless/rndis_wlan.c 2011-11-16 18:39:07.000000000 -0500
33817@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
33818
33819 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33820
33821- if (rts_threshold < 0 || rts_threshold > 2347)
33822+ if (rts_threshold > 2347)
33823 rts_threshold = 2347;
33824
33825 tmp = cpu_to_le32(rts_threshold);
33826diff -urNp linux-3.1.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.1.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
33827--- linux-3.1.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-11-11 15:19:27.000000000 -0500
33828+++ linux-3.1.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-11-16 18:40:22.000000000 -0500
33829@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
33830 u8 rfpath;
33831 u8 num_total_rfpath = rtlphy->num_total_rfpath;
33832
33833+ pax_track_stack();
33834+
33835 precommoncmdcnt = 0;
33836 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
33837 MAX_PRECMD_CNT,
33838diff -urNp linux-3.1.4/drivers/net/wireless/wl1251/wl1251.h linux-3.1.4/drivers/net/wireless/wl1251/wl1251.h
33839--- linux-3.1.4/drivers/net/wireless/wl1251/wl1251.h 2011-11-11 15:19:27.000000000 -0500
33840+++ linux-3.1.4/drivers/net/wireless/wl1251/wl1251.h 2011-11-16 18:39:07.000000000 -0500
33841@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33842 void (*reset)(struct wl1251 *wl);
33843 void (*enable_irq)(struct wl1251 *wl);
33844 void (*disable_irq)(struct wl1251 *wl);
33845-};
33846+} __no_const;
33847
33848 struct wl1251 {
33849 struct ieee80211_hw *hw;
33850diff -urNp linux-3.1.4/drivers/net/wireless/wl12xx/spi.c linux-3.1.4/drivers/net/wireless/wl12xx/spi.c
33851--- linux-3.1.4/drivers/net/wireless/wl12xx/spi.c 2011-11-11 15:19:27.000000000 -0500
33852+++ linux-3.1.4/drivers/net/wireless/wl12xx/spi.c 2011-11-16 18:40:22.000000000 -0500
33853@@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct
33854 u32 chunk_len;
33855 int i;
33856
33857+ pax_track_stack();
33858+
33859 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
33860
33861 spi_message_init(&m);
33862diff -urNp linux-3.1.4/drivers/oprofile/buffer_sync.c linux-3.1.4/drivers/oprofile/buffer_sync.c
33863--- linux-3.1.4/drivers/oprofile/buffer_sync.c 2011-11-11 15:19:27.000000000 -0500
33864+++ linux-3.1.4/drivers/oprofile/buffer_sync.c 2011-11-16 18:39:07.000000000 -0500
33865@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
33866 if (cookie == NO_COOKIE)
33867 offset = pc;
33868 if (cookie == INVALID_COOKIE) {
33869- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33870+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33871 offset = pc;
33872 }
33873 if (cookie != last_cookie) {
33874@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
33875 /* add userspace sample */
33876
33877 if (!mm) {
33878- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33879+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33880 return 0;
33881 }
33882
33883 cookie = lookup_dcookie(mm, s->eip, &offset);
33884
33885 if (cookie == INVALID_COOKIE) {
33886- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33887+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33888 return 0;
33889 }
33890
33891@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33892 /* ignore backtraces if failed to add a sample */
33893 if (state == sb_bt_start) {
33894 state = sb_bt_ignore;
33895- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33896+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33897 }
33898 }
33899 release_mm(mm);
33900diff -urNp linux-3.1.4/drivers/oprofile/event_buffer.c linux-3.1.4/drivers/oprofile/event_buffer.c
33901--- linux-3.1.4/drivers/oprofile/event_buffer.c 2011-11-11 15:19:27.000000000 -0500
33902+++ linux-3.1.4/drivers/oprofile/event_buffer.c 2011-11-16 18:39:07.000000000 -0500
33903@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
33904 }
33905
33906 if (buffer_pos == buffer_size) {
33907- atomic_inc(&oprofile_stats.event_lost_overflow);
33908+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33909 return;
33910 }
33911
33912diff -urNp linux-3.1.4/drivers/oprofile/oprof.c linux-3.1.4/drivers/oprofile/oprof.c
33913--- linux-3.1.4/drivers/oprofile/oprof.c 2011-11-11 15:19:27.000000000 -0500
33914+++ linux-3.1.4/drivers/oprofile/oprof.c 2011-11-16 18:39:07.000000000 -0500
33915@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
33916 if (oprofile_ops.switch_events())
33917 return;
33918
33919- atomic_inc(&oprofile_stats.multiplex_counter);
33920+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33921 start_switch_worker();
33922 }
33923
33924diff -urNp linux-3.1.4/drivers/oprofile/oprofilefs.c linux-3.1.4/drivers/oprofile/oprofilefs.c
33925--- linux-3.1.4/drivers/oprofile/oprofilefs.c 2011-11-11 15:19:27.000000000 -0500
33926+++ linux-3.1.4/drivers/oprofile/oprofilefs.c 2011-11-16 18:39:07.000000000 -0500
33927@@ -186,7 +186,7 @@ static const struct file_operations atom
33928
33929
33930 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33931- char const *name, atomic_t *val)
33932+ char const *name, atomic_unchecked_t *val)
33933 {
33934 return __oprofilefs_create_file(sb, root, name,
33935 &atomic_ro_fops, 0444, val);
33936diff -urNp linux-3.1.4/drivers/oprofile/oprofile_stats.c linux-3.1.4/drivers/oprofile/oprofile_stats.c
33937--- linux-3.1.4/drivers/oprofile/oprofile_stats.c 2011-11-11 15:19:27.000000000 -0500
33938+++ linux-3.1.4/drivers/oprofile/oprofile_stats.c 2011-11-16 18:39:07.000000000 -0500
33939@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33940 cpu_buf->sample_invalid_eip = 0;
33941 }
33942
33943- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33944- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33945- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33946- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33947- atomic_set(&oprofile_stats.multiplex_counter, 0);
33948+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33949+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33950+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33951+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33952+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33953 }
33954
33955
33956diff -urNp linux-3.1.4/drivers/oprofile/oprofile_stats.h linux-3.1.4/drivers/oprofile/oprofile_stats.h
33957--- linux-3.1.4/drivers/oprofile/oprofile_stats.h 2011-11-11 15:19:27.000000000 -0500
33958+++ linux-3.1.4/drivers/oprofile/oprofile_stats.h 2011-11-16 18:39:07.000000000 -0500
33959@@ -13,11 +13,11 @@
33960 #include <linux/atomic.h>
33961
33962 struct oprofile_stat_struct {
33963- atomic_t sample_lost_no_mm;
33964- atomic_t sample_lost_no_mapping;
33965- atomic_t bt_lost_no_mapping;
33966- atomic_t event_lost_overflow;
33967- atomic_t multiplex_counter;
33968+ atomic_unchecked_t sample_lost_no_mm;
33969+ atomic_unchecked_t sample_lost_no_mapping;
33970+ atomic_unchecked_t bt_lost_no_mapping;
33971+ atomic_unchecked_t event_lost_overflow;
33972+ atomic_unchecked_t multiplex_counter;
33973 };
33974
33975 extern struct oprofile_stat_struct oprofile_stats;
33976diff -urNp linux-3.1.4/drivers/parport/procfs.c linux-3.1.4/drivers/parport/procfs.c
33977--- linux-3.1.4/drivers/parport/procfs.c 2011-11-11 15:19:27.000000000 -0500
33978+++ linux-3.1.4/drivers/parport/procfs.c 2011-11-16 18:39:07.000000000 -0500
33979@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
33980
33981 *ppos += len;
33982
33983- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33984+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33985 }
33986
33987 #ifdef CONFIG_PARPORT_1284
33988@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33989
33990 *ppos += len;
33991
33992- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33993+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33994 }
33995 #endif /* IEEE1284.3 support. */
33996
33997diff -urNp linux-3.1.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.1.4/drivers/pci/hotplug/cpci_hotplug.h
33998--- linux-3.1.4/drivers/pci/hotplug/cpci_hotplug.h 2011-11-11 15:19:27.000000000 -0500
33999+++ linux-3.1.4/drivers/pci/hotplug/cpci_hotplug.h 2011-11-16 18:39:07.000000000 -0500
34000@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
34001 int (*hardware_test) (struct slot* slot, u32 value);
34002 u8 (*get_power) (struct slot* slot);
34003 int (*set_power) (struct slot* slot, int value);
34004-};
34005+} __no_const;
34006
34007 struct cpci_hp_controller {
34008 unsigned int irq;
34009diff -urNp linux-3.1.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.1.4/drivers/pci/hotplug/cpqphp_nvram.c
34010--- linux-3.1.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-11 15:19:27.000000000 -0500
34011+++ linux-3.1.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-16 18:39:07.000000000 -0500
34012@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
34013
34014 void compaq_nvram_init (void __iomem *rom_start)
34015 {
34016+
34017+#ifndef CONFIG_PAX_KERNEXEC
34018 if (rom_start) {
34019 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
34020 }
34021+#endif
34022+
34023 dbg("int15 entry = %p\n", compaq_int15_entry_point);
34024
34025 /* initialize our int15 lock */
34026diff -urNp linux-3.1.4/drivers/pci/pcie/aspm.c linux-3.1.4/drivers/pci/pcie/aspm.c
34027--- linux-3.1.4/drivers/pci/pcie/aspm.c 2011-11-11 15:19:27.000000000 -0500
34028+++ linux-3.1.4/drivers/pci/pcie/aspm.c 2011-11-16 18:39:07.000000000 -0500
34029@@ -27,9 +27,9 @@
34030 #define MODULE_PARAM_PREFIX "pcie_aspm."
34031
34032 /* Note: those are not register definitions */
34033-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
34034-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
34035-#define ASPM_STATE_L1 (4) /* L1 state */
34036+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
34037+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
34038+#define ASPM_STATE_L1 (4U) /* L1 state */
34039 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
34040 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
34041
34042diff -urNp linux-3.1.4/drivers/pci/probe.c linux-3.1.4/drivers/pci/probe.c
34043--- linux-3.1.4/drivers/pci/probe.c 2011-11-11 15:19:27.000000000 -0500
34044+++ linux-3.1.4/drivers/pci/probe.c 2011-11-16 18:39:07.000000000 -0500
34045@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev,
34046 u32 l, sz, mask;
34047 u16 orig_cmd;
34048
34049- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
34050+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
34051
34052 if (!dev->mmio_always_on) {
34053 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
34054diff -urNp linux-3.1.4/drivers/pci/proc.c linux-3.1.4/drivers/pci/proc.c
34055--- linux-3.1.4/drivers/pci/proc.c 2011-11-11 15:19:27.000000000 -0500
34056+++ linux-3.1.4/drivers/pci/proc.c 2011-11-16 18:40:22.000000000 -0500
34057@@ -476,7 +476,16 @@ static const struct file_operations proc
34058 static int __init pci_proc_init(void)
34059 {
34060 struct pci_dev *dev = NULL;
34061+
34062+#ifdef CONFIG_GRKERNSEC_PROC_ADD
34063+#ifdef CONFIG_GRKERNSEC_PROC_USER
34064+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
34065+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
34066+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
34067+#endif
34068+#else
34069 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
34070+#endif
34071 proc_create("devices", 0, proc_bus_pci_dir,
34072 &proc_bus_pci_dev_operations);
34073 proc_initialized = 1;
34074diff -urNp linux-3.1.4/drivers/pci/xen-pcifront.c linux-3.1.4/drivers/pci/xen-pcifront.c
34075--- linux-3.1.4/drivers/pci/xen-pcifront.c 2011-11-11 15:19:27.000000000 -0500
34076+++ linux-3.1.4/drivers/pci/xen-pcifront.c 2011-11-16 18:40:22.000000000 -0500
34077@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
34078 struct pcifront_sd *sd = bus->sysdata;
34079 struct pcifront_device *pdev = pcifront_get_pdev(sd);
34080
34081+ pax_track_stack();
34082+
34083 if (verbose_request)
34084 dev_info(&pdev->xdev->dev,
34085 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
34086@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
34087 struct pcifront_sd *sd = bus->sysdata;
34088 struct pcifront_device *pdev = pcifront_get_pdev(sd);
34089
34090+ pax_track_stack();
34091+
34092 if (verbose_request)
34093 dev_info(&pdev->xdev->dev,
34094 "write dev=%04x:%02x:%02x.%01x - "
34095@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
34096 struct pcifront_device *pdev = pcifront_get_pdev(sd);
34097 struct msi_desc *entry;
34098
34099+ pax_track_stack();
34100+
34101 if (nvec > SH_INFO_MAX_VEC) {
34102 dev_err(&dev->dev, "too much vector for pci frontend: %x."
34103 " Increase SH_INFO_MAX_VEC.\n", nvec);
34104@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
34105 struct pcifront_sd *sd = dev->bus->sysdata;
34106 struct pcifront_device *pdev = pcifront_get_pdev(sd);
34107
34108+ pax_track_stack();
34109+
34110 err = do_pci_op(pdev, &op);
34111
34112 /* What should do for error ? */
34113@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
34114 struct pcifront_sd *sd = dev->bus->sysdata;
34115 struct pcifront_device *pdev = pcifront_get_pdev(sd);
34116
34117+ pax_track_stack();
34118+
34119 err = do_pci_op(pdev, &op);
34120 if (likely(!err)) {
34121 vector[0] = op.value;
34122diff -urNp linux-3.1.4/drivers/platform/x86/thinkpad_acpi.c linux-3.1.4/drivers/platform/x86/thinkpad_acpi.c
34123--- linux-3.1.4/drivers/platform/x86/thinkpad_acpi.c 2011-11-11 15:19:27.000000000 -0500
34124+++ linux-3.1.4/drivers/platform/x86/thinkpad_acpi.c 2011-12-02 17:38:47.000000000 -0500
34125@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
34126 return 0;
34127 }
34128
34129-void static hotkey_mask_warn_incomplete_mask(void)
34130+static void hotkey_mask_warn_incomplete_mask(void)
34131 {
34132 /* log only what the user can fix... */
34133 const u32 wantedmask = hotkey_driver_mask &
34134@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_
34135 }
34136 }
34137
34138-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34139- struct tp_nvram_state *newn,
34140- const u32 event_mask)
34141-{
34142-
34143 #define TPACPI_COMPARE_KEY(__scancode, __member) \
34144 do { \
34145 if ((event_mask & (1 << __scancode)) && \
34146@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_eve
34147 tpacpi_hotkey_send_key(__scancode); \
34148 } while (0)
34149
34150- void issue_volchange(const unsigned int oldvol,
34151- const unsigned int newvol)
34152- {
34153- unsigned int i = oldvol;
34154+static void issue_volchange(const unsigned int oldvol,
34155+ const unsigned int newvol,
34156+ const u32 event_mask)
34157+{
34158+ unsigned int i = oldvol;
34159
34160- while (i > newvol) {
34161- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
34162- i--;
34163- }
34164- while (i < newvol) {
34165- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
34166- i++;
34167- }
34168+ while (i > newvol) {
34169+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
34170+ i--;
34171 }
34172+ while (i < newvol) {
34173+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
34174+ i++;
34175+ }
34176+}
34177
34178- void issue_brightnesschange(const unsigned int oldbrt,
34179- const unsigned int newbrt)
34180- {
34181- unsigned int i = oldbrt;
34182+static void issue_brightnesschange(const unsigned int oldbrt,
34183+ const unsigned int newbrt,
34184+ const u32 event_mask)
34185+{
34186+ unsigned int i = oldbrt;
34187
34188- while (i > newbrt) {
34189- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
34190- i--;
34191- }
34192- while (i < newbrt) {
34193- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
34194- i++;
34195- }
34196+ while (i > newbrt) {
34197+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
34198+ i--;
34199+ }
34200+ while (i < newbrt) {
34201+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
34202+ i++;
34203 }
34204+}
34205
34206+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34207+ struct tp_nvram_state *newn,
34208+ const u32 event_mask)
34209+{
34210 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
34211 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
34212 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
34213@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_eve
34214 oldn->volume_level != newn->volume_level) {
34215 /* recently muted, or repeated mute keypress, or
34216 * multiple presses ending in mute */
34217- issue_volchange(oldn->volume_level, newn->volume_level);
34218+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
34219 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
34220 }
34221 } else {
34222@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_eve
34223 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
34224 }
34225 if (oldn->volume_level != newn->volume_level) {
34226- issue_volchange(oldn->volume_level, newn->volume_level);
34227+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
34228 } else if (oldn->volume_toggle != newn->volume_toggle) {
34229 /* repeated vol up/down keypress at end of scale ? */
34230 if (newn->volume_level == 0)
34231@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_eve
34232 /* handle brightness */
34233 if (oldn->brightness_level != newn->brightness_level) {
34234 issue_brightnesschange(oldn->brightness_level,
34235- newn->brightness_level);
34236+ newn->brightness_level,
34237+ event_mask);
34238 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
34239 /* repeated key presses that didn't change state */
34240 if (newn->brightness_level == 0)
34241@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_eve
34242 && !tp_features.bright_unkfw)
34243 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
34244 }
34245+}
34246
34247 #undef TPACPI_COMPARE_KEY
34248 #undef TPACPI_MAY_SEND_KEY
34249-}
34250
34251 /*
34252 * Polling driver
34253diff -urNp linux-3.1.4/drivers/pnp/pnpbios/bioscalls.c linux-3.1.4/drivers/pnp/pnpbios/bioscalls.c
34254--- linux-3.1.4/drivers/pnp/pnpbios/bioscalls.c 2011-11-11 15:19:27.000000000 -0500
34255+++ linux-3.1.4/drivers/pnp/pnpbios/bioscalls.c 2011-11-16 18:39:07.000000000 -0500
34256@@ -59,7 +59,7 @@ do { \
34257 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
34258 } while(0)
34259
34260-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
34261+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
34262 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
34263
34264 /*
34265@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
34266
34267 cpu = get_cpu();
34268 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
34269+
34270+ pax_open_kernel();
34271 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
34272+ pax_close_kernel();
34273
34274 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
34275 spin_lock_irqsave(&pnp_bios_lock, flags);
34276@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
34277 :"memory");
34278 spin_unlock_irqrestore(&pnp_bios_lock, flags);
34279
34280+ pax_open_kernel();
34281 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
34282+ pax_close_kernel();
34283+
34284 put_cpu();
34285
34286 /* If we get here and this is set then the PnP BIOS faulted on us. */
34287@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
34288 return status;
34289 }
34290
34291-void pnpbios_calls_init(union pnp_bios_install_struct *header)
34292+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
34293 {
34294 int i;
34295
34296@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
34297 pnp_bios_callpoint.offset = header->fields.pm16offset;
34298 pnp_bios_callpoint.segment = PNP_CS16;
34299
34300+ pax_open_kernel();
34301+
34302 for_each_possible_cpu(i) {
34303 struct desc_struct *gdt = get_cpu_gdt_table(i);
34304 if (!gdt)
34305@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
34306 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
34307 (unsigned long)__va(header->fields.pm16dseg));
34308 }
34309+
34310+ pax_close_kernel();
34311 }
34312diff -urNp linux-3.1.4/drivers/pnp/resource.c linux-3.1.4/drivers/pnp/resource.c
34313--- linux-3.1.4/drivers/pnp/resource.c 2011-11-11 15:19:27.000000000 -0500
34314+++ linux-3.1.4/drivers/pnp/resource.c 2011-11-16 18:39:07.000000000 -0500
34315@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
34316 return 1;
34317
34318 /* check if the resource is valid */
34319- if (*irq < 0 || *irq > 15)
34320+ if (*irq > 15)
34321 return 0;
34322
34323 /* check if the resource is reserved */
34324@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
34325 return 1;
34326
34327 /* check if the resource is valid */
34328- if (*dma < 0 || *dma == 4 || *dma > 7)
34329+ if (*dma == 4 || *dma > 7)
34330 return 0;
34331
34332 /* check if the resource is reserved */
34333diff -urNp linux-3.1.4/drivers/power/bq27x00_battery.c linux-3.1.4/drivers/power/bq27x00_battery.c
34334--- linux-3.1.4/drivers/power/bq27x00_battery.c 2011-11-11 15:19:27.000000000 -0500
34335+++ linux-3.1.4/drivers/power/bq27x00_battery.c 2011-11-16 18:39:07.000000000 -0500
34336@@ -67,7 +67,7 @@
34337 struct bq27x00_device_info;
34338 struct bq27x00_access_methods {
34339 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
34340-};
34341+} __no_const;
34342
34343 enum bq27x00_chip { BQ27000, BQ27500 };
34344
34345diff -urNp linux-3.1.4/drivers/regulator/max8660.c linux-3.1.4/drivers/regulator/max8660.c
34346--- linux-3.1.4/drivers/regulator/max8660.c 2011-11-11 15:19:27.000000000 -0500
34347+++ linux-3.1.4/drivers/regulator/max8660.c 2011-11-16 18:39:07.000000000 -0500
34348@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
34349 max8660->shadow_regs[MAX8660_OVER1] = 5;
34350 } else {
34351 /* Otherwise devices can be toggled via software */
34352- max8660_dcdc_ops.enable = max8660_dcdc_enable;
34353- max8660_dcdc_ops.disable = max8660_dcdc_disable;
34354+ pax_open_kernel();
34355+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
34356+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
34357+ pax_close_kernel();
34358 }
34359
34360 /*
34361diff -urNp linux-3.1.4/drivers/regulator/mc13892-regulator.c linux-3.1.4/drivers/regulator/mc13892-regulator.c
34362--- linux-3.1.4/drivers/regulator/mc13892-regulator.c 2011-11-11 15:19:27.000000000 -0500
34363+++ linux-3.1.4/drivers/regulator/mc13892-regulator.c 2011-11-16 18:39:07.000000000 -0500
34364@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
34365 }
34366 mc13xxx_unlock(mc13892);
34367
34368- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34369+ pax_open_kernel();
34370+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34371 = mc13892_vcam_set_mode;
34372- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34373+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34374 = mc13892_vcam_get_mode;
34375+ pax_close_kernel();
34376 for (i = 0; i < pdata->num_regulators; i++) {
34377 init_data = &pdata->regulators[i];
34378 priv->regulators[i] = regulator_register(
34379diff -urNp linux-3.1.4/drivers/rtc/rtc-dev.c linux-3.1.4/drivers/rtc/rtc-dev.c
34380--- linux-3.1.4/drivers/rtc/rtc-dev.c 2011-11-11 15:19:27.000000000 -0500
34381+++ linux-3.1.4/drivers/rtc/rtc-dev.c 2011-11-16 18:40:22.000000000 -0500
34382@@ -14,6 +14,7 @@
34383 #include <linux/module.h>
34384 #include <linux/rtc.h>
34385 #include <linux/sched.h>
34386+#include <linux/grsecurity.h>
34387 #include "rtc-core.h"
34388
34389 static dev_t rtc_devt;
34390@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
34391 if (copy_from_user(&tm, uarg, sizeof(tm)))
34392 return -EFAULT;
34393
34394+ gr_log_timechange();
34395+
34396 return rtc_set_time(rtc, &tm);
34397
34398 case RTC_PIE_ON:
34399diff -urNp linux-3.1.4/drivers/scsi/aacraid/aacraid.h linux-3.1.4/drivers/scsi/aacraid/aacraid.h
34400--- linux-3.1.4/drivers/scsi/aacraid/aacraid.h 2011-11-11 15:19:27.000000000 -0500
34401+++ linux-3.1.4/drivers/scsi/aacraid/aacraid.h 2011-11-16 18:39:07.000000000 -0500
34402@@ -492,7 +492,7 @@ struct adapter_ops
34403 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
34404 /* Administrative operations */
34405 int (*adapter_comm)(struct aac_dev * dev, int comm);
34406-};
34407+} __no_const;
34408
34409 /*
34410 * Define which interrupt handler needs to be installed
34411diff -urNp linux-3.1.4/drivers/scsi/aacraid/commctrl.c linux-3.1.4/drivers/scsi/aacraid/commctrl.c
34412--- linux-3.1.4/drivers/scsi/aacraid/commctrl.c 2011-11-11 15:19:27.000000000 -0500
34413+++ linux-3.1.4/drivers/scsi/aacraid/commctrl.c 2011-11-16 18:40:22.000000000 -0500
34414@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
34415 u32 actual_fibsize64, actual_fibsize = 0;
34416 int i;
34417
34418+ pax_track_stack();
34419
34420 if (dev->in_reset) {
34421 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
34422diff -urNp linux-3.1.4/drivers/scsi/aacraid/linit.c linux-3.1.4/drivers/scsi/aacraid/linit.c
34423--- linux-3.1.4/drivers/scsi/aacraid/linit.c 2011-11-26 19:57:29.000000000 -0500
34424+++ linux-3.1.4/drivers/scsi/aacraid/linit.c 2011-11-26 20:00:43.000000000 -0500
34425@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
34426 #elif defined(__devinitconst)
34427 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34428 #else
34429-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
34430+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34431 #endif
34432 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
34433 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
34434diff -urNp linux-3.1.4/drivers/scsi/aic94xx/aic94xx_init.c linux-3.1.4/drivers/scsi/aic94xx/aic94xx_init.c
34435--- linux-3.1.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-11 15:19:27.000000000 -0500
34436+++ linux-3.1.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-16 18:39:07.000000000 -0500
34437@@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
34438 .lldd_control_phy = asd_control_phy,
34439 };
34440
34441-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
34442+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
34443 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
34444 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
34445 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
34446diff -urNp linux-3.1.4/drivers/scsi/bfa/bfad.c linux-3.1.4/drivers/scsi/bfa/bfad.c
34447--- linux-3.1.4/drivers/scsi/bfa/bfad.c 2011-11-11 15:19:27.000000000 -0500
34448+++ linux-3.1.4/drivers/scsi/bfa/bfad.c 2011-11-16 19:01:15.000000000 -0500
34449@@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) {
34450 struct bfad_vport_s *vport, *vport_new;
34451 struct bfa_fcs_driver_info_s driver_info;
34452
34453+ pax_track_stack();
34454+
34455 /* Limit min/max. xfer size to [64k-32MB] */
34456 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
34457 max_xfer_size = BFAD_MIN_SECTORS >> 1;
34458diff -urNp linux-3.1.4/drivers/scsi/bfa/bfa_fcpim.c linux-3.1.4/drivers/scsi/bfa/bfa_fcpim.c
34459--- linux-3.1.4/drivers/scsi/bfa/bfa_fcpim.c 2011-11-11 15:19:27.000000000 -0500
34460+++ linux-3.1.4/drivers/scsi/bfa/bfa_fcpim.c 2011-11-16 18:39:07.000000000 -0500
34461@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct
34462 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34463 {
34464 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34465- struct bfa_itn_s *itn;
34466+ bfa_itn_s_no_const *itn;
34467
34468 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34469 itn->isr = isr;
34470diff -urNp linux-3.1.4/drivers/scsi/bfa/bfa_fcpim.h linux-3.1.4/drivers/scsi/bfa/bfa_fcpim.h
34471--- linux-3.1.4/drivers/scsi/bfa/bfa_fcpim.h 2011-11-11 15:19:27.000000000 -0500
34472+++ linux-3.1.4/drivers/scsi/bfa/bfa_fcpim.h 2011-11-16 18:39:07.000000000 -0500
34473@@ -37,6 +37,7 @@ struct bfa_iotag_s {
34474 struct bfa_itn_s {
34475 bfa_isr_func_t isr;
34476 };
34477+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34478
34479 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34480 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34481@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34482 struct list_head iotag_tio_free_q; /* free IO resources */
34483 struct list_head iotag_unused_q; /* unused IO resources*/
34484 struct bfa_iotag_s *iotag_arr;
34485- struct bfa_itn_s *itn_arr;
34486+ bfa_itn_s_no_const *itn_arr;
34487 int num_ioim_reqs;
34488 int num_fwtio_reqs;
34489 int num_itns;
34490diff -urNp linux-3.1.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.1.4/drivers/scsi/bfa/bfa_fcs_lport.c
34491--- linux-3.1.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-11-11 15:19:27.000000000 -0500
34492+++ linux-3.1.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-11-16 18:40:22.000000000 -0500
34493@@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
34494 u16 len, count;
34495 u16 templen;
34496
34497+ pax_track_stack();
34498+
34499 /*
34500 * get hba attributes
34501 */
34502@@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
34503 u8 count = 0;
34504 u16 templen;
34505
34506+ pax_track_stack();
34507+
34508 /*
34509 * get port attributes
34510 */
34511diff -urNp linux-3.1.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.1.4/drivers/scsi/bfa/bfa_fcs_rport.c
34512--- linux-3.1.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-11-11 15:19:27.000000000 -0500
34513+++ linux-3.1.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-11-16 18:40:22.000000000 -0500
34514@@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
34515 struct fc_rpsc_speed_info_s speeds;
34516 struct bfa_port_attr_s pport_attr;
34517
34518+ pax_track_stack();
34519+
34520 bfa_trc(port->fcs, rx_fchs->s_id);
34521 bfa_trc(port->fcs, rx_fchs->d_id);
34522
34523diff -urNp linux-3.1.4/drivers/scsi/bfa/bfa.h linux-3.1.4/drivers/scsi/bfa/bfa.h
34524--- linux-3.1.4/drivers/scsi/bfa/bfa.h 2011-11-11 15:19:27.000000000 -0500
34525+++ linux-3.1.4/drivers/scsi/bfa/bfa.h 2011-11-16 18:39:07.000000000 -0500
34526@@ -196,7 +196,7 @@ struct bfa_hwif_s {
34527 u32 *end);
34528 int cpe_vec_q0;
34529 int rme_vec_q0;
34530-};
34531+} __no_const;
34532 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
34533
34534 struct bfa_faa_cbfn_s {
34535diff -urNp linux-3.1.4/drivers/scsi/bfa/bfa_ioc.h linux-3.1.4/drivers/scsi/bfa/bfa_ioc.h
34536--- linux-3.1.4/drivers/scsi/bfa/bfa_ioc.h 2011-11-11 15:19:27.000000000 -0500
34537+++ linux-3.1.4/drivers/scsi/bfa/bfa_ioc.h 2011-11-16 18:39:07.000000000 -0500
34538@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34539 bfa_ioc_disable_cbfn_t disable_cbfn;
34540 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34541 bfa_ioc_reset_cbfn_t reset_cbfn;
34542-};
34543+} __no_const;
34544
34545 /*
34546 * IOC event notification mechanism.
34547@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34548 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34549 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34550 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34551-};
34552+} __no_const;
34553
34554 /*
34555 * Queue element to wait for room in request queue. FIFO order is
34556diff -urNp linux-3.1.4/drivers/scsi/BusLogic.c linux-3.1.4/drivers/scsi/BusLogic.c
34557--- linux-3.1.4/drivers/scsi/BusLogic.c 2011-11-11 15:19:27.000000000 -0500
34558+++ linux-3.1.4/drivers/scsi/BusLogic.c 2011-11-16 18:40:22.000000000 -0500
34559@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
34560 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
34561 *PrototypeHostAdapter)
34562 {
34563+ pax_track_stack();
34564+
34565 /*
34566 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
34567 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
34568diff -urNp linux-3.1.4/drivers/scsi/dpt_i2o.c linux-3.1.4/drivers/scsi/dpt_i2o.c
34569--- linux-3.1.4/drivers/scsi/dpt_i2o.c 2011-11-11 15:19:27.000000000 -0500
34570+++ linux-3.1.4/drivers/scsi/dpt_i2o.c 2011-11-16 18:40:22.000000000 -0500
34571@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
34572 dma_addr_t addr;
34573 ulong flags = 0;
34574
34575+ pax_track_stack();
34576+
34577 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
34578 // get user msg size in u32s
34579 if(get_user(size, &user_msg[0])){
34580@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
34581 s32 rcode;
34582 dma_addr_t addr;
34583
34584+ pax_track_stack();
34585+
34586 memset(msg, 0 , sizeof(msg));
34587 len = scsi_bufflen(cmd);
34588 direction = 0x00000000;
34589diff -urNp linux-3.1.4/drivers/scsi/eata.c linux-3.1.4/drivers/scsi/eata.c
34590--- linux-3.1.4/drivers/scsi/eata.c 2011-11-11 15:19:27.000000000 -0500
34591+++ linux-3.1.4/drivers/scsi/eata.c 2011-11-16 18:40:22.000000000 -0500
34592@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
34593 struct hostdata *ha;
34594 char name[16];
34595
34596+ pax_track_stack();
34597+
34598 sprintf(name, "%s%d", driver_name, j);
34599
34600 if (!request_region(port_base, REGION_SIZE, driver_name)) {
34601diff -urNp linux-3.1.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.1.4/drivers/scsi/fcoe/fcoe_ctlr.c
34602--- linux-3.1.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-11-11 15:19:27.000000000 -0500
34603+++ linux-3.1.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-11-16 18:40:22.000000000 -0500
34604@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
34605 } buf;
34606 int rc;
34607
34608+ pax_track_stack();
34609+
34610 fiph = (struct fip_header *)skb->data;
34611 sub = fiph->fip_subcode;
34612
34613diff -urNp linux-3.1.4/drivers/scsi/gdth.c linux-3.1.4/drivers/scsi/gdth.c
34614--- linux-3.1.4/drivers/scsi/gdth.c 2011-11-11 15:19:27.000000000 -0500
34615+++ linux-3.1.4/drivers/scsi/gdth.c 2011-11-16 18:40:22.000000000 -0500
34616@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
34617 unsigned long flags;
34618 gdth_ha_str *ha;
34619
34620+ pax_track_stack();
34621+
34622 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
34623 return -EFAULT;
34624 ha = gdth_find_ha(ldrv.ionode);
34625@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
34626 gdth_ha_str *ha;
34627 int rval;
34628
34629+ pax_track_stack();
34630+
34631 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
34632 res.number >= MAX_HDRIVES)
34633 return -EFAULT;
34634@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
34635 gdth_ha_str *ha;
34636 int rval;
34637
34638+ pax_track_stack();
34639+
34640 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
34641 return -EFAULT;
34642 ha = gdth_find_ha(gen.ionode);
34643@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
34644 int i;
34645 gdth_cmd_str gdtcmd;
34646 char cmnd[MAX_COMMAND_SIZE];
34647+
34648+ pax_track_stack();
34649+
34650 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
34651
34652 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
34653diff -urNp linux-3.1.4/drivers/scsi/gdth_proc.c linux-3.1.4/drivers/scsi/gdth_proc.c
34654--- linux-3.1.4/drivers/scsi/gdth_proc.c 2011-11-11 15:19:27.000000000 -0500
34655+++ linux-3.1.4/drivers/scsi/gdth_proc.c 2011-11-16 18:40:22.000000000 -0500
34656@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
34657 u64 paddr;
34658
34659 char cmnd[MAX_COMMAND_SIZE];
34660+
34661+ pax_track_stack();
34662+
34663 memset(cmnd, 0xff, 12);
34664 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
34665
34666@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
34667 gdth_hget_str *phg;
34668 char cmnd[MAX_COMMAND_SIZE];
34669
34670+ pax_track_stack();
34671+
34672 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
34673 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
34674 if (!gdtcmd || !estr)
34675diff -urNp linux-3.1.4/drivers/scsi/hosts.c linux-3.1.4/drivers/scsi/hosts.c
34676--- linux-3.1.4/drivers/scsi/hosts.c 2011-11-11 15:19:27.000000000 -0500
34677+++ linux-3.1.4/drivers/scsi/hosts.c 2011-11-16 18:39:07.000000000 -0500
34678@@ -42,7 +42,7 @@
34679 #include "scsi_logging.h"
34680
34681
34682-static atomic_t scsi_host_next_hn; /* host_no for next new host */
34683+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34684
34685
34686 static void scsi_host_cls_release(struct device *dev)
34687@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct
34688 * subtract one because we increment first then return, but we need to
34689 * know what the next host number was before increment
34690 */
34691- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34692+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34693 shost->dma_channel = 0xff;
34694
34695 /* These three are default values which can be overridden */
34696diff -urNp linux-3.1.4/drivers/scsi/hpsa.c linux-3.1.4/drivers/scsi/hpsa.c
34697--- linux-3.1.4/drivers/scsi/hpsa.c 2011-11-26 19:57:29.000000000 -0500
34698+++ linux-3.1.4/drivers/scsi/hpsa.c 2011-11-26 20:00:43.000000000 -0500
34699@@ -499,7 +499,7 @@ static inline u32 next_command(struct ct
34700 u32 a;
34701
34702 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34703- return h->access.command_completed(h);
34704+ return h->access->command_completed(h);
34705
34706 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34707 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34708@@ -2956,7 +2956,7 @@ static void start_io(struct ctlr_info *h
34709 while (!list_empty(&h->reqQ)) {
34710 c = list_entry(h->reqQ.next, struct CommandList, list);
34711 /* can't do anything if fifo is full */
34712- if ((h->access.fifo_full(h))) {
34713+ if ((h->access->fifo_full(h))) {
34714 dev_warn(&h->pdev->dev, "fifo full\n");
34715 break;
34716 }
34717@@ -2966,7 +2966,7 @@ static void start_io(struct ctlr_info *h
34718 h->Qdepth--;
34719
34720 /* Tell the controller execute command */
34721- h->access.submit_command(h, c);
34722+ h->access->submit_command(h, c);
34723
34724 /* Put job onto the completed Q */
34725 addQ(&h->cmpQ, c);
34726@@ -2975,17 +2975,17 @@ static void start_io(struct ctlr_info *h
34727
34728 static inline unsigned long get_next_completion(struct ctlr_info *h)
34729 {
34730- return h->access.command_completed(h);
34731+ return h->access->command_completed(h);
34732 }
34733
34734 static inline bool interrupt_pending(struct ctlr_info *h)
34735 {
34736- return h->access.intr_pending(h);
34737+ return h->access->intr_pending(h);
34738 }
34739
34740 static inline long interrupt_not_for_us(struct ctlr_info *h)
34741 {
34742- return (h->access.intr_pending(h) == 0) ||
34743+ return (h->access->intr_pending(h) == 0) ||
34744 (h->interrupts_enabled == 0);
34745 }
34746
34747@@ -3882,7 +3882,7 @@ static int __devinit hpsa_pci_init(struc
34748 if (prod_index < 0)
34749 return -ENODEV;
34750 h->product_name = products[prod_index].product_name;
34751- h->access = *(products[prod_index].access);
34752+ h->access = products[prod_index].access;
34753
34754 if (hpsa_board_disabled(h->pdev)) {
34755 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34756@@ -4163,7 +4163,7 @@ reinit_after_soft_reset:
34757 }
34758
34759 /* make sure the board interrupts are off */
34760- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34761+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34762
34763 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34764 goto clean2;
34765@@ -4197,7 +4197,7 @@ reinit_after_soft_reset:
34766 * fake ones to scoop up any residual completions.
34767 */
34768 spin_lock_irqsave(&h->lock, flags);
34769- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34770+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34771 spin_unlock_irqrestore(&h->lock, flags);
34772 free_irq(h->intr[h->intr_mode], h);
34773 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34774@@ -4216,9 +4216,9 @@ reinit_after_soft_reset:
34775 dev_info(&h->pdev->dev, "Board READY.\n");
34776 dev_info(&h->pdev->dev,
34777 "Waiting for stale completions to drain.\n");
34778- h->access.set_intr_mask(h, HPSA_INTR_ON);
34779+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34780 msleep(10000);
34781- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34782+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34783
34784 rc = controller_reset_failed(h->cfgtable);
34785 if (rc)
34786@@ -4239,7 +4239,7 @@ reinit_after_soft_reset:
34787 }
34788
34789 /* Turn the interrupts on so we can service requests */
34790- h->access.set_intr_mask(h, HPSA_INTR_ON);
34791+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34792
34793 hpsa_hba_inquiry(h);
34794 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34795@@ -4292,7 +4292,7 @@ static void hpsa_shutdown(struct pci_dev
34796 * To write all data in the battery backed cache to disks
34797 */
34798 hpsa_flush_cache(h);
34799- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34800+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34801 free_irq(h->intr[h->intr_mode], h);
34802 #ifdef CONFIG_PCI_MSI
34803 if (h->msix_vector)
34804@@ -4455,7 +4455,7 @@ static __devinit void hpsa_enter_perform
34805 return;
34806 }
34807 /* Change the access methods to the performant access methods */
34808- h->access = SA5_performant_access;
34809+ h->access = &SA5_performant_access;
34810 h->transMethod = CFGTBL_Trans_Performant;
34811 }
34812
34813diff -urNp linux-3.1.4/drivers/scsi/hpsa.h linux-3.1.4/drivers/scsi/hpsa.h
34814--- linux-3.1.4/drivers/scsi/hpsa.h 2011-11-11 15:19:27.000000000 -0500
34815+++ linux-3.1.4/drivers/scsi/hpsa.h 2011-11-16 18:39:07.000000000 -0500
34816@@ -73,7 +73,7 @@ struct ctlr_info {
34817 unsigned int msix_vector;
34818 unsigned int msi_vector;
34819 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34820- struct access_method access;
34821+ struct access_method *access;
34822
34823 /* queue and queue Info */
34824 struct list_head reqQ;
34825diff -urNp linux-3.1.4/drivers/scsi/ips.h linux-3.1.4/drivers/scsi/ips.h
34826--- linux-3.1.4/drivers/scsi/ips.h 2011-11-11 15:19:27.000000000 -0500
34827+++ linux-3.1.4/drivers/scsi/ips.h 2011-11-16 18:39:07.000000000 -0500
34828@@ -1027,7 +1027,7 @@ typedef struct {
34829 int (*intr)(struct ips_ha *);
34830 void (*enableint)(struct ips_ha *);
34831 uint32_t (*statupd)(struct ips_ha *);
34832-} ips_hw_func_t;
34833+} __no_const ips_hw_func_t;
34834
34835 typedef struct ips_ha {
34836 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34837diff -urNp linux-3.1.4/drivers/scsi/libfc/fc_exch.c linux-3.1.4/drivers/scsi/libfc/fc_exch.c
34838--- linux-3.1.4/drivers/scsi/libfc/fc_exch.c 2011-11-11 15:19:27.000000000 -0500
34839+++ linux-3.1.4/drivers/scsi/libfc/fc_exch.c 2011-11-16 18:39:07.000000000 -0500
34840@@ -105,12 +105,12 @@ struct fc_exch_mgr {
34841 * all together if not used XXX
34842 */
34843 struct {
34844- atomic_t no_free_exch;
34845- atomic_t no_free_exch_xid;
34846- atomic_t xid_not_found;
34847- atomic_t xid_busy;
34848- atomic_t seq_not_found;
34849- atomic_t non_bls_resp;
34850+ atomic_unchecked_t no_free_exch;
34851+ atomic_unchecked_t no_free_exch_xid;
34852+ atomic_unchecked_t xid_not_found;
34853+ atomic_unchecked_t xid_busy;
34854+ atomic_unchecked_t seq_not_found;
34855+ atomic_unchecked_t non_bls_resp;
34856 } stats;
34857 };
34858
34859@@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(
34860 /* allocate memory for exchange */
34861 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34862 if (!ep) {
34863- atomic_inc(&mp->stats.no_free_exch);
34864+ atomic_inc_unchecked(&mp->stats.no_free_exch);
34865 goto out;
34866 }
34867 memset(ep, 0, sizeof(*ep));
34868@@ -779,7 +779,7 @@ out:
34869 return ep;
34870 err:
34871 spin_unlock_bh(&pool->lock);
34872- atomic_inc(&mp->stats.no_free_exch_xid);
34873+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34874 mempool_free(ep, mp->ep_pool);
34875 return NULL;
34876 }
34877@@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34878 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34879 ep = fc_exch_find(mp, xid);
34880 if (!ep) {
34881- atomic_inc(&mp->stats.xid_not_found);
34882+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34883 reject = FC_RJT_OX_ID;
34884 goto out;
34885 }
34886@@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34887 ep = fc_exch_find(mp, xid);
34888 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34889 if (ep) {
34890- atomic_inc(&mp->stats.xid_busy);
34891+ atomic_inc_unchecked(&mp->stats.xid_busy);
34892 reject = FC_RJT_RX_ID;
34893 goto rel;
34894 }
34895@@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34896 }
34897 xid = ep->xid; /* get our XID */
34898 } else if (!ep) {
34899- atomic_inc(&mp->stats.xid_not_found);
34900+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34901 reject = FC_RJT_RX_ID; /* XID not found */
34902 goto out;
34903 }
34904@@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34905 } else {
34906 sp = &ep->seq;
34907 if (sp->id != fh->fh_seq_id) {
34908- atomic_inc(&mp->stats.seq_not_found);
34909+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34910 if (f_ctl & FC_FC_END_SEQ) {
34911 /*
34912 * Update sequence_id based on incoming last
34913@@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct
34914
34915 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34916 if (!ep) {
34917- atomic_inc(&mp->stats.xid_not_found);
34918+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34919 goto out;
34920 }
34921 if (ep->esb_stat & ESB_ST_COMPLETE) {
34922- atomic_inc(&mp->stats.xid_not_found);
34923+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34924 goto rel;
34925 }
34926 if (ep->rxid == FC_XID_UNKNOWN)
34927 ep->rxid = ntohs(fh->fh_rx_id);
34928 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34929- atomic_inc(&mp->stats.xid_not_found);
34930+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34931 goto rel;
34932 }
34933 if (ep->did != ntoh24(fh->fh_s_id) &&
34934 ep->did != FC_FID_FLOGI) {
34935- atomic_inc(&mp->stats.xid_not_found);
34936+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34937 goto rel;
34938 }
34939 sof = fr_sof(fp);
34940@@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct
34941 sp->ssb_stat |= SSB_ST_RESP;
34942 sp->id = fh->fh_seq_id;
34943 } else if (sp->id != fh->fh_seq_id) {
34944- atomic_inc(&mp->stats.seq_not_found);
34945+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34946 goto rel;
34947 }
34948
34949@@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_
34950 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34951
34952 if (!sp)
34953- atomic_inc(&mp->stats.xid_not_found);
34954+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34955 else
34956- atomic_inc(&mp->stats.non_bls_resp);
34957+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34958
34959 fc_frame_free(fp);
34960 }
34961diff -urNp linux-3.1.4/drivers/scsi/libsas/sas_ata.c linux-3.1.4/drivers/scsi/libsas/sas_ata.c
34962--- linux-3.1.4/drivers/scsi/libsas/sas_ata.c 2011-11-11 15:19:27.000000000 -0500
34963+++ linux-3.1.4/drivers/scsi/libsas/sas_ata.c 2011-11-16 18:39:07.000000000 -0500
34964@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
34965 .postreset = ata_std_postreset,
34966 .error_handler = ata_std_error_handler,
34967 .post_internal_cmd = sas_ata_post_internal,
34968- .qc_defer = ata_std_qc_defer,
34969+ .qc_defer = ata_std_qc_defer,
34970 .qc_prep = ata_noop_qc_prep,
34971 .qc_issue = sas_ata_qc_issue,
34972 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34973diff -urNp linux-3.1.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.1.4/drivers/scsi/lpfc/lpfc_debugfs.c
34974--- linux-3.1.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-11 15:19:27.000000000 -0500
34975+++ linux-3.1.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-16 18:40:22.000000000 -0500
34976@@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
34977
34978 #include <linux/debugfs.h>
34979
34980-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34981+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34982 static unsigned long lpfc_debugfs_start_time = 0L;
34983
34984 /* iDiag */
34985@@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
34986 lpfc_debugfs_enable = 0;
34987
34988 len = 0;
34989- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34990+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34991 (lpfc_debugfs_max_disc_trc - 1);
34992 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34993 dtp = vport->disc_trc + i;
34994@@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
34995 lpfc_debugfs_enable = 0;
34996
34997 len = 0;
34998- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34999+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
35000 (lpfc_debugfs_max_slow_ring_trc - 1);
35001 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
35002 dtp = phba->slow_ring_trc + i;
35003@@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
35004 !vport || !vport->disc_trc)
35005 return;
35006
35007- index = atomic_inc_return(&vport->disc_trc_cnt) &
35008+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
35009 (lpfc_debugfs_max_disc_trc - 1);
35010 dtp = vport->disc_trc + index;
35011 dtp->fmt = fmt;
35012 dtp->data1 = data1;
35013 dtp->data2 = data2;
35014 dtp->data3 = data3;
35015- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
35016+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
35017 dtp->jif = jiffies;
35018 #endif
35019 return;
35020@@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
35021 !phba || !phba->slow_ring_trc)
35022 return;
35023
35024- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
35025+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
35026 (lpfc_debugfs_max_slow_ring_trc - 1);
35027 dtp = phba->slow_ring_trc + index;
35028 dtp->fmt = fmt;
35029 dtp->data1 = data1;
35030 dtp->data2 = data2;
35031 dtp->data3 = data3;
35032- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
35033+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
35034 dtp->jif = jiffies;
35035 #endif
35036 return;
35037@@ -3828,7 +3828,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
35038 "slow_ring buffer\n");
35039 goto debug_failed;
35040 }
35041- atomic_set(&phba->slow_ring_trc_cnt, 0);
35042+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
35043 memset(phba->slow_ring_trc, 0,
35044 (sizeof(struct lpfc_debugfs_trc) *
35045 lpfc_debugfs_max_slow_ring_trc));
35046@@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
35047 "buffer\n");
35048 goto debug_failed;
35049 }
35050- atomic_set(&vport->disc_trc_cnt, 0);
35051+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
35052
35053 snprintf(name, sizeof(name), "discovery_trace");
35054 vport->debug_disc_trc =
35055diff -urNp linux-3.1.4/drivers/scsi/lpfc/lpfc.h linux-3.1.4/drivers/scsi/lpfc/lpfc.h
35056--- linux-3.1.4/drivers/scsi/lpfc/lpfc.h 2011-11-11 15:19:27.000000000 -0500
35057+++ linux-3.1.4/drivers/scsi/lpfc/lpfc.h 2011-11-16 18:39:07.000000000 -0500
35058@@ -425,7 +425,7 @@ struct lpfc_vport {
35059 struct dentry *debug_nodelist;
35060 struct dentry *vport_debugfs_root;
35061 struct lpfc_debugfs_trc *disc_trc;
35062- atomic_t disc_trc_cnt;
35063+ atomic_unchecked_t disc_trc_cnt;
35064 #endif
35065 uint8_t stat_data_enabled;
35066 uint8_t stat_data_blocked;
35067@@ -835,8 +835,8 @@ struct lpfc_hba {
35068 struct timer_list fabric_block_timer;
35069 unsigned long bit_flags;
35070 #define FABRIC_COMANDS_BLOCKED 0
35071- atomic_t num_rsrc_err;
35072- atomic_t num_cmd_success;
35073+ atomic_unchecked_t num_rsrc_err;
35074+ atomic_unchecked_t num_cmd_success;
35075 unsigned long last_rsrc_error_time;
35076 unsigned long last_ramp_down_time;
35077 unsigned long last_ramp_up_time;
35078@@ -850,7 +850,7 @@ struct lpfc_hba {
35079 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
35080 struct dentry *debug_slow_ring_trc;
35081 struct lpfc_debugfs_trc *slow_ring_trc;
35082- atomic_t slow_ring_trc_cnt;
35083+ atomic_unchecked_t slow_ring_trc_cnt;
35084 /* iDiag debugfs sub-directory */
35085 struct dentry *idiag_root;
35086 struct dentry *idiag_pci_cfg;
35087diff -urNp linux-3.1.4/drivers/scsi/lpfc/lpfc_init.c linux-3.1.4/drivers/scsi/lpfc/lpfc_init.c
35088--- linux-3.1.4/drivers/scsi/lpfc/lpfc_init.c 2011-11-11 15:19:27.000000000 -0500
35089+++ linux-3.1.4/drivers/scsi/lpfc/lpfc_init.c 2011-11-16 18:39:07.000000000 -0500
35090@@ -9969,8 +9969,10 @@ lpfc_init(void)
35091 printk(LPFC_COPYRIGHT "\n");
35092
35093 if (lpfc_enable_npiv) {
35094- lpfc_transport_functions.vport_create = lpfc_vport_create;
35095- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
35096+ pax_open_kernel();
35097+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
35098+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
35099+ pax_close_kernel();
35100 }
35101 lpfc_transport_template =
35102 fc_attach_transport(&lpfc_transport_functions);
35103diff -urNp linux-3.1.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.1.4/drivers/scsi/lpfc/lpfc_scsi.c
35104--- linux-3.1.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-11 15:19:27.000000000 -0500
35105+++ linux-3.1.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-16 18:39:07.000000000 -0500
35106@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
35107 uint32_t evt_posted;
35108
35109 spin_lock_irqsave(&phba->hbalock, flags);
35110- atomic_inc(&phba->num_rsrc_err);
35111+ atomic_inc_unchecked(&phba->num_rsrc_err);
35112 phba->last_rsrc_error_time = jiffies;
35113
35114 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
35115@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
35116 unsigned long flags;
35117 struct lpfc_hba *phba = vport->phba;
35118 uint32_t evt_posted;
35119- atomic_inc(&phba->num_cmd_success);
35120+ atomic_inc_unchecked(&phba->num_cmd_success);
35121
35122 if (vport->cfg_lun_queue_depth <= queue_depth)
35123 return;
35124@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
35125 unsigned long num_rsrc_err, num_cmd_success;
35126 int i;
35127
35128- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
35129- num_cmd_success = atomic_read(&phba->num_cmd_success);
35130+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
35131+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
35132
35133 vports = lpfc_create_vport_work_array(phba);
35134 if (vports != NULL)
35135@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
35136 }
35137 }
35138 lpfc_destroy_vport_work_array(phba, vports);
35139- atomic_set(&phba->num_rsrc_err, 0);
35140- atomic_set(&phba->num_cmd_success, 0);
35141+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
35142+ atomic_set_unchecked(&phba->num_cmd_success, 0);
35143 }
35144
35145 /**
35146@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
35147 }
35148 }
35149 lpfc_destroy_vport_work_array(phba, vports);
35150- atomic_set(&phba->num_rsrc_err, 0);
35151- atomic_set(&phba->num_cmd_success, 0);
35152+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
35153+ atomic_set_unchecked(&phba->num_cmd_success, 0);
35154 }
35155
35156 /**
35157diff -urNp linux-3.1.4/drivers/scsi/megaraid/megaraid_mbox.c linux-3.1.4/drivers/scsi/megaraid/megaraid_mbox.c
35158--- linux-3.1.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-11 15:19:27.000000000 -0500
35159+++ linux-3.1.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-16 18:40:22.000000000 -0500
35160@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
35161 int rval;
35162 int i;
35163
35164+ pax_track_stack();
35165+
35166 // Allocate memory for the base list of scb for management module.
35167 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
35168
35169diff -urNp linux-3.1.4/drivers/scsi/osd/osd_initiator.c linux-3.1.4/drivers/scsi/osd/osd_initiator.c
35170--- linux-3.1.4/drivers/scsi/osd/osd_initiator.c 2011-11-11 15:19:27.000000000 -0500
35171+++ linux-3.1.4/drivers/scsi/osd/osd_initiator.c 2011-11-16 18:40:22.000000000 -0500
35172@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
35173 int nelem = ARRAY_SIZE(get_attrs), a = 0;
35174 int ret;
35175
35176+ pax_track_stack();
35177+
35178 or = osd_start_request(od, GFP_KERNEL);
35179 if (!or)
35180 return -ENOMEM;
35181diff -urNp linux-3.1.4/drivers/scsi/pmcraid.c linux-3.1.4/drivers/scsi/pmcraid.c
35182--- linux-3.1.4/drivers/scsi/pmcraid.c 2011-11-11 15:19:27.000000000 -0500
35183+++ linux-3.1.4/drivers/scsi/pmcraid.c 2011-11-16 18:39:07.000000000 -0500
35184@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
35185 res->scsi_dev = scsi_dev;
35186 scsi_dev->hostdata = res;
35187 res->change_detected = 0;
35188- atomic_set(&res->read_failures, 0);
35189- atomic_set(&res->write_failures, 0);
35190+ atomic_set_unchecked(&res->read_failures, 0);
35191+ atomic_set_unchecked(&res->write_failures, 0);
35192 rc = 0;
35193 }
35194 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
35195@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
35196
35197 /* If this was a SCSI read/write command keep count of errors */
35198 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
35199- atomic_inc(&res->read_failures);
35200+ atomic_inc_unchecked(&res->read_failures);
35201 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
35202- atomic_inc(&res->write_failures);
35203+ atomic_inc_unchecked(&res->write_failures);
35204
35205 if (!RES_IS_GSCSI(res->cfg_entry) &&
35206 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
35207@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
35208 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
35209 * hrrq_id assigned here in queuecommand
35210 */
35211- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
35212+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
35213 pinstance->num_hrrq;
35214 cmd->cmd_done = pmcraid_io_done;
35215
35216@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
35217 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
35218 * hrrq_id assigned here in queuecommand
35219 */
35220- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
35221+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
35222 pinstance->num_hrrq;
35223
35224 if (request_size) {
35225@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
35226
35227 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
35228 /* add resources only after host is added into system */
35229- if (!atomic_read(&pinstance->expose_resources))
35230+ if (!atomic_read_unchecked(&pinstance->expose_resources))
35231 return;
35232
35233 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
35234@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
35235 init_waitqueue_head(&pinstance->reset_wait_q);
35236
35237 atomic_set(&pinstance->outstanding_cmds, 0);
35238- atomic_set(&pinstance->last_message_id, 0);
35239- atomic_set(&pinstance->expose_resources, 0);
35240+ atomic_set_unchecked(&pinstance->last_message_id, 0);
35241+ atomic_set_unchecked(&pinstance->expose_resources, 0);
35242
35243 INIT_LIST_HEAD(&pinstance->free_res_q);
35244 INIT_LIST_HEAD(&pinstance->used_res_q);
35245@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
35246 /* Schedule worker thread to handle CCN and take care of adding and
35247 * removing devices to OS
35248 */
35249- atomic_set(&pinstance->expose_resources, 1);
35250+ atomic_set_unchecked(&pinstance->expose_resources, 1);
35251 schedule_work(&pinstance->worker_q);
35252 return rc;
35253
35254diff -urNp linux-3.1.4/drivers/scsi/pmcraid.h linux-3.1.4/drivers/scsi/pmcraid.h
35255--- linux-3.1.4/drivers/scsi/pmcraid.h 2011-11-11 15:19:27.000000000 -0500
35256+++ linux-3.1.4/drivers/scsi/pmcraid.h 2011-11-16 18:39:07.000000000 -0500
35257@@ -749,7 +749,7 @@ struct pmcraid_instance {
35258 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
35259
35260 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
35261- atomic_t last_message_id;
35262+ atomic_unchecked_t last_message_id;
35263
35264 /* configuration table */
35265 struct pmcraid_config_table *cfg_table;
35266@@ -778,7 +778,7 @@ struct pmcraid_instance {
35267 atomic_t outstanding_cmds;
35268
35269 /* should add/delete resources to mid-layer now ?*/
35270- atomic_t expose_resources;
35271+ atomic_unchecked_t expose_resources;
35272
35273
35274
35275@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
35276 struct pmcraid_config_table_entry_ext cfg_entry_ext;
35277 };
35278 struct scsi_device *scsi_dev; /* Link scsi_device structure */
35279- atomic_t read_failures; /* count of failed READ commands */
35280- atomic_t write_failures; /* count of failed WRITE commands */
35281+ atomic_unchecked_t read_failures; /* count of failed READ commands */
35282+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
35283
35284 /* To indicate add/delete/modify during CCN */
35285 u8 change_detected;
35286diff -urNp linux-3.1.4/drivers/scsi/qla2xxx/qla_def.h linux-3.1.4/drivers/scsi/qla2xxx/qla_def.h
35287--- linux-3.1.4/drivers/scsi/qla2xxx/qla_def.h 2011-11-11 15:19:27.000000000 -0500
35288+++ linux-3.1.4/drivers/scsi/qla2xxx/qla_def.h 2011-11-16 18:39:07.000000000 -0500
35289@@ -2244,7 +2244,7 @@ struct isp_operations {
35290 int (*get_flash_version) (struct scsi_qla_host *, void *);
35291 int (*start_scsi) (srb_t *);
35292 int (*abort_isp) (struct scsi_qla_host *);
35293-};
35294+} __no_const;
35295
35296 /* MSI-X Support *************************************************************/
35297
35298diff -urNp linux-3.1.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.1.4/drivers/scsi/qla4xxx/ql4_def.h
35299--- linux-3.1.4/drivers/scsi/qla4xxx/ql4_def.h 2011-11-11 15:19:27.000000000 -0500
35300+++ linux-3.1.4/drivers/scsi/qla4xxx/ql4_def.h 2011-11-16 18:39:07.000000000 -0500
35301@@ -256,7 +256,7 @@ struct ddb_entry {
35302 atomic_t retry_relogin_timer; /* Min Time between relogins
35303 * (4000 only) */
35304 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
35305- atomic_t relogin_retry_count; /* Num of times relogin has been
35306+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
35307 * retried */
35308
35309 uint16_t port;
35310diff -urNp linux-3.1.4/drivers/scsi/qla4xxx/ql4_init.c linux-3.1.4/drivers/scsi/qla4xxx/ql4_init.c
35311--- linux-3.1.4/drivers/scsi/qla4xxx/ql4_init.c 2011-11-11 15:19:27.000000000 -0500
35312+++ linux-3.1.4/drivers/scsi/qla4xxx/ql4_init.c 2011-11-16 18:39:07.000000000 -0500
35313@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
35314 ddb_entry->fw_ddb_index = fw_ddb_index;
35315 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
35316 atomic_set(&ddb_entry->relogin_timer, 0);
35317- atomic_set(&ddb_entry->relogin_retry_count, 0);
35318+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
35319 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
35320 list_add_tail(&ddb_entry->list, &ha->ddb_list);
35321 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
35322@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
35323 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
35324 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
35325 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
35326- atomic_set(&ddb_entry->relogin_retry_count, 0);
35327+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
35328 atomic_set(&ddb_entry->relogin_timer, 0);
35329 clear_bit(DF_RELOGIN, &ddb_entry->flags);
35330 iscsi_unblock_session(ddb_entry->sess);
35331diff -urNp linux-3.1.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.1.4/drivers/scsi/qla4xxx/ql4_os.c
35332--- linux-3.1.4/drivers/scsi/qla4xxx/ql4_os.c 2011-11-11 15:19:27.000000000 -0500
35333+++ linux-3.1.4/drivers/scsi/qla4xxx/ql4_os.c 2011-11-16 18:39:07.000000000 -0500
35334@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
35335 ddb_entry->fw_ddb_device_state ==
35336 DDB_DS_SESSION_FAILED) {
35337 /* Reset retry relogin timer */
35338- atomic_inc(&ddb_entry->relogin_retry_count);
35339+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
35340 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
35341 " timed out-retrying"
35342 " relogin (%d)\n",
35343 ha->host_no,
35344 ddb_entry->fw_ddb_index,
35345- atomic_read(&ddb_entry->
35346+ atomic_read_unchecked(&ddb_entry->
35347 relogin_retry_count))
35348 );
35349 start_dpc++;
35350diff -urNp linux-3.1.4/drivers/scsi/scsi.c linux-3.1.4/drivers/scsi/scsi.c
35351--- linux-3.1.4/drivers/scsi/scsi.c 2011-11-11 15:19:27.000000000 -0500
35352+++ linux-3.1.4/drivers/scsi/scsi.c 2011-11-16 18:39:07.000000000 -0500
35353@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
35354 unsigned long timeout;
35355 int rtn = 0;
35356
35357- atomic_inc(&cmd->device->iorequest_cnt);
35358+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
35359
35360 /* check if the device is still usable */
35361 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
35362diff -urNp linux-3.1.4/drivers/scsi/scsi_debug.c linux-3.1.4/drivers/scsi/scsi_debug.c
35363--- linux-3.1.4/drivers/scsi/scsi_debug.c 2011-11-11 15:19:27.000000000 -0500
35364+++ linux-3.1.4/drivers/scsi/scsi_debug.c 2011-11-16 18:40:22.000000000 -0500
35365@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
35366 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
35367 unsigned char *cmd = (unsigned char *)scp->cmnd;
35368
35369+ pax_track_stack();
35370+
35371 if ((errsts = check_readiness(scp, 1, devip)))
35372 return errsts;
35373 memset(arr, 0, sizeof(arr));
35374@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
35375 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
35376 unsigned char *cmd = (unsigned char *)scp->cmnd;
35377
35378+ pax_track_stack();
35379+
35380 if ((errsts = check_readiness(scp, 1, devip)))
35381 return errsts;
35382 memset(arr, 0, sizeof(arr));
35383diff -urNp linux-3.1.4/drivers/scsi/scsi_lib.c linux-3.1.4/drivers/scsi/scsi_lib.c
35384--- linux-3.1.4/drivers/scsi/scsi_lib.c 2011-11-11 15:19:27.000000000 -0500
35385+++ linux-3.1.4/drivers/scsi/scsi_lib.c 2011-11-16 18:39:07.000000000 -0500
35386@@ -1413,7 +1413,7 @@ static void scsi_kill_request(struct req
35387 shost = sdev->host;
35388 scsi_init_cmd_errh(cmd);
35389 cmd->result = DID_NO_CONNECT << 16;
35390- atomic_inc(&cmd->device->iorequest_cnt);
35391+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
35392
35393 /*
35394 * SCSI request completion path will do scsi_device_unbusy(),
35395@@ -1439,9 +1439,9 @@ static void scsi_softirq_done(struct req
35396
35397 INIT_LIST_HEAD(&cmd->eh_entry);
35398
35399- atomic_inc(&cmd->device->iodone_cnt);
35400+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
35401 if (cmd->result)
35402- atomic_inc(&cmd->device->ioerr_cnt);
35403+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
35404
35405 disposition = scsi_decide_disposition(cmd);
35406 if (disposition != SUCCESS &&
35407diff -urNp linux-3.1.4/drivers/scsi/scsi_sysfs.c linux-3.1.4/drivers/scsi/scsi_sysfs.c
35408--- linux-3.1.4/drivers/scsi/scsi_sysfs.c 2011-11-11 15:19:27.000000000 -0500
35409+++ linux-3.1.4/drivers/scsi/scsi_sysfs.c 2011-11-16 18:39:07.000000000 -0500
35410@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
35411 char *buf) \
35412 { \
35413 struct scsi_device *sdev = to_scsi_device(dev); \
35414- unsigned long long count = atomic_read(&sdev->field); \
35415+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
35416 return snprintf(buf, 20, "0x%llx\n", count); \
35417 } \
35418 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
35419diff -urNp linux-3.1.4/drivers/scsi/scsi_tgt_lib.c linux-3.1.4/drivers/scsi/scsi_tgt_lib.c
35420--- linux-3.1.4/drivers/scsi/scsi_tgt_lib.c 2011-11-11 15:19:27.000000000 -0500
35421+++ linux-3.1.4/drivers/scsi/scsi_tgt_lib.c 2011-11-16 18:39:07.000000000 -0500
35422@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
35423 int err;
35424
35425 dprintk("%lx %u\n", uaddr, len);
35426- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
35427+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
35428 if (err) {
35429 /*
35430 * TODO: need to fixup sg_tablesize, max_segment_size,
35431diff -urNp linux-3.1.4/drivers/scsi/scsi_transport_fc.c linux-3.1.4/drivers/scsi/scsi_transport_fc.c
35432--- linux-3.1.4/drivers/scsi/scsi_transport_fc.c 2011-11-11 15:19:27.000000000 -0500
35433+++ linux-3.1.4/drivers/scsi/scsi_transport_fc.c 2011-11-16 18:39:07.000000000 -0500
35434@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
35435 * Netlink Infrastructure
35436 */
35437
35438-static atomic_t fc_event_seq;
35439+static atomic_unchecked_t fc_event_seq;
35440
35441 /**
35442 * fc_get_event_number - Obtain the next sequential FC event number
35443@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
35444 u32
35445 fc_get_event_number(void)
35446 {
35447- return atomic_add_return(1, &fc_event_seq);
35448+ return atomic_add_return_unchecked(1, &fc_event_seq);
35449 }
35450 EXPORT_SYMBOL(fc_get_event_number);
35451
35452@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
35453 {
35454 int error;
35455
35456- atomic_set(&fc_event_seq, 0);
35457+ atomic_set_unchecked(&fc_event_seq, 0);
35458
35459 error = transport_class_register(&fc_host_class);
35460 if (error)
35461@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
35462 char *cp;
35463
35464 *val = simple_strtoul(buf, &cp, 0);
35465- if ((*cp && (*cp != '\n')) || (*val < 0))
35466+ if (*cp && (*cp != '\n'))
35467 return -EINVAL;
35468 /*
35469 * Check for overflow; dev_loss_tmo is u32
35470diff -urNp linux-3.1.4/drivers/scsi/scsi_transport_iscsi.c linux-3.1.4/drivers/scsi/scsi_transport_iscsi.c
35471--- linux-3.1.4/drivers/scsi/scsi_transport_iscsi.c 2011-11-11 15:19:27.000000000 -0500
35472+++ linux-3.1.4/drivers/scsi/scsi_transport_iscsi.c 2011-11-16 18:39:07.000000000 -0500
35473@@ -83,7 +83,7 @@ struct iscsi_internal {
35474 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
35475 };
35476
35477-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
35478+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
35479 static struct workqueue_struct *iscsi_eh_timer_workq;
35480
35481 /*
35482@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
35483 int err;
35484
35485 ihost = shost->shost_data;
35486- session->sid = atomic_add_return(1, &iscsi_session_nr);
35487+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
35488
35489 if (id == ISCSI_MAX_TARGET) {
35490 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
35491@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
35492 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
35493 ISCSI_TRANSPORT_VERSION);
35494
35495- atomic_set(&iscsi_session_nr, 0);
35496+ atomic_set_unchecked(&iscsi_session_nr, 0);
35497
35498 err = class_register(&iscsi_transport_class);
35499 if (err)
35500diff -urNp linux-3.1.4/drivers/scsi/scsi_transport_srp.c linux-3.1.4/drivers/scsi/scsi_transport_srp.c
35501--- linux-3.1.4/drivers/scsi/scsi_transport_srp.c 2011-11-11 15:19:27.000000000 -0500
35502+++ linux-3.1.4/drivers/scsi/scsi_transport_srp.c 2011-11-16 18:39:07.000000000 -0500
35503@@ -33,7 +33,7 @@
35504 #include "scsi_transport_srp_internal.h"
35505
35506 struct srp_host_attrs {
35507- atomic_t next_port_id;
35508+ atomic_unchecked_t next_port_id;
35509 };
35510 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
35511
35512@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
35513 struct Scsi_Host *shost = dev_to_shost(dev);
35514 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
35515
35516- atomic_set(&srp_host->next_port_id, 0);
35517+ atomic_set_unchecked(&srp_host->next_port_id, 0);
35518 return 0;
35519 }
35520
35521@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
35522 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
35523 rport->roles = ids->roles;
35524
35525- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
35526+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
35527 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
35528
35529 transport_setup_device(&rport->dev);
35530diff -urNp linux-3.1.4/drivers/scsi/sg.c linux-3.1.4/drivers/scsi/sg.c
35531--- linux-3.1.4/drivers/scsi/sg.c 2011-11-11 15:19:27.000000000 -0500
35532+++ linux-3.1.4/drivers/scsi/sg.c 2011-11-16 18:39:07.000000000 -0500
35533@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
35534 sdp->disk->disk_name,
35535 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
35536 NULL,
35537- (char *)arg);
35538+ (char __user *)arg);
35539 case BLKTRACESTART:
35540 return blk_trace_startstop(sdp->device->request_queue, 1);
35541 case BLKTRACESTOP:
35542@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
35543 const struct file_operations * fops;
35544 };
35545
35546-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
35547+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
35548 {"allow_dio", &adio_fops},
35549 {"debug", &debug_fops},
35550 {"def_reserved_size", &dressz_fops},
35551@@ -2325,7 +2325,7 @@ sg_proc_init(void)
35552 {
35553 int k, mask;
35554 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
35555- struct sg_proc_leaf * leaf;
35556+ const struct sg_proc_leaf * leaf;
35557
35558 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
35559 if (!sg_proc_sgp)
35560diff -urNp linux-3.1.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.1.4/drivers/scsi/sym53c8xx_2/sym_glue.c
35561--- linux-3.1.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-11 15:19:27.000000000 -0500
35562+++ linux-3.1.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-16 18:40:22.000000000 -0500
35563@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
35564 int do_iounmap = 0;
35565 int do_disable_device = 1;
35566
35567+ pax_track_stack();
35568+
35569 memset(&sym_dev, 0, sizeof(sym_dev));
35570 memset(&nvram, 0, sizeof(nvram));
35571 sym_dev.pdev = pdev;
35572diff -urNp linux-3.1.4/drivers/scsi/vmw_pvscsi.c linux-3.1.4/drivers/scsi/vmw_pvscsi.c
35573--- linux-3.1.4/drivers/scsi/vmw_pvscsi.c 2011-11-11 15:19:27.000000000 -0500
35574+++ linux-3.1.4/drivers/scsi/vmw_pvscsi.c 2011-11-16 18:40:22.000000000 -0500
35575@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
35576 dma_addr_t base;
35577 unsigned i;
35578
35579+ pax_track_stack();
35580+
35581 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
35582 cmd.reqRingNumPages = adapter->req_pages;
35583 cmd.cmpRingNumPages = adapter->cmp_pages;
35584diff -urNp linux-3.1.4/drivers/spi/spi.c linux-3.1.4/drivers/spi/spi.c
35585--- linux-3.1.4/drivers/spi/spi.c 2011-11-11 15:19:27.000000000 -0500
35586+++ linux-3.1.4/drivers/spi/spi.c 2011-11-16 18:39:07.000000000 -0500
35587@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
35588 EXPORT_SYMBOL_GPL(spi_bus_unlock);
35589
35590 /* portable code must never pass more than 32 bytes */
35591-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
35592+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
35593
35594 static u8 *buf;
35595
35596diff -urNp linux-3.1.4/drivers/spi/spi-dw-pci.c linux-3.1.4/drivers/spi/spi-dw-pci.c
35597--- linux-3.1.4/drivers/spi/spi-dw-pci.c 2011-11-11 15:19:27.000000000 -0500
35598+++ linux-3.1.4/drivers/spi/spi-dw-pci.c 2011-11-16 18:39:07.000000000 -0500
35599@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pd
35600 #define spi_resume NULL
35601 #endif
35602
35603-static const struct pci_device_id pci_ids[] __devinitdata = {
35604+static const struct pci_device_id pci_ids[] __devinitconst = {
35605 /* Intel MID platform SPI controller 0 */
35606 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
35607 {},
35608diff -urNp linux-3.1.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.1.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
35609--- linux-3.1.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-11-11 15:19:27.000000000 -0500
35610+++ linux-3.1.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-11-16 18:39:07.000000000 -0500
35611@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
35612 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
35613
35614
35615-static struct net_device_ops ar6000_netdev_ops = {
35616+static net_device_ops_no_const ar6000_netdev_ops = {
35617 .ndo_init = NULL,
35618 .ndo_open = ar6000_open,
35619 .ndo_stop = ar6000_close,
35620diff -urNp linux-3.1.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.1.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
35621--- linux-3.1.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-11-11 15:19:27.000000000 -0500
35622+++ linux-3.1.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-11-16 18:39:07.000000000 -0500
35623@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
35624 typedef struct ar6k_pal_config_s
35625 {
35626 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
35627-}ar6k_pal_config_t;
35628+} __no_const ar6k_pal_config_t;
35629
35630 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
35631 #endif /* _AR6K_PAL_H_ */
35632diff -urNp linux-3.1.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.1.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
35633--- linux-3.1.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-11-11 15:19:27.000000000 -0500
35634+++ linux-3.1.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-11-16 18:39:07.000000000 -0500
35635@@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if
35636 free_netdev(ifp->net);
35637 }
35638 /* Allocate etherdev, including space for private structure */
35639- ifp->net = alloc_etherdev(sizeof(drvr_priv));
35640+ ifp->net = alloc_etherdev(sizeof(*drvr_priv));
35641 if (!ifp->net) {
35642 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
35643 ret = -ENOMEM;
35644 }
35645 if (ret == 0) {
35646 strcpy(ifp->net->name, ifp->name);
35647- memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
35648+ memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv));
35649 err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
35650 if (err != 0) {
35651 BRCMF_ERROR(("%s: brcmf_net_attach failed, "
35652@@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct br
35653 BRCMF_TRACE(("%s: Enter\n", __func__));
35654
35655 /* Allocate etherdev, including space for private structure */
35656- net = alloc_etherdev(sizeof(drvr_priv));
35657+ net = alloc_etherdev(sizeof(*drvr_priv));
35658 if (!net) {
35659 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
35660 goto fail;
35661@@ -1295,7 +1295,7 @@ struct brcmf_pub *brcmf_attach(struct br
35662 /*
35663 * Save the brcmf_info into the priv
35664 */
35665- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
35666+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
35667
35668 /* Set network interface name if it was provided as module parameter */
35669 if (iface_name[0]) {
35670@@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct br
35671 /*
35672 * Save the brcmf_info into the priv
35673 */
35674- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
35675+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
35676
35677 #if defined(CONFIG_PM_SLEEP)
35678 atomic_set(&brcmf_mmc_suspend, false);
35679diff -urNp linux-3.1.4/drivers/staging/brcm80211/brcmfmac/sdio_host.h linux-3.1.4/drivers/staging/brcm80211/brcmfmac/sdio_host.h
35680--- linux-3.1.4/drivers/staging/brcm80211/brcmfmac/sdio_host.h 2011-11-11 15:19:27.000000000 -0500
35681+++ linux-3.1.4/drivers/staging/brcm80211/brcmfmac/sdio_host.h 2011-11-16 18:39:07.000000000 -0500
35682@@ -263,7 +263,7 @@ struct brcmf_sdioh_driver {
35683 u16 func, uint bustype, u32 regsva, void *param);
35684 /* detach from device */
35685 void (*detach) (void *ch);
35686-};
35687+} __no_const;
35688
35689 struct sdioh_info;
35690
35691diff -urNp linux-3.1.4/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h linux-3.1.4/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
35692--- linux-3.1.4/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h 2011-11-11 15:19:27.000000000 -0500
35693+++ linux-3.1.4/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h 2011-11-16 18:39:07.000000000 -0500
35694@@ -591,7 +591,7 @@ struct phy_func_ptr {
35695 initfn_t carrsuppr;
35696 rxsigpwrfn_t rxsigpwr;
35697 detachfn_t detach;
35698-};
35699+} __no_const;
35700
35701 struct brcms_phy {
35702 struct brcms_phy_pub pubpi_ro;
35703diff -urNp linux-3.1.4/drivers/staging/et131x/et1310_tx.c linux-3.1.4/drivers/staging/et131x/et1310_tx.c
35704--- linux-3.1.4/drivers/staging/et131x/et1310_tx.c 2011-11-11 15:19:27.000000000 -0500
35705+++ linux-3.1.4/drivers/staging/et131x/et1310_tx.c 2011-11-16 18:39:07.000000000 -0500
35706@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
35707 struct net_device_stats *stats = &etdev->net_stats;
35708
35709 if (tcb->flags & fMP_DEST_BROAD)
35710- atomic_inc(&etdev->stats.brdcstxmt);
35711+ atomic_inc_unchecked(&etdev->stats.brdcstxmt);
35712 else if (tcb->flags & fMP_DEST_MULTI)
35713- atomic_inc(&etdev->stats.multixmt);
35714+ atomic_inc_unchecked(&etdev->stats.multixmt);
35715 else
35716- atomic_inc(&etdev->stats.unixmt);
35717+ atomic_inc_unchecked(&etdev->stats.unixmt);
35718
35719 if (tcb->skb) {
35720 stats->tx_bytes += tcb->skb->len;
35721diff -urNp linux-3.1.4/drivers/staging/et131x/et131x_adapter.h linux-3.1.4/drivers/staging/et131x/et131x_adapter.h
35722--- linux-3.1.4/drivers/staging/et131x/et131x_adapter.h 2011-11-11 15:19:27.000000000 -0500
35723+++ linux-3.1.4/drivers/staging/et131x/et131x_adapter.h 2011-11-16 18:39:07.000000000 -0500
35724@@ -106,11 +106,11 @@ struct ce_stats {
35725 * operations
35726 */
35727 u32 unircv; /* # multicast packets received */
35728- atomic_t unixmt; /* # multicast packets for Tx */
35729+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
35730 u32 multircv; /* # multicast packets received */
35731- atomic_t multixmt; /* # multicast packets for Tx */
35732+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
35733 u32 brdcstrcv; /* # broadcast packets received */
35734- atomic_t brdcstxmt; /* # broadcast packets for Tx */
35735+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
35736 u32 norcvbuf; /* # Rx packets discarded */
35737 u32 noxmtbuf; /* # Tx packets discarded */
35738
35739diff -urNp linux-3.1.4/drivers/staging/hv/channel.c linux-3.1.4/drivers/staging/hv/channel.c
35740--- linux-3.1.4/drivers/staging/hv/channel.c 2011-11-11 15:19:27.000000000 -0500
35741+++ linux-3.1.4/drivers/staging/hv/channel.c 2011-11-16 18:39:07.000000000 -0500
35742@@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_c
35743 int ret = 0;
35744 int t;
35745
35746- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
35747- atomic_inc(&vmbus_connection.next_gpadl_handle);
35748+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
35749+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
35750
35751 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
35752 if (ret)
35753diff -urNp linux-3.1.4/drivers/staging/hv/hv.c linux-3.1.4/drivers/staging/hv/hv.c
35754--- linux-3.1.4/drivers/staging/hv/hv.c 2011-11-11 15:19:27.000000000 -0500
35755+++ linux-3.1.4/drivers/staging/hv/hv.c 2011-11-16 18:39:07.000000000 -0500
35756@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
35757 u64 output_address = (output) ? virt_to_phys(output) : 0;
35758 u32 output_address_hi = output_address >> 32;
35759 u32 output_address_lo = output_address & 0xFFFFFFFF;
35760- volatile void *hypercall_page = hv_context.hypercall_page;
35761+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
35762
35763 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
35764 "=a"(hv_status_lo) : "d" (control_hi),
35765diff -urNp linux-3.1.4/drivers/staging/hv/hv_mouse.c linux-3.1.4/drivers/staging/hv/hv_mouse.c
35766--- linux-3.1.4/drivers/staging/hv/hv_mouse.c 2011-11-11 15:19:27.000000000 -0500
35767+++ linux-3.1.4/drivers/staging/hv/hv_mouse.c 2011-11-16 18:39:07.000000000 -0500
35768@@ -878,8 +878,10 @@ static void reportdesc_callback(struct h
35769 if (hid_dev) {
35770 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
35771
35772- hid_dev->ll_driver->open = mousevsc_hid_open;
35773- hid_dev->ll_driver->close = mousevsc_hid_close;
35774+ pax_open_kernel();
35775+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
35776+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
35777+ pax_close_kernel();
35778
35779 hid_dev->bus = BUS_VIRTUAL;
35780 hid_dev->vendor = input_device_ctx->device_info.vendor;
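
The hv_mouse hunk above (and the pty.c hunk further down, for ptmx_fops) replaces direct stores to driver callback pointers with writes bracketed by pax_open_kernel()/pax_close_kernel(), which presumably lift the write protection on otherwise read-only (constified) kernel data just long enough for the one-off assignment. A rough userspace analogue using mprotect(); the structure and function names are invented for illustration.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ll_driver_sketch { int (*open)(void); int (*close)(void); };

static int my_open(void)  { return 1; }
static int my_close(void) { return 0; }

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        /* An ops object living in a read-only mapping, like constified kernel data. */
        struct ll_driver_sketch *drv = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (drv == MAP_FAILED)
                return 1;
        mprotect(drv, pagesz, PROT_READ);               /* writes would now fault */

        mprotect(drv, pagesz, PROT_READ | PROT_WRITE);  /* analogue of pax_open_kernel() */
        drv->open  = my_open;                           /* the one-off assignments */
        drv->close = my_close;
        mprotect(drv, pagesz, PROT_READ);               /* analogue of pax_close_kernel() */

        printf("open() -> %d, close() -> %d\n", drv->open(), drv->close());
        return 0;
}
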
35781diff -urNp linux-3.1.4/drivers/staging/hv/hyperv_vmbus.h linux-3.1.4/drivers/staging/hv/hyperv_vmbus.h
35782--- linux-3.1.4/drivers/staging/hv/hyperv_vmbus.h 2011-11-11 15:19:27.000000000 -0500
35783+++ linux-3.1.4/drivers/staging/hv/hyperv_vmbus.h 2011-11-16 18:39:07.000000000 -0500
35784@@ -559,7 +559,7 @@ enum vmbus_connect_state {
35785 struct vmbus_connection {
35786 enum vmbus_connect_state conn_state;
35787
35788- atomic_t next_gpadl_handle;
35789+ atomic_unchecked_t next_gpadl_handle;
35790
35791 /*
35792 * Represents channel interrupts. Each bit position represents a
35793diff -urNp linux-3.1.4/drivers/staging/hv/rndis_filter.c linux-3.1.4/drivers/staging/hv/rndis_filter.c
35794--- linux-3.1.4/drivers/staging/hv/rndis_filter.c 2011-11-11 15:19:27.000000000 -0500
35795+++ linux-3.1.4/drivers/staging/hv/rndis_filter.c 2011-11-16 18:39:07.000000000 -0500
35796@@ -43,7 +43,7 @@ struct rndis_device {
35797
35798 enum rndis_device_state state;
35799 u32 link_stat;
35800- atomic_t new_req_id;
35801+ atomic_unchecked_t new_req_id;
35802
35803 spinlock_t request_lock;
35804 struct list_head req_list;
35805@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
35806 * template
35807 */
35808 set = &rndis_msg->msg.set_req;
35809- set->req_id = atomic_inc_return(&dev->new_req_id);
35810+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35811
35812 /* Add to the request list */
35813 spin_lock_irqsave(&dev->request_lock, flags);
35814@@ -622,7 +622,7 @@ static void rndis_filter_halt_device(str
35815
35816 /* Setup the rndis set */
35817 halt = &request->request_msg.msg.halt_req;
35818- halt->req_id = atomic_inc_return(&dev->new_req_id);
35819+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35820
35821 /* Ignore return since this msg is optional. */
35822 rndis_filter_send_request(dev, request);
35823diff -urNp linux-3.1.4/drivers/staging/hv/vmbus_drv.c linux-3.1.4/drivers/staging/hv/vmbus_drv.c
35824--- linux-3.1.4/drivers/staging/hv/vmbus_drv.c 2011-11-11 15:19:27.000000000 -0500
35825+++ linux-3.1.4/drivers/staging/hv/vmbus_drv.c 2011-11-16 18:39:07.000000000 -0500
35826@@ -660,11 +660,11 @@ int vmbus_child_device_register(struct h
35827 {
35828 int ret = 0;
35829
35830- static atomic_t device_num = ATOMIC_INIT(0);
35831+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
35832
35833 /* Set the device name. Otherwise, device_register() will fail. */
35834 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
35835- atomic_inc_return(&device_num));
35836+ atomic_inc_return_unchecked(&device_num));
35837
35838 /* The new device belongs to this bus */
35839 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
35840diff -urNp linux-3.1.4/drivers/staging/iio/ring_generic.h linux-3.1.4/drivers/staging/iio/ring_generic.h
35841--- linux-3.1.4/drivers/staging/iio/ring_generic.h 2011-11-11 15:19:27.000000000 -0500
35842+++ linux-3.1.4/drivers/staging/iio/ring_generic.h 2011-11-16 18:39:07.000000000 -0500
35843@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
35844
35845 int (*is_enabled)(struct iio_ring_buffer *ring);
35846 int (*enable)(struct iio_ring_buffer *ring);
35847-};
35848+} __no_const;
35849
35850 struct iio_ring_setup_ops {
35851 int (*preenable)(struct iio_dev *);
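
The iio hunk above tags struct iio_ring_access_funcs with __no_const (the rtl8712, usbip, wlan-ng and zcache hunks below do the same for their ops tables), which is taken here to opt those function-pointer structures out of automatic constification because they must be written at runtime. A small sketch of the distinction, with invented names.

#include <stdio.h>

struct ring_access_sketch {
        int (*is_enabled)(void);
        int (*enable)(void);
};

static int ring_enabled(void) { return 1; }
static int ring_enable(void)  { return 0; }

static const struct ring_access_sketch fixed_ops = {   /* fully known at build time */
        .is_enabled = ring_enabled,
        .enable     = ring_enable,
};

static struct ring_access_sketch runtime_ops;          /* the __no_const case */

int main(void)
{
        /* fixed_ops.enable = ring_enable;   would not compile: member is read-only */
        runtime_ops.enable = ring_enable;       /* must stay writable, hence the opt-out */
        printf("%d %d\n", fixed_ops.is_enabled(), runtime_ops.enable());
        return 0;
}
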
35852diff -urNp linux-3.1.4/drivers/staging/mei/interface.c linux-3.1.4/drivers/staging/mei/interface.c
35853--- linux-3.1.4/drivers/staging/mei/interface.c 2011-11-11 15:19:27.000000000 -0500
35854+++ linux-3.1.4/drivers/staging/mei/interface.c 2011-11-17 18:39:18.000000000 -0500
35855@@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_dev
35856 mei_hdr->reserved = 0;
35857
35858 mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
35859- memset(mei_flow_control, 0, sizeof(mei_flow_control));
35860+ memset(mei_flow_control, 0, sizeof(*mei_flow_control));
35861 mei_flow_control->host_addr = cl->host_client_id;
35862 mei_flow_control->me_addr = cl->me_client_id;
35863 mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
35864@@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *de
35865
35866 mei_cli_disconnect =
35867 (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
35868- memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
35869+ memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
35870 mei_cli_disconnect->host_addr = cl->host_client_id;
35871 mei_cli_disconnect->me_addr = cl->me_client_id;
35872 mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
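
The mei hunk above is a plain bug fix: memset() was being passed sizeof(pointer) instead of sizeof(*pointer), so only 4 or 8 bytes of each message were cleared. A compilable illustration of the difference; the structure layout below is made up and is not the real MEI message format.

#include <stdio.h>
#include <string.h>

struct hbm_flow_control_sketch {
        unsigned char host_addr;
        unsigned char me_addr;
        unsigned char cmd;
        unsigned char reserved[5];
};

int main(void)
{
        unsigned char wr_msg_buf[2][sizeof(struct hbm_flow_control_sketch)];
        struct hbm_flow_control_sketch *fc = (void *)&wr_msg_buf[1];

        printf("sizeof(fc)  = %zu (pointer)\n", sizeof(fc));
        printf("sizeof(*fc) = %zu (structure)\n", sizeof(*fc));

        memset(fc, 0, sizeof(*fc));     /* clears the whole message, as the hunk intends */
        /* memset(fc, 0, sizeof(fc));      would clear only the size of a pointer */
        return 0;
}
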
35873diff -urNp linux-3.1.4/drivers/staging/octeon/ethernet.c linux-3.1.4/drivers/staging/octeon/ethernet.c
35874--- linux-3.1.4/drivers/staging/octeon/ethernet.c 2011-11-11 15:19:27.000000000 -0500
35875+++ linux-3.1.4/drivers/staging/octeon/ethernet.c 2011-11-16 18:39:07.000000000 -0500
35876@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
35877 * since the RX tasklet also increments it.
35878 */
35879 #ifdef CONFIG_64BIT
35880- atomic64_add(rx_status.dropped_packets,
35881- (atomic64_t *)&priv->stats.rx_dropped);
35882+ atomic64_add_unchecked(rx_status.dropped_packets,
35883+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35884 #else
35885- atomic_add(rx_status.dropped_packets,
35886- (atomic_t *)&priv->stats.rx_dropped);
35887+ atomic_add_unchecked(rx_status.dropped_packets,
35888+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
35889 #endif
35890 }
35891
35892diff -urNp linux-3.1.4/drivers/staging/octeon/ethernet-rx.c linux-3.1.4/drivers/staging/octeon/ethernet-rx.c
35893--- linux-3.1.4/drivers/staging/octeon/ethernet-rx.c 2011-11-11 15:19:27.000000000 -0500
35894+++ linux-3.1.4/drivers/staging/octeon/ethernet-rx.c 2011-11-16 18:39:07.000000000 -0500
35895@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi
35896 /* Increment RX stats for virtual ports */
35897 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35898 #ifdef CONFIG_64BIT
35899- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35900- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35901+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35902+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35903 #else
35904- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35905- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35906+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35907+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35908 #endif
35909 }
35910 netif_receive_skb(skb);
35911@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi
35912 dev->name);
35913 */
35914 #ifdef CONFIG_64BIT
35915- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35916+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35917 #else
35918- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35919+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35920 #endif
35921 dev_kfree_skb_irq(skb);
35922 }
35923diff -urNp linux-3.1.4/drivers/staging/pohmelfs/inode.c linux-3.1.4/drivers/staging/pohmelfs/inode.c
35924--- linux-3.1.4/drivers/staging/pohmelfs/inode.c 2011-11-11 15:19:27.000000000 -0500
35925+++ linux-3.1.4/drivers/staging/pohmelfs/inode.c 2011-11-16 18:39:07.000000000 -0500
35926@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct su
35927 mutex_init(&psb->mcache_lock);
35928 psb->mcache_root = RB_ROOT;
35929 psb->mcache_timeout = msecs_to_jiffies(5000);
35930- atomic_long_set(&psb->mcache_gen, 0);
35931+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35932
35933 psb->trans_max_pages = 100;
35934
35935@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct su
35936 INIT_LIST_HEAD(&psb->crypto_ready_list);
35937 INIT_LIST_HEAD(&psb->crypto_active_list);
35938
35939- atomic_set(&psb->trans_gen, 1);
35940+ atomic_set_unchecked(&psb->trans_gen, 1);
35941 atomic_long_set(&psb->total_inodes, 0);
35942
35943 mutex_init(&psb->state_lock);
35944diff -urNp linux-3.1.4/drivers/staging/pohmelfs/mcache.c linux-3.1.4/drivers/staging/pohmelfs/mcache.c
35945--- linux-3.1.4/drivers/staging/pohmelfs/mcache.c 2011-11-11 15:19:27.000000000 -0500
35946+++ linux-3.1.4/drivers/staging/pohmelfs/mcache.c 2011-11-16 18:39:07.000000000 -0500
35947@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
35948 m->data = data;
35949 m->start = start;
35950 m->size = size;
35951- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35952+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35953
35954 mutex_lock(&psb->mcache_lock);
35955 err = pohmelfs_mcache_insert(psb, m);
35956diff -urNp linux-3.1.4/drivers/staging/pohmelfs/netfs.h linux-3.1.4/drivers/staging/pohmelfs/netfs.h
35957--- linux-3.1.4/drivers/staging/pohmelfs/netfs.h 2011-11-11 15:19:27.000000000 -0500
35958+++ linux-3.1.4/drivers/staging/pohmelfs/netfs.h 2011-11-16 18:39:07.000000000 -0500
35959@@ -571,14 +571,14 @@ struct pohmelfs_config;
35960 struct pohmelfs_sb {
35961 struct rb_root mcache_root;
35962 struct mutex mcache_lock;
35963- atomic_long_t mcache_gen;
35964+ atomic_long_unchecked_t mcache_gen;
35965 unsigned long mcache_timeout;
35966
35967 unsigned int idx;
35968
35969 unsigned int trans_retries;
35970
35971- atomic_t trans_gen;
35972+ atomic_unchecked_t trans_gen;
35973
35974 unsigned int crypto_attached_size;
35975 unsigned int crypto_align_size;
35976diff -urNp linux-3.1.4/drivers/staging/pohmelfs/trans.c linux-3.1.4/drivers/staging/pohmelfs/trans.c
35977--- linux-3.1.4/drivers/staging/pohmelfs/trans.c 2011-11-11 15:19:27.000000000 -0500
35978+++ linux-3.1.4/drivers/staging/pohmelfs/trans.c 2011-11-16 18:39:07.000000000 -0500
35979@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
35980 int err;
35981 struct netfs_cmd *cmd = t->iovec.iov_base;
35982
35983- t->gen = atomic_inc_return(&psb->trans_gen);
35984+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35985
35986 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35987 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35988diff -urNp linux-3.1.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.1.4/drivers/staging/rtl8712/rtl871x_io.h
35989--- linux-3.1.4/drivers/staging/rtl8712/rtl871x_io.h 2011-11-11 15:19:27.000000000 -0500
35990+++ linux-3.1.4/drivers/staging/rtl8712/rtl871x_io.h 2011-11-16 18:39:07.000000000 -0500
35991@@ -83,7 +83,7 @@ struct _io_ops {
35992 u8 *pmem);
35993 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35994 u8 *pmem);
35995-};
35996+} __no_const;
35997
35998 struct io_req {
35999 struct list_head list;
36000diff -urNp linux-3.1.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.1.4/drivers/staging/sbe-2t3e3/netdev.c
36001--- linux-3.1.4/drivers/staging/sbe-2t3e3/netdev.c 2011-11-11 15:19:27.000000000 -0500
36002+++ linux-3.1.4/drivers/staging/sbe-2t3e3/netdev.c 2011-11-16 18:39:08.000000000 -0500
36003@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
36004 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
36005
36006 if (rlen)
36007- if (copy_to_user(data, &resp, rlen))
36008+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
36009 return -EFAULT;
36010
36011 return 0;
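
The sbe-2t3e3 hunk above clamps the ioctl reply copy so that a length reported by the lower layer can never exceed the on-stack reply buffer. A userspace sketch of the same pattern, with memcpy() standing in for copy_to_user() and all names invented.

#include <stdio.h>
#include <string.h>

struct resp_sketch { char data[64]; };          /* stand-in for the ioctl reply */

static int copy_reply(char *user_buf, const struct resp_sketch *resp, size_t rlen)
{
        if (rlen > sizeof(*resp))
                return -1;                      /* oversized length rejected, no over-read */
        memcpy(user_buf, resp, rlen);
        return 0;
}

int main(void)
{
        struct resp_sketch resp = { "ok" };
        char out[256];

        printf("%d\n", copy_reply(out, &resp, sizeof(resp) + 100));    /* -1 */
        printf("%d\n", copy_reply(out, &resp, 3));                     /*  0 */
        return 0;
}
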
36012diff -urNp linux-3.1.4/drivers/staging/usbip/usbip_common.h linux-3.1.4/drivers/staging/usbip/usbip_common.h
36013--- linux-3.1.4/drivers/staging/usbip/usbip_common.h 2011-11-11 15:19:27.000000000 -0500
36014+++ linux-3.1.4/drivers/staging/usbip/usbip_common.h 2011-11-16 18:39:08.000000000 -0500
36015@@ -289,7 +289,7 @@ struct usbip_device {
36016 void (*shutdown)(struct usbip_device *);
36017 void (*reset)(struct usbip_device *);
36018 void (*unusable)(struct usbip_device *);
36019- } eh_ops;
36020+ } __no_const eh_ops;
36021 };
36022
36023 #if 0
36024diff -urNp linux-3.1.4/drivers/staging/usbip/vhci.h linux-3.1.4/drivers/staging/usbip/vhci.h
36025--- linux-3.1.4/drivers/staging/usbip/vhci.h 2011-11-11 15:19:27.000000000 -0500
36026+++ linux-3.1.4/drivers/staging/usbip/vhci.h 2011-11-16 18:39:08.000000000 -0500
36027@@ -85,7 +85,7 @@ struct vhci_hcd {
36028 unsigned resuming:1;
36029 unsigned long re_timeout;
36030
36031- atomic_t seqnum;
36032+ atomic_unchecked_t seqnum;
36033
36034 /*
36035 * NOTE:
36036diff -urNp linux-3.1.4/drivers/staging/usbip/vhci_hcd.c linux-3.1.4/drivers/staging/usbip/vhci_hcd.c
36037--- linux-3.1.4/drivers/staging/usbip/vhci_hcd.c 2011-11-11 15:19:27.000000000 -0500
36038+++ linux-3.1.4/drivers/staging/usbip/vhci_hcd.c 2011-11-16 18:39:08.000000000 -0500
36039@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
36040 return;
36041 }
36042
36043- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
36044+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36045 if (priv->seqnum == 0xffff)
36046 dev_info(&urb->dev->dev, "seqnum max\n");
36047
36048@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_h
36049 return -ENOMEM;
36050 }
36051
36052- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
36053+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36054 if (unlink->seqnum == 0xffff)
36055 pr_info("seqnum max\n");
36056
36057@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hc
36058 vdev->rhport = rhport;
36059 }
36060
36061- atomic_set(&vhci->seqnum, 0);
36062+ atomic_set_unchecked(&vhci->seqnum, 0);
36063 spin_lock_init(&vhci->lock);
36064
36065 hcd->power_budget = 0; /* no limit */
36066diff -urNp linux-3.1.4/drivers/staging/usbip/vhci_rx.c linux-3.1.4/drivers/staging/usbip/vhci_rx.c
36067--- linux-3.1.4/drivers/staging/usbip/vhci_rx.c 2011-11-11 15:19:27.000000000 -0500
36068+++ linux-3.1.4/drivers/staging/usbip/vhci_rx.c 2011-11-16 18:39:08.000000000 -0500
36069@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
36070 if (!urb) {
36071 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
36072 pr_info("max seqnum %d\n",
36073- atomic_read(&the_controller->seqnum));
36074+ atomic_read_unchecked(&the_controller->seqnum));
36075 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
36076 return;
36077 }
36078diff -urNp linux-3.1.4/drivers/staging/vt6655/hostap.c linux-3.1.4/drivers/staging/vt6655/hostap.c
36079--- linux-3.1.4/drivers/staging/vt6655/hostap.c 2011-11-11 15:19:27.000000000 -0500
36080+++ linux-3.1.4/drivers/staging/vt6655/hostap.c 2011-11-16 18:39:08.000000000 -0500
36081@@ -79,14 +79,13 @@ static int msglevel
36082 *
36083 */
36084
36085+static net_device_ops_no_const apdev_netdev_ops;
36086+
36087 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36088 {
36089 PSDevice apdev_priv;
36090 struct net_device *dev = pDevice->dev;
36091 int ret;
36092- const struct net_device_ops apdev_netdev_ops = {
36093- .ndo_start_xmit = pDevice->tx_80211,
36094- };
36095
36096 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36097
36098@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
36099 *apdev_priv = *pDevice;
36100 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36101
36102+ /* only half broken now */
36103+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36104 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36105
36106 pDevice->apdev->type = ARPHRD_IEEE80211;
36107diff -urNp linux-3.1.4/drivers/staging/vt6656/hostap.c linux-3.1.4/drivers/staging/vt6656/hostap.c
36108--- linux-3.1.4/drivers/staging/vt6656/hostap.c 2011-11-11 15:19:27.000000000 -0500
36109+++ linux-3.1.4/drivers/staging/vt6656/hostap.c 2011-11-16 18:39:08.000000000 -0500
36110@@ -80,14 +80,13 @@ static int msglevel
36111 *
36112 */
36113
36114+static net_device_ops_no_const apdev_netdev_ops;
36115+
36116 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36117 {
36118 PSDevice apdev_priv;
36119 struct net_device *dev = pDevice->dev;
36120 int ret;
36121- const struct net_device_ops apdev_netdev_ops = {
36122- .ndo_start_xmit = pDevice->tx_80211,
36123- };
36124
36125 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36126
36127@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
36128 *apdev_priv = *pDevice;
36129 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36130
36131+ /* only half broken now */
36132+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36133 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36134
36135 pDevice->apdev->type = ARPHRD_IEEE80211;
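
The vt6655 and vt6656 hostap hunks above replace a const struct net_device_ops that lived on the stack, and whose address outlived the function, with a file-scope static filled in at runtime; the patch's own comment concedes it is "only half broken now", since one static instance is shared by every device. A sketch of the lifetime problem and the chosen fix, with invented names.

#include <stdio.h>

struct netdev_ops_sketch { int (*start_xmit)(void); };

static const struct netdev_ops_sketch *registered_ops;

static int xmit_stub(void) { return 0; }

static void register_from_stack(void)
{
        const struct netdev_ops_sketch local_ops = { .start_xmit = xmit_stub };
        registered_ops = &local_ops;    /* dangles as soon as this function returns */
}

static struct netdev_ops_sketch static_ops;     /* the hunk's replacement: static storage */

static void register_from_static(void)
{
        static_ops.start_xmit = xmit_stub;      /* filled at runtime; shared between   */
        registered_ops = &static_ops;           /* devices, but at least not dangling  */
}

int main(void)
{
        register_from_static();
        printf("start_xmit() -> %d\n", registered_ops->start_xmit());
        (void)register_from_stack;              /* kept only to show the broken variant */
        return 0;
}
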
36136diff -urNp linux-3.1.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.1.4/drivers/staging/wlan-ng/hfa384x_usb.c
36137--- linux-3.1.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-11 15:19:27.000000000 -0500
36138+++ linux-3.1.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-16 18:39:08.000000000 -0500
36139@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
36140
36141 struct usbctlx_completor {
36142 int (*complete) (struct usbctlx_completor *);
36143-};
36144+} __no_const;
36145
36146 static int
36147 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
36148diff -urNp linux-3.1.4/drivers/staging/zcache/tmem.c linux-3.1.4/drivers/staging/zcache/tmem.c
36149--- linux-3.1.4/drivers/staging/zcache/tmem.c 2011-11-11 15:19:27.000000000 -0500
36150+++ linux-3.1.4/drivers/staging/zcache/tmem.c 2011-11-16 18:39:08.000000000 -0500
36151@@ -39,7 +39,7 @@
36152 * A tmem host implementation must use this function to register callbacks
36153 * for memory allocation.
36154 */
36155-static struct tmem_hostops tmem_hostops;
36156+static tmem_hostops_no_const tmem_hostops;
36157
36158 static void tmem_objnode_tree_init(void);
36159
36160@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
36161 * A tmem host implementation must use this function to register
36162 * callbacks for a page-accessible memory (PAM) implementation
36163 */
36164-static struct tmem_pamops tmem_pamops;
36165+static tmem_pamops_no_const tmem_pamops;
36166
36167 void tmem_register_pamops(struct tmem_pamops *m)
36168 {
36169diff -urNp linux-3.1.4/drivers/staging/zcache/tmem.h linux-3.1.4/drivers/staging/zcache/tmem.h
36170--- linux-3.1.4/drivers/staging/zcache/tmem.h 2011-11-11 15:19:27.000000000 -0500
36171+++ linux-3.1.4/drivers/staging/zcache/tmem.h 2011-11-16 18:39:08.000000000 -0500
36172@@ -180,6 +180,7 @@ struct tmem_pamops {
36173 void (*new_obj)(struct tmem_obj *);
36174 int (*replace_in_obj)(void *, struct tmem_obj *);
36175 };
36176+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
36177 extern void tmem_register_pamops(struct tmem_pamops *m);
36178
36179 /* memory allocation methods provided by the host implementation */
36180@@ -189,6 +190,7 @@ struct tmem_hostops {
36181 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
36182 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
36183 };
36184+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
36185 extern void tmem_register_hostops(struct tmem_hostops *m);
36186
36187 /* core tmem accessor functions */
36188diff -urNp linux-3.1.4/drivers/target/iscsi/iscsi_target.c linux-3.1.4/drivers/target/iscsi/iscsi_target.c
36189--- linux-3.1.4/drivers/target/iscsi/iscsi_target.c 2011-11-11 15:19:27.000000000 -0500
36190+++ linux-3.1.4/drivers/target/iscsi/iscsi_target.c 2011-11-16 18:39:08.000000000 -0500
36191@@ -1368,7 +1368,7 @@ static int iscsit_handle_data_out(struct
36192 * outstanding_r2ts reaches zero, go ahead and send the delayed
36193 * TASK_ABORTED status.
36194 */
36195- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
36196+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
36197 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
36198 if (--cmd->outstanding_r2ts < 1) {
36199 iscsit_stop_dataout_timer(cmd);
36200diff -urNp linux-3.1.4/drivers/target/target_core_alua.c linux-3.1.4/drivers/target/target_core_alua.c
36201--- linux-3.1.4/drivers/target/target_core_alua.c 2011-11-11 15:19:27.000000000 -0500
36202+++ linux-3.1.4/drivers/target/target_core_alua.c 2011-11-16 18:40:29.000000000 -0500
36203@@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_
36204 char path[ALUA_METADATA_PATH_LEN];
36205 int len;
36206
36207+ pax_track_stack();
36208+
36209 memset(path, 0, ALUA_METADATA_PATH_LEN);
36210
36211 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
36212@@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondar
36213 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
36214 int len;
36215
36216+ pax_track_stack();
36217+
36218 memset(path, 0, ALUA_METADATA_PATH_LEN);
36219 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
36220
36221diff -urNp linux-3.1.4/drivers/target/target_core_cdb.c linux-3.1.4/drivers/target/target_core_cdb.c
36222--- linux-3.1.4/drivers/target/target_core_cdb.c 2011-11-11 15:19:27.000000000 -0500
36223+++ linux-3.1.4/drivers/target/target_core_cdb.c 2011-11-16 18:40:29.000000000 -0500
36224@@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *
36225 int length = 0;
36226 unsigned char buf[SE_MODE_PAGE_BUF];
36227
36228+ pax_track_stack();
36229+
36230 memset(buf, 0, SE_MODE_PAGE_BUF);
36231
36232 switch (cdb[2] & 0x3f) {
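
The target_core hunks above and below insert pax_track_stack() at the top of functions with unusually large automatic buffers, which seems intended to let the patch set's kernel-stack tracking feature account for such deep frames. A trivial sketch of the kind of frame involved; the buffer size is assumed and all names are invented.

#include <string.h>

#define SE_MODE_PAGE_BUF_SKETCH 512     /* assumed size, for illustration only */

static int emulate_modesense_sketch(void)
{
        unsigned char buf[SE_MODE_PAGE_BUF_SKETCH];     /* large automatic buffer */

        /* In the patched kernel, pax_track_stack() sits here so the stack
         * tracking machinery can account for this deep frame. */
        memset(buf, 0, sizeof(buf));
        return buf[0];
}

int main(void)
{
        return emulate_modesense_sketch();
}
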
36233diff -urNp linux-3.1.4/drivers/target/target_core_configfs.c linux-3.1.4/drivers/target/target_core_configfs.c
36234--- linux-3.1.4/drivers/target/target_core_configfs.c 2011-11-11 15:19:27.000000000 -0500
36235+++ linux-3.1.4/drivers/target/target_core_configfs.c 2011-11-16 19:04:37.000000000 -0500
36236@@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_a
36237 ssize_t len = 0;
36238 int reg_count = 0, prf_isid;
36239
36240+ pax_track_stack();
36241+
36242 if (!su_dev->se_dev_ptr)
36243 return -ENODEV;
36244
36245diff -urNp linux-3.1.4/drivers/target/target_core_pr.c linux-3.1.4/drivers/target/target_core_pr.c
36246--- linux-3.1.4/drivers/target/target_core_pr.c 2011-11-11 15:19:27.000000000 -0500
36247+++ linux-3.1.4/drivers/target/target_core_pr.c 2011-11-16 18:40:29.000000000 -0500
36248@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
36249 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
36250 u16 tpgt;
36251
36252+ pax_track_stack();
36253+
36254 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
36255 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
36256 /*
36257@@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf
36258 ssize_t len = 0;
36259 int reg_count = 0;
36260
36261+ pax_track_stack();
36262+
36263 memset(buf, 0, pr_aptpl_buf_len);
36264 /*
36265 * Called to clear metadata once APTPL has been deactivated.
36266@@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_f
36267 char path[512];
36268 int ret;
36269
36270+ pax_track_stack();
36271+
36272 memset(iov, 0, sizeof(struct iovec));
36273 memset(path, 0, 512);
36274
36275diff -urNp linux-3.1.4/drivers/target/target_core_tmr.c linux-3.1.4/drivers/target/target_core_tmr.c
36276--- linux-3.1.4/drivers/target/target_core_tmr.c 2011-11-11 15:19:27.000000000 -0500
36277+++ linux-3.1.4/drivers/target/target_core_tmr.c 2011-11-16 18:39:08.000000000 -0500
36278@@ -255,7 +255,7 @@ static void core_tmr_drain_task_list(
36279 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
36280 cmd->t_task_list_num,
36281 atomic_read(&cmd->t_task_cdbs_left),
36282- atomic_read(&cmd->t_task_cdbs_sent),
36283+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36284 atomic_read(&cmd->t_transport_active),
36285 atomic_read(&cmd->t_transport_stop),
36286 atomic_read(&cmd->t_transport_sent));
36287@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
36288 pr_debug("LUN_RESET: got t_transport_active = 1 for"
36289 " task: %p, t_fe_count: %d dev: %p\n", task,
36290 fe_count, dev);
36291- atomic_set(&cmd->t_transport_aborted, 1);
36292+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36293 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36294
36295 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36296@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
36297 }
36298 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
36299 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
36300- atomic_set(&cmd->t_transport_aborted, 1);
36301+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36302 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36303
36304 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36305diff -urNp linux-3.1.4/drivers/target/target_core_transport.c linux-3.1.4/drivers/target/target_core_transport.c
36306--- linux-3.1.4/drivers/target/target_core_transport.c 2011-11-11 15:19:27.000000000 -0500
36307+++ linux-3.1.4/drivers/target/target_core_transport.c 2011-11-16 18:39:08.000000000 -0500
36308@@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_t
36309
36310 dev->queue_depth = dev_limits->queue_depth;
36311 atomic_set(&dev->depth_left, dev->queue_depth);
36312- atomic_set(&dev->dev_ordered_id, 0);
36313+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
36314
36315 se_dev_set_default_attribs(dev, dev_limits);
36316
36317@@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_at
36318 * Used to determine when ORDERED commands should go from
36319 * Dormant to Active status.
36320 */
36321- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
36322+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
36323 smp_mb__after_atomic_inc();
36324 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
36325 cmd->se_ordered_id, cmd->sam_task_attr,
36326@@ -1960,7 +1960,7 @@ static void transport_generic_request_fa
36327 " t_transport_active: %d t_transport_stop: %d"
36328 " t_transport_sent: %d\n", cmd->t_task_list_num,
36329 atomic_read(&cmd->t_task_cdbs_left),
36330- atomic_read(&cmd->t_task_cdbs_sent),
36331+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36332 atomic_read(&cmd->t_task_cdbs_ex_left),
36333 atomic_read(&cmd->t_transport_active),
36334 atomic_read(&cmd->t_transport_stop),
36335@@ -2460,9 +2460,9 @@ check_depth:
36336 spin_lock_irqsave(&cmd->t_state_lock, flags);
36337 atomic_set(&task->task_active, 1);
36338 atomic_set(&task->task_sent, 1);
36339- atomic_inc(&cmd->t_task_cdbs_sent);
36340+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
36341
36342- if (atomic_read(&cmd->t_task_cdbs_sent) ==
36343+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
36344 cmd->t_task_list_num)
36345 atomic_set(&cmd->transport_sent, 1);
36346
36347@@ -4665,7 +4665,7 @@ static void transport_generic_wait_for_t
36348 atomic_set(&cmd->transport_lun_stop, 0);
36349 }
36350 if (!atomic_read(&cmd->t_transport_active) ||
36351- atomic_read(&cmd->t_transport_aborted))
36352+ atomic_read_unchecked(&cmd->t_transport_aborted))
36353 goto remove;
36354
36355 atomic_set(&cmd->t_transport_stop, 1);
36356@@ -4900,7 +4900,7 @@ int transport_check_aborted_status(struc
36357 {
36358 int ret = 0;
36359
36360- if (atomic_read(&cmd->t_transport_aborted) != 0) {
36361+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
36362 if (!send_status ||
36363 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
36364 return 1;
36365@@ -4937,7 +4937,7 @@ void transport_send_task_abort(struct se
36366 */
36367 if (cmd->data_direction == DMA_TO_DEVICE) {
36368 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
36369- atomic_inc(&cmd->t_transport_aborted);
36370+ atomic_inc_unchecked(&cmd->t_transport_aborted);
36371 smp_mb__after_atomic_inc();
36372 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
36373 transport_new_cmd_failure(cmd);
36374@@ -5051,7 +5051,7 @@ static void transport_processing_shutdow
36375 cmd->se_tfo->get_task_tag(cmd),
36376 cmd->t_task_list_num,
36377 atomic_read(&cmd->t_task_cdbs_left),
36378- atomic_read(&cmd->t_task_cdbs_sent),
36379+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36380 atomic_read(&cmd->t_transport_active),
36381 atomic_read(&cmd->t_transport_stop),
36382 atomic_read(&cmd->t_transport_sent));
36383diff -urNp linux-3.1.4/drivers/telephony/ixj.c linux-3.1.4/drivers/telephony/ixj.c
36384--- linux-3.1.4/drivers/telephony/ixj.c 2011-11-11 15:19:27.000000000 -0500
36385+++ linux-3.1.4/drivers/telephony/ixj.c 2011-11-16 18:40:29.000000000 -0500
36386@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
36387 bool mContinue;
36388 char *pIn, *pOut;
36389
36390+ pax_track_stack();
36391+
36392 if (!SCI_Prepare(j))
36393 return 0;
36394
36395diff -urNp linux-3.1.4/drivers/tty/hvc/hvcs.c linux-3.1.4/drivers/tty/hvc/hvcs.c
36396--- linux-3.1.4/drivers/tty/hvc/hvcs.c 2011-11-11 15:19:27.000000000 -0500
36397+++ linux-3.1.4/drivers/tty/hvc/hvcs.c 2011-11-16 18:39:08.000000000 -0500
36398@@ -83,6 +83,7 @@
36399 #include <asm/hvcserver.h>
36400 #include <asm/uaccess.h>
36401 #include <asm/vio.h>
36402+#include <asm/local.h>
36403
36404 /*
36405 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
36406@@ -270,7 +271,7 @@ struct hvcs_struct {
36407 unsigned int index;
36408
36409 struct tty_struct *tty;
36410- int open_count;
36411+ local_t open_count;
36412
36413 /*
36414 * Used to tell the driver kernel_thread what operations need to take
36415@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
36416
36417 spin_lock_irqsave(&hvcsd->lock, flags);
36418
36419- if (hvcsd->open_count > 0) {
36420+ if (local_read(&hvcsd->open_count) > 0) {
36421 spin_unlock_irqrestore(&hvcsd->lock, flags);
36422 printk(KERN_INFO "HVCS: vterm state unchanged. "
36423 "The hvcs device node is still in use.\n");
36424@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
36425 if ((retval = hvcs_partner_connect(hvcsd)))
36426 goto error_release;
36427
36428- hvcsd->open_count = 1;
36429+ local_set(&hvcsd->open_count, 1);
36430 hvcsd->tty = tty;
36431 tty->driver_data = hvcsd;
36432
36433@@ -1179,7 +1180,7 @@ fast_open:
36434
36435 spin_lock_irqsave(&hvcsd->lock, flags);
36436 kref_get(&hvcsd->kref);
36437- hvcsd->open_count++;
36438+ local_inc(&hvcsd->open_count);
36439 hvcsd->todo_mask |= HVCS_SCHED_READ;
36440 spin_unlock_irqrestore(&hvcsd->lock, flags);
36441
36442@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
36443 hvcsd = tty->driver_data;
36444
36445 spin_lock_irqsave(&hvcsd->lock, flags);
36446- if (--hvcsd->open_count == 0) {
36447+ if (local_dec_and_test(&hvcsd->open_count)) {
36448
36449 vio_disable_interrupts(hvcsd->vdev);
36450
36451@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
36452 free_irq(irq, hvcsd);
36453 kref_put(&hvcsd->kref, destroy_hvcs_struct);
36454 return;
36455- } else if (hvcsd->open_count < 0) {
36456+ } else if (local_read(&hvcsd->open_count) < 0) {
36457 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
36458 " is missmanaged.\n",
36459- hvcsd->vdev->unit_address, hvcsd->open_count);
36460+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
36461 }
36462
36463 spin_unlock_irqrestore(&hvcsd->lock, flags);
36464@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
36465
36466 spin_lock_irqsave(&hvcsd->lock, flags);
36467 /* Preserve this so that we know how many kref refs to put */
36468- temp_open_count = hvcsd->open_count;
36469+ temp_open_count = local_read(&hvcsd->open_count);
36470
36471 /*
36472 * Don't kref put inside the spinlock because the destruction
36473@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
36474 hvcsd->tty->driver_data = NULL;
36475 hvcsd->tty = NULL;
36476
36477- hvcsd->open_count = 0;
36478+ local_set(&hvcsd->open_count, 0);
36479
36480 /* This will drop any buffered data on the floor which is OK in a hangup
36481 * scenario. */
36482@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
36483 * the middle of a write operation? This is a crummy place to do this
36484 * but we want to keep it all in the spinlock.
36485 */
36486- if (hvcsd->open_count <= 0) {
36487+ if (local_read(&hvcsd->open_count) <= 0) {
36488 spin_unlock_irqrestore(&hvcsd->lock, flags);
36489 return -ENODEV;
36490 }
36491@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
36492 {
36493 struct hvcs_struct *hvcsd = tty->driver_data;
36494
36495- if (!hvcsd || hvcsd->open_count <= 0)
36496+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
36497 return 0;
36498
36499 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
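
The hvcs hunk above (and the ipwireless tty hunk below) turns the per-port open_count from a bare int into local_t, so every update goes through local_set/local_inc/local_dec_and_test/local_read instead of raw ++/-- under the lock. A userspace sketch of how those helpers compose in the open/close paths; the names are invented and no real atomicity is implemented.

#include <stdio.h>

typedef struct { long v; } local_sketch_t;      /* stand-in for the kernel's local_t */

static void local_set_sketch(local_sketch_t *l, long n)  { l->v = n; }
static void local_inc_sketch(local_sketch_t *l)          { l->v++; }
static int  local_dec_and_test_sketch(local_sketch_t *l) { return --l->v == 0; }
static long local_read_sketch(const local_sketch_t *l)   { return l->v; }

int main(void)
{
        local_sketch_t open_count;

        local_set_sketch(&open_count, 1);       /* first open  */
        local_inc_sketch(&open_count);          /* fast reopen */
        printf("in use: %ld\n", local_read_sketch(&open_count));

        printf("last close? %d\n", local_dec_and_test_sketch(&open_count));     /* 0 */
        printf("last close? %d\n", local_dec_and_test_sketch(&open_count));     /* 1 */
        return 0;
}
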
36500diff -urNp linux-3.1.4/drivers/tty/ipwireless/tty.c linux-3.1.4/drivers/tty/ipwireless/tty.c
36501--- linux-3.1.4/drivers/tty/ipwireless/tty.c 2011-11-11 15:19:27.000000000 -0500
36502+++ linux-3.1.4/drivers/tty/ipwireless/tty.c 2011-11-16 18:39:08.000000000 -0500
36503@@ -29,6 +29,7 @@
36504 #include <linux/tty_driver.h>
36505 #include <linux/tty_flip.h>
36506 #include <linux/uaccess.h>
36507+#include <asm/local.h>
36508
36509 #include "tty.h"
36510 #include "network.h"
36511@@ -51,7 +52,7 @@ struct ipw_tty {
36512 int tty_type;
36513 struct ipw_network *network;
36514 struct tty_struct *linux_tty;
36515- int open_count;
36516+ local_t open_count;
36517 unsigned int control_lines;
36518 struct mutex ipw_tty_mutex;
36519 int tx_bytes_queued;
36520@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
36521 mutex_unlock(&tty->ipw_tty_mutex);
36522 return -ENODEV;
36523 }
36524- if (tty->open_count == 0)
36525+ if (local_read(&tty->open_count) == 0)
36526 tty->tx_bytes_queued = 0;
36527
36528- tty->open_count++;
36529+ local_inc(&tty->open_count);
36530
36531 tty->linux_tty = linux_tty;
36532 linux_tty->driver_data = tty;
36533@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
36534
36535 static void do_ipw_close(struct ipw_tty *tty)
36536 {
36537- tty->open_count--;
36538-
36539- if (tty->open_count == 0) {
36540+ if (local_dec_return(&tty->open_count) == 0) {
36541 struct tty_struct *linux_tty = tty->linux_tty;
36542
36543 if (linux_tty != NULL) {
36544@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
36545 return;
36546
36547 mutex_lock(&tty->ipw_tty_mutex);
36548- if (tty->open_count == 0) {
36549+ if (local_read(&tty->open_count) == 0) {
36550 mutex_unlock(&tty->ipw_tty_mutex);
36551 return;
36552 }
36553@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
36554 return;
36555 }
36556
36557- if (!tty->open_count) {
36558+ if (!local_read(&tty->open_count)) {
36559 mutex_unlock(&tty->ipw_tty_mutex);
36560 return;
36561 }
36562@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
36563 return -ENODEV;
36564
36565 mutex_lock(&tty->ipw_tty_mutex);
36566- if (!tty->open_count) {
36567+ if (!local_read(&tty->open_count)) {
36568 mutex_unlock(&tty->ipw_tty_mutex);
36569 return -EINVAL;
36570 }
36571@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
36572 if (!tty)
36573 return -ENODEV;
36574
36575- if (!tty->open_count)
36576+ if (!local_read(&tty->open_count))
36577 return -EINVAL;
36578
36579 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
36580@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
36581 if (!tty)
36582 return 0;
36583
36584- if (!tty->open_count)
36585+ if (!local_read(&tty->open_count))
36586 return 0;
36587
36588 return tty->tx_bytes_queued;
36589@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
36590 if (!tty)
36591 return -ENODEV;
36592
36593- if (!tty->open_count)
36594+ if (!local_read(&tty->open_count))
36595 return -EINVAL;
36596
36597 return get_control_lines(tty);
36598@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
36599 if (!tty)
36600 return -ENODEV;
36601
36602- if (!tty->open_count)
36603+ if (!local_read(&tty->open_count))
36604 return -EINVAL;
36605
36606 return set_control_lines(tty, set, clear);
36607@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
36608 if (!tty)
36609 return -ENODEV;
36610
36611- if (!tty->open_count)
36612+ if (!local_read(&tty->open_count))
36613 return -EINVAL;
36614
36615 /* FIXME: Exactly how is the tty object locked here .. */
36616@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
36617 against a parallel ioctl etc */
36618 mutex_lock(&ttyj->ipw_tty_mutex);
36619 }
36620- while (ttyj->open_count)
36621+ while (local_read(&ttyj->open_count))
36622 do_ipw_close(ttyj);
36623 ipwireless_disassociate_network_ttys(network,
36624 ttyj->channel_idx);
36625diff -urNp linux-3.1.4/drivers/tty/n_gsm.c linux-3.1.4/drivers/tty/n_gsm.c
36626--- linux-3.1.4/drivers/tty/n_gsm.c 2011-11-11 15:19:27.000000000 -0500
36627+++ linux-3.1.4/drivers/tty/n_gsm.c 2011-11-16 18:39:08.000000000 -0500
36628@@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
36629 kref_init(&dlci->ref);
36630 mutex_init(&dlci->mutex);
36631 dlci->fifo = &dlci->_fifo;
36632- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
36633+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
36634 kfree(dlci);
36635 return NULL;
36636 }
36637diff -urNp linux-3.1.4/drivers/tty/n_tty.c linux-3.1.4/drivers/tty/n_tty.c
36638--- linux-3.1.4/drivers/tty/n_tty.c 2011-11-11 15:19:27.000000000 -0500
36639+++ linux-3.1.4/drivers/tty/n_tty.c 2011-11-16 18:39:08.000000000 -0500
36640@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
36641 {
36642 *ops = tty_ldisc_N_TTY;
36643 ops->owner = NULL;
36644- ops->refcount = ops->flags = 0;
36645+ atomic_set(&ops->refcount, 0);
36646+ ops->flags = 0;
36647 }
36648 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
36649diff -urNp linux-3.1.4/drivers/tty/pty.c linux-3.1.4/drivers/tty/pty.c
36650--- linux-3.1.4/drivers/tty/pty.c 2011-11-11 15:19:27.000000000 -0500
36651+++ linux-3.1.4/drivers/tty/pty.c 2011-11-16 18:39:08.000000000 -0500
36652@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
36653 register_sysctl_table(pty_root_table);
36654
36655 /* Now create the /dev/ptmx special device */
36656+ pax_open_kernel();
36657 tty_default_fops(&ptmx_fops);
36658- ptmx_fops.open = ptmx_open;
36659+ *(void **)&ptmx_fops.open = ptmx_open;
36660+ pax_close_kernel();
36661
36662 cdev_init(&ptmx_cdev, &ptmx_fops);
36663 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
36664diff -urNp linux-3.1.4/drivers/tty/rocket.c linux-3.1.4/drivers/tty/rocket.c
36665--- linux-3.1.4/drivers/tty/rocket.c 2011-11-11 15:19:27.000000000 -0500
36666+++ linux-3.1.4/drivers/tty/rocket.c 2011-11-16 18:40:29.000000000 -0500
36667@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
36668 struct rocket_ports tmp;
36669 int board;
36670
36671+ pax_track_stack();
36672+
36673 if (!retports)
36674 return -EFAULT;
36675 memset(&tmp, 0, sizeof (tmp));
36676diff -urNp linux-3.1.4/drivers/tty/serial/kgdboc.c linux-3.1.4/drivers/tty/serial/kgdboc.c
36677--- linux-3.1.4/drivers/tty/serial/kgdboc.c 2011-11-11 15:19:27.000000000 -0500
36678+++ linux-3.1.4/drivers/tty/serial/kgdboc.c 2011-11-16 18:39:08.000000000 -0500
36679@@ -23,8 +23,9 @@
36680 #define MAX_CONFIG_LEN 40
36681
36682 static struct kgdb_io kgdboc_io_ops;
36683+static struct kgdb_io kgdboc_io_ops_console;
36684
36685-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
36686+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
36687 static int configured = -1;
36688
36689 static char config[MAX_CONFIG_LEN];
36690@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
36691 kgdboc_unregister_kbd();
36692 if (configured == 1)
36693 kgdb_unregister_io_module(&kgdboc_io_ops);
36694+ else if (configured == 2)
36695+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
36696 }
36697
36698 static int configure_kgdboc(void)
36699@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
36700 int err;
36701 char *cptr = config;
36702 struct console *cons;
36703+ int is_console = 0;
36704
36705 err = kgdboc_option_setup(config);
36706 if (err || !strlen(config) || isspace(config[0]))
36707 goto noconfig;
36708
36709 err = -ENODEV;
36710- kgdboc_io_ops.is_console = 0;
36711 kgdb_tty_driver = NULL;
36712
36713 kgdboc_use_kms = 0;
36714@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
36715 int idx;
36716 if (cons->device && cons->device(cons, &idx) == p &&
36717 idx == tty_line) {
36718- kgdboc_io_ops.is_console = 1;
36719+ is_console = 1;
36720 break;
36721 }
36722 cons = cons->next;
36723@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
36724 kgdb_tty_line = tty_line;
36725
36726 do_register:
36727- err = kgdb_register_io_module(&kgdboc_io_ops);
36728+ if (is_console) {
36729+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
36730+ configured = 2;
36731+ } else {
36732+ err = kgdb_register_io_module(&kgdboc_io_ops);
36733+ configured = 1;
36734+ }
36735 if (err)
36736 goto noconfig;
36737
36738- configured = 1;
36739-
36740 return 0;
36741
36742 noconfig:
36743@@ -212,7 +219,7 @@ noconfig:
36744 static int __init init_kgdboc(void)
36745 {
36746 /* Already configured? */
36747- if (configured == 1)
36748+ if (configured >= 1)
36749 return 0;
36750
36751 return configure_kgdboc();
36752@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
36753 if (config[len - 1] == '\n')
36754 config[len - 1] = '\0';
36755
36756- if (configured == 1)
36757+ if (configured >= 1)
36758 cleanup_kgdboc();
36759
36760 /* Go and configure with the new params. */
36761@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
36762 .post_exception = kgdboc_post_exp_handler,
36763 };
36764
36765+static struct kgdb_io kgdboc_io_ops_console = {
36766+ .name = "kgdboc",
36767+ .read_char = kgdboc_get_char,
36768+ .write_char = kgdboc_put_char,
36769+ .pre_exception = kgdboc_pre_exp_handler,
36770+ .post_exception = kgdboc_post_exp_handler,
36771+ .is_console = 1
36772+};
36773+
36774 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
36775 /* This is only available if kgdboc is a built in for early debugging */
36776 static int __init kgdboc_early_init(char *opt)
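
The kgdboc hunk above splits the single kgdb_io structure into a plain and a console variant chosen at registration time, so the is_console flag no longer has to be written into the structure at runtime (something a constified ops table would forbid); the configured variable now distinguishes the two cases with the values 1 and 2. A sketch of the selection logic, with invented names.

#include <stdio.h>

struct kgdb_io_sketch {
        const char *name;
        int is_console;
};

/* Two build-time instances instead of patching .is_console at runtime. */
static const struct kgdb_io_sketch io_ops         = { "kgdboc", 0 };
static const struct kgdb_io_sketch io_ops_console = { "kgdboc", 1 };

static const struct kgdb_io_sketch *configure(int tty_is_console)
{
        return tty_is_console ? &io_ops_console : &io_ops;
}

int main(void)
{
        printf("console registration: %d\n", configure(1)->is_console);
        printf("plain registration:   %d\n", configure(0)->is_console);
        return 0;
}
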
36777diff -urNp linux-3.1.4/drivers/tty/serial/mfd.c linux-3.1.4/drivers/tty/serial/mfd.c
36778--- linux-3.1.4/drivers/tty/serial/mfd.c 2011-11-11 15:19:27.000000000 -0500
36779+++ linux-3.1.4/drivers/tty/serial/mfd.c 2011-11-16 18:39:08.000000000 -0500
36780@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci
36781 }
36782
36783 /* First 3 are UART ports, and the 4th is the DMA */
36784-static const struct pci_device_id pci_ids[] __devinitdata = {
36785+static const struct pci_device_id pci_ids[] __devinitconst = {
36786 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
36787 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
36788 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
36789diff -urNp linux-3.1.4/drivers/tty/serial/mrst_max3110.c linux-3.1.4/drivers/tty/serial/mrst_max3110.c
36790--- linux-3.1.4/drivers/tty/serial/mrst_max3110.c 2011-11-11 15:19:27.000000000 -0500
36791+++ linux-3.1.4/drivers/tty/serial/mrst_max3110.c 2011-11-16 18:40:29.000000000 -0500
36792@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
36793 int loop = 1, num, total = 0;
36794 u8 recv_buf[512], *pbuf;
36795
36796+ pax_track_stack();
36797+
36798 pbuf = recv_buf;
36799 do {
36800 num = max3110_read_multi(max, pbuf);
36801diff -urNp linux-3.1.4/drivers/tty/tty_io.c linux-3.1.4/drivers/tty/tty_io.c
36802--- linux-3.1.4/drivers/tty/tty_io.c 2011-11-11 15:19:27.000000000 -0500
36803+++ linux-3.1.4/drivers/tty/tty_io.c 2011-11-16 18:39:08.000000000 -0500
36804@@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
36805
36806 void tty_default_fops(struct file_operations *fops)
36807 {
36808- *fops = tty_fops;
36809+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
36810 }
36811
36812 /*
36813diff -urNp linux-3.1.4/drivers/tty/tty_ldisc.c linux-3.1.4/drivers/tty/tty_ldisc.c
36814--- linux-3.1.4/drivers/tty/tty_ldisc.c 2011-11-26 19:57:29.000000000 -0500
36815+++ linux-3.1.4/drivers/tty/tty_ldisc.c 2011-11-26 20:00:43.000000000 -0500
36816@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *
36817 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
36818 struct tty_ldisc_ops *ldo = ld->ops;
36819
36820- ldo->refcount--;
36821+ atomic_dec(&ldo->refcount);
36822 module_put(ldo->owner);
36823 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36824
36825@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct
36826 spin_lock_irqsave(&tty_ldisc_lock, flags);
36827 tty_ldiscs[disc] = new_ldisc;
36828 new_ldisc->num = disc;
36829- new_ldisc->refcount = 0;
36830+ atomic_set(&new_ldisc->refcount, 0);
36831 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36832
36833 return ret;
36834@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
36835 return -EINVAL;
36836
36837 spin_lock_irqsave(&tty_ldisc_lock, flags);
36838- if (tty_ldiscs[disc]->refcount)
36839+ if (atomic_read(&tty_ldiscs[disc]->refcount))
36840 ret = -EBUSY;
36841 else
36842 tty_ldiscs[disc] = NULL;
36843@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(i
36844 if (ldops) {
36845 ret = ERR_PTR(-EAGAIN);
36846 if (try_module_get(ldops->owner)) {
36847- ldops->refcount++;
36848+ atomic_inc(&ldops->refcount);
36849 ret = ldops;
36850 }
36851 }
36852@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_o
36853 unsigned long flags;
36854
36855 spin_lock_irqsave(&tty_ldisc_lock, flags);
36856- ldops->refcount--;
36857+ atomic_dec(&ldops->refcount);
36858 module_put(ldops->owner);
36859 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36860 }
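
The tty_ldisc hunk above, together with the matching n_tty change, converts the line-discipline ops refcount from a plain int manipulated under tty_ldisc_lock into an atomic counter. A C11 sketch of the resulting get/put pattern, with invented names.

#include <stdatomic.h>
#include <stdio.h>

struct ldisc_ops_sketch {
        atomic_int refcount;
};

static void get_ldops(struct ldisc_ops_sketch *ops) { atomic_fetch_add(&ops->refcount, 1); }
static void put_ldops(struct ldisc_ops_sketch *ops) { atomic_fetch_sub(&ops->refcount, 1); }

int main(void)
{
        struct ldisc_ops_sketch ops = { 0 };

        get_ldops(&ops);
        printf("busy: %d\n", atomic_load(&ops.refcount) != 0);  /* 1: unregister would be refused */
        put_ldops(&ops);
        printf("busy: %d\n", atomic_load(&ops.refcount) != 0);  /* 0 */
        return 0;
}
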
36861diff -urNp linux-3.1.4/drivers/tty/vt/keyboard.c linux-3.1.4/drivers/tty/vt/keyboard.c
36862--- linux-3.1.4/drivers/tty/vt/keyboard.c 2011-11-11 15:19:27.000000000 -0500
36863+++ linux-3.1.4/drivers/tty/vt/keyboard.c 2011-11-16 18:40:29.000000000 -0500
36864@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
36865 kbd->kbdmode == VC_OFF) &&
36866 value != KVAL(K_SAK))
36867 return; /* SAK is allowed even in raw mode */
36868+
36869+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
36870+ {
36871+ void *func = fn_handler[value];
36872+ if (func == fn_show_state || func == fn_show_ptregs ||
36873+ func == fn_show_mem)
36874+ return;
36875+ }
36876+#endif
36877+
36878 fn_handler[value](vc);
36879 }
36880
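
The keyboard hunk above filters the special-key handler table when the GRKERNSEC_PROC options are enabled, skipping the handlers that dump task state, registers or memory to the console. A sketch of that dispatch-time filter, with invented handler names.

#include <stdio.h>

typedef void (*fn_handler_sketch)(void);

static void fn_show_state_sketch(void) { puts("task state dump"); }
static void fn_show_mem_sketch(void)   { puts("memory dump"); }
static void fn_enter_sketch(void)      { puts("enter"); }

static fn_handler_sketch fn_handler[] = {
        fn_enter_sketch, fn_show_state_sketch, fn_show_mem_sketch,
};

static void k_spec_sketch(unsigned int value)
{
        fn_handler_sketch func = fn_handler[value];

        /* Mirrors the hunk: information-revealing handlers are silently skipped. */
        if (func == fn_show_state_sketch || func == fn_show_mem_sketch)
                return;
        fn_handler[value]();
}

int main(void)
{
        k_spec_sketch(1);       /* filtered: prints nothing   */
        k_spec_sketch(0);       /* allowed: prints "enter"    */
        return 0;
}
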
36881diff -urNp linux-3.1.4/drivers/tty/vt/vt.c linux-3.1.4/drivers/tty/vt/vt.c
36882--- linux-3.1.4/drivers/tty/vt/vt.c 2011-11-11 15:19:27.000000000 -0500
36883+++ linux-3.1.4/drivers/tty/vt/vt.c 2011-11-16 18:39:08.000000000 -0500
36884@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
36885
36886 static void notify_write(struct vc_data *vc, unsigned int unicode)
36887 {
36888- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
36889+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
36890 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
36891 }
36892
36893diff -urNp linux-3.1.4/drivers/tty/vt/vt_ioctl.c linux-3.1.4/drivers/tty/vt/vt_ioctl.c
36894--- linux-3.1.4/drivers/tty/vt/vt_ioctl.c 2011-11-11 15:19:27.000000000 -0500
36895+++ linux-3.1.4/drivers/tty/vt/vt_ioctl.c 2011-11-16 18:40:29.000000000 -0500
36896@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
36897 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
36898 return -EFAULT;
36899
36900- if (!capable(CAP_SYS_TTY_CONFIG))
36901- perm = 0;
36902-
36903 switch (cmd) {
36904 case KDGKBENT:
36905 key_map = key_maps[s];
36906@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
36907 val = (i ? K_HOLE : K_NOSUCHMAP);
36908 return put_user(val, &user_kbe->kb_value);
36909 case KDSKBENT:
36910+ if (!capable(CAP_SYS_TTY_CONFIG))
36911+ perm = 0;
36912+
36913 if (!perm)
36914 return -EPERM;
36915 if (!i && v == K_NOSUCHMAP) {
36916@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36917 int i, j, k;
36918 int ret;
36919
36920- if (!capable(CAP_SYS_TTY_CONFIG))
36921- perm = 0;
36922-
36923 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
36924 if (!kbs) {
36925 ret = -ENOMEM;
36926@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36927 kfree(kbs);
36928 return ((p && *p) ? -EOVERFLOW : 0);
36929 case KDSKBSENT:
36930+ if (!capable(CAP_SYS_TTY_CONFIG))
36931+ perm = 0;
36932+
36933 if (!perm) {
36934 ret = -EPERM;
36935 goto reterr;
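
The vt_ioctl hunks above move the capable(CAP_SYS_TTY_CONFIG) test from the top of the keymap ioctls into the KDSKBENT/KDSKBSENT branches, so the read-only KDGKBENT/KDGKBSENT queries succeed without consulting the capability at all; only the write paths demand it. A sketch of the reordered check, with invented names and plain integers in place of errno values.

#include <stdio.h>

#define KDGKBENT_SKETCH 0       /* read a key binding  */
#define KDSKBENT_SKETCH 1       /* write a key binding */

static int capable_sketch;      /* stand-in for capable(CAP_SYS_TTY_CONFIG) */

static int do_kdsk_ioctl_sketch(int cmd)
{
        switch (cmd) {
        case KDGKBENT_SKETCH:
                return 0;                       /* reads never consult the capability */
        case KDSKBENT_SKETCH:
                if (!capable_sketch)            /* checked only on the mutating path */
                        return -1;              /* stands in for -EPERM */
                return 0;
        }
        return -2;                              /* stands in for an unknown command */
}

int main(void)
{
        capable_sketch = 0;
        printf("get: %d\n", do_kdsk_ioctl_sketch(KDGKBENT_SKETCH));     /*  0 */
        printf("set: %d\n", do_kdsk_ioctl_sketch(KDSKBENT_SKETCH));     /* -1 */
        return 0;
}
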
36936diff -urNp linux-3.1.4/drivers/uio/uio.c linux-3.1.4/drivers/uio/uio.c
36937--- linux-3.1.4/drivers/uio/uio.c 2011-11-11 15:19:27.000000000 -0500
36938+++ linux-3.1.4/drivers/uio/uio.c 2011-11-16 18:39:08.000000000 -0500
36939@@ -25,6 +25,7 @@
36940 #include <linux/kobject.h>
36941 #include <linux/cdev.h>
36942 #include <linux/uio_driver.h>
36943+#include <asm/local.h>
36944
36945 #define UIO_MAX_DEVICES (1U << MINORBITS)
36946
36947@@ -32,10 +33,10 @@ struct uio_device {
36948 struct module *owner;
36949 struct device *dev;
36950 int minor;
36951- atomic_t event;
36952+ atomic_unchecked_t event;
36953 struct fasync_struct *async_queue;
36954 wait_queue_head_t wait;
36955- int vma_count;
36956+ local_t vma_count;
36957 struct uio_info *info;
36958 struct kobject *map_dir;
36959 struct kobject *portio_dir;
36960@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
36961 struct device_attribute *attr, char *buf)
36962 {
36963 struct uio_device *idev = dev_get_drvdata(dev);
36964- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36965+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36966 }
36967
36968 static struct device_attribute uio_class_attributes[] = {
36969@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
36970 {
36971 struct uio_device *idev = info->uio_dev;
36972
36973- atomic_inc(&idev->event);
36974+ atomic_inc_unchecked(&idev->event);
36975 wake_up_interruptible(&idev->wait);
36976 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36977 }
36978@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
36979 }
36980
36981 listener->dev = idev;
36982- listener->event_count = atomic_read(&idev->event);
36983+ listener->event_count = atomic_read_unchecked(&idev->event);
36984 filep->private_data = listener;
36985
36986 if (idev->info->open) {
36987@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
36988 return -EIO;
36989
36990 poll_wait(filep, &idev->wait, wait);
36991- if (listener->event_count != atomic_read(&idev->event))
36992+ if (listener->event_count != atomic_read_unchecked(&idev->event))
36993 return POLLIN | POLLRDNORM;
36994 return 0;
36995 }
36996@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
36997 do {
36998 set_current_state(TASK_INTERRUPTIBLE);
36999
37000- event_count = atomic_read(&idev->event);
37001+ event_count = atomic_read_unchecked(&idev->event);
37002 if (event_count != listener->event_count) {
37003 if (copy_to_user(buf, &event_count, count))
37004 retval = -EFAULT;
37005@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
37006 static void uio_vma_open(struct vm_area_struct *vma)
37007 {
37008 struct uio_device *idev = vma->vm_private_data;
37009- idev->vma_count++;
37010+ local_inc(&idev->vma_count);
37011 }
37012
37013 static void uio_vma_close(struct vm_area_struct *vma)
37014 {
37015 struct uio_device *idev = vma->vm_private_data;
37016- idev->vma_count--;
37017+ local_dec(&idev->vma_count);
37018 }
37019
37020 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37021@@ -823,7 +824,7 @@ int __uio_register_device(struct module
37022 idev->owner = owner;
37023 idev->info = info;
37024 init_waitqueue_head(&idev->wait);
37025- atomic_set(&idev->event, 0);
37026+ atomic_set_unchecked(&idev->event, 0);
37027
37028 ret = uio_get_minor(idev);
37029 if (ret)
37030diff -urNp linux-3.1.4/drivers/usb/atm/cxacru.c linux-3.1.4/drivers/usb/atm/cxacru.c
37031--- linux-3.1.4/drivers/usb/atm/cxacru.c 2011-11-11 15:19:27.000000000 -0500
37032+++ linux-3.1.4/drivers/usb/atm/cxacru.c 2011-11-16 18:39:08.000000000 -0500
37033@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
37034 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
37035 if (ret < 2)
37036 return -EINVAL;
37037- if (index < 0 || index > 0x7f)
37038+ if (index > 0x7f)
37039 return -EINVAL;
37040 pos += tmp;
37041
37042diff -urNp linux-3.1.4/drivers/usb/atm/usbatm.c linux-3.1.4/drivers/usb/atm/usbatm.c
37043--- linux-3.1.4/drivers/usb/atm/usbatm.c 2011-11-11 15:19:27.000000000 -0500
37044+++ linux-3.1.4/drivers/usb/atm/usbatm.c 2011-11-16 18:39:08.000000000 -0500
37045@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
37046 if (printk_ratelimit())
37047 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37048 __func__, vpi, vci);
37049- atomic_inc(&vcc->stats->rx_err);
37050+ atomic_inc_unchecked(&vcc->stats->rx_err);
37051 return;
37052 }
37053
37054@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
37055 if (length > ATM_MAX_AAL5_PDU) {
37056 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37057 __func__, length, vcc);
37058- atomic_inc(&vcc->stats->rx_err);
37059+ atomic_inc_unchecked(&vcc->stats->rx_err);
37060 goto out;
37061 }
37062
37063@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
37064 if (sarb->len < pdu_length) {
37065 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37066 __func__, pdu_length, sarb->len, vcc);
37067- atomic_inc(&vcc->stats->rx_err);
37068+ atomic_inc_unchecked(&vcc->stats->rx_err);
37069 goto out;
37070 }
37071
37072 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37073 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37074 __func__, vcc);
37075- atomic_inc(&vcc->stats->rx_err);
37076+ atomic_inc_unchecked(&vcc->stats->rx_err);
37077 goto out;
37078 }
37079
37080@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
37081 if (printk_ratelimit())
37082 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37083 __func__, length);
37084- atomic_inc(&vcc->stats->rx_drop);
37085+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37086 goto out;
37087 }
37088
37089@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
37090
37091 vcc->push(vcc, skb);
37092
37093- atomic_inc(&vcc->stats->rx);
37094+ atomic_inc_unchecked(&vcc->stats->rx);
37095 out:
37096 skb_trim(sarb, 0);
37097 }
37098@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned l
37099 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37100
37101 usbatm_pop(vcc, skb);
37102- atomic_inc(&vcc->stats->tx);
37103+ atomic_inc_unchecked(&vcc->stats->tx);
37104
37105 skb = skb_dequeue(&instance->sndqueue);
37106 }
37107@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
37108 if (!left--)
37109 return sprintf(page,
37110 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37111- atomic_read(&atm_dev->stats.aal5.tx),
37112- atomic_read(&atm_dev->stats.aal5.tx_err),
37113- atomic_read(&atm_dev->stats.aal5.rx),
37114- atomic_read(&atm_dev->stats.aal5.rx_err),
37115- atomic_read(&atm_dev->stats.aal5.rx_drop));
37116+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37117+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37118+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37119+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37120+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37121
37122 if (!left--) {
37123 if (instance->disconnected)
37124diff -urNp linux-3.1.4/drivers/usb/core/devices.c linux-3.1.4/drivers/usb/core/devices.c
37125--- linux-3.1.4/drivers/usb/core/devices.c 2011-11-11 15:19:27.000000000 -0500
37126+++ linux-3.1.4/drivers/usb/core/devices.c 2011-11-16 18:39:08.000000000 -0500
37127@@ -126,7 +126,7 @@ static const char format_endpt[] =
37128 * time it gets called.
37129 */
37130 static struct device_connect_event {
37131- atomic_t count;
37132+ atomic_unchecked_t count;
37133 wait_queue_head_t wait;
37134 } device_event = {
37135 .count = ATOMIC_INIT(1),
37136@@ -164,7 +164,7 @@ static const struct class_info clas_info
37137
37138 void usbfs_conn_disc_event(void)
37139 {
37140- atomic_add(2, &device_event.count);
37141+ atomic_add_unchecked(2, &device_event.count);
37142 wake_up(&device_event.wait);
37143 }
37144
37145@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
37146
37147 poll_wait(file, &device_event.wait, wait);
37148
37149- event_count = atomic_read(&device_event.count);
37150+ event_count = atomic_read_unchecked(&device_event.count);
37151 if (file->f_version != event_count) {
37152 file->f_version = event_count;
37153 return POLLIN | POLLRDNORM;
37154diff -urNp linux-3.1.4/drivers/usb/core/message.c linux-3.1.4/drivers/usb/core/message.c
37155--- linux-3.1.4/drivers/usb/core/message.c 2011-11-11 15:19:27.000000000 -0500
37156+++ linux-3.1.4/drivers/usb/core/message.c 2011-11-16 18:39:08.000000000 -0500
37157@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
37158 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37159 if (buf) {
37160 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37161- if (len > 0) {
37162- smallbuf = kmalloc(++len, GFP_NOIO);
37163+ if (len++ > 0) {
37164+ smallbuf = kmalloc(len, GFP_NOIO);
37165 if (!smallbuf)
37166 return buf;
37167 memcpy(smallbuf, buf, len);
37168diff -urNp linux-3.1.4/drivers/usb/early/ehci-dbgp.c linux-3.1.4/drivers/usb/early/ehci-dbgp.c
37169--- linux-3.1.4/drivers/usb/early/ehci-dbgp.c 2011-11-11 15:19:27.000000000 -0500
37170+++ linux-3.1.4/drivers/usb/early/ehci-dbgp.c 2011-11-16 18:39:08.000000000 -0500
37171@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
37172
37173 #ifdef CONFIG_KGDB
37174 static struct kgdb_io kgdbdbgp_io_ops;
37175-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
37176+static struct kgdb_io kgdbdbgp_io_ops_console;
37177+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
37178 #else
37179 #define dbgp_kgdb_mode (0)
37180 #endif
37181@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
37182 .write_char = kgdbdbgp_write_char,
37183 };
37184
37185+static struct kgdb_io kgdbdbgp_io_ops_console = {
37186+ .name = "kgdbdbgp",
37187+ .read_char = kgdbdbgp_read_char,
37188+ .write_char = kgdbdbgp_write_char,
37189+ .is_console = 1
37190+};
37191+
37192 static int kgdbdbgp_wait_time;
37193
37194 static int __init kgdbdbgp_parse_config(char *str)
37195@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
37196 ptr++;
37197 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
37198 }
37199- kgdb_register_io_module(&kgdbdbgp_io_ops);
37200- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
37201+ if (early_dbgp_console.index != -1)
37202+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
37203+ else
37204+ kgdb_register_io_module(&kgdbdbgp_io_ops);
37205
37206 return 0;
37207 }
37208diff -urNp linux-3.1.4/drivers/usb/host/xhci-mem.c linux-3.1.4/drivers/usb/host/xhci-mem.c
37209--- linux-3.1.4/drivers/usb/host/xhci-mem.c 2011-11-26 19:57:29.000000000 -0500
37210+++ linux-3.1.4/drivers/usb/host/xhci-mem.c 2011-11-26 20:00:43.000000000 -0500
37211@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
37212 unsigned int num_tests;
37213 int i, ret;
37214
37215+ pax_track_stack();
37216+
37217 num_tests = ARRAY_SIZE(simple_test_vector);
37218 for (i = 0; i < num_tests; i++) {
37219 ret = xhci_test_trb_in_td(xhci,
37220diff -urNp linux-3.1.4/drivers/usb/wusbcore/wa-hc.h linux-3.1.4/drivers/usb/wusbcore/wa-hc.h
37221--- linux-3.1.4/drivers/usb/wusbcore/wa-hc.h 2011-11-11 15:19:27.000000000 -0500
37222+++ linux-3.1.4/drivers/usb/wusbcore/wa-hc.h 2011-11-16 18:39:08.000000000 -0500
37223@@ -192,7 +192,7 @@ struct wahc {
37224 struct list_head xfer_delayed_list;
37225 spinlock_t xfer_list_lock;
37226 struct work_struct xfer_work;
37227- atomic_t xfer_id_count;
37228+ atomic_unchecked_t xfer_id_count;
37229 };
37230
37231
37232@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
37233 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37234 spin_lock_init(&wa->xfer_list_lock);
37235 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37236- atomic_set(&wa->xfer_id_count, 1);
37237+ atomic_set_unchecked(&wa->xfer_id_count, 1);
37238 }
37239
37240 /**
37241diff -urNp linux-3.1.4/drivers/usb/wusbcore/wa-xfer.c linux-3.1.4/drivers/usb/wusbcore/wa-xfer.c
37242--- linux-3.1.4/drivers/usb/wusbcore/wa-xfer.c 2011-11-11 15:19:27.000000000 -0500
37243+++ linux-3.1.4/drivers/usb/wusbcore/wa-xfer.c 2011-11-16 18:39:08.000000000 -0500
37244@@ -295,7 +295,7 @@ out:
37245 */
37246 static void wa_xfer_id_init(struct wa_xfer *xfer)
37247 {
37248- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
37249+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
37250 }
37251
37252 /*
37253diff -urNp linux-3.1.4/drivers/vhost/vhost.c linux-3.1.4/drivers/vhost/vhost.c
37254--- linux-3.1.4/drivers/vhost/vhost.c 2011-11-11 15:19:27.000000000 -0500
37255+++ linux-3.1.4/drivers/vhost/vhost.c 2011-11-16 18:39:08.000000000 -0500
37256@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhos
37257 return 0;
37258 }
37259
37260-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
37261+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
37262 {
37263 struct file *eventfp, *filep = NULL,
37264 *pollstart = NULL, *pollstop = NULL;
37265diff -urNp linux-3.1.4/drivers/video/aty/aty128fb.c linux-3.1.4/drivers/video/aty/aty128fb.c
37266--- linux-3.1.4/drivers/video/aty/aty128fb.c 2011-11-11 15:19:27.000000000 -0500
37267+++ linux-3.1.4/drivers/video/aty/aty128fb.c 2011-11-16 18:39:08.000000000 -0500
37268@@ -148,7 +148,7 @@ enum {
37269 };
37270
37271 /* Must match above enum */
37272-static const char *r128_family[] __devinitdata = {
37273+static const char *r128_family[] __devinitconst = {
37274 "AGP",
37275 "PCI",
37276 "PRO AGP",
37277diff -urNp linux-3.1.4/drivers/video/fbcmap.c linux-3.1.4/drivers/video/fbcmap.c
37278--- linux-3.1.4/drivers/video/fbcmap.c 2011-11-11 15:19:27.000000000 -0500
37279+++ linux-3.1.4/drivers/video/fbcmap.c 2011-11-16 18:39:08.000000000 -0500
37280@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
37281 rc = -ENODEV;
37282 goto out;
37283 }
37284- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
37285- !info->fbops->fb_setcmap)) {
37286+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
37287 rc = -EINVAL;
37288 goto out1;
37289 }
37290diff -urNp linux-3.1.4/drivers/video/fbmem.c linux-3.1.4/drivers/video/fbmem.c
37291--- linux-3.1.4/drivers/video/fbmem.c 2011-11-11 15:19:27.000000000 -0500
37292+++ linux-3.1.4/drivers/video/fbmem.c 2011-11-16 18:40:29.000000000 -0500
37293@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
37294 image->dx += image->width + 8;
37295 }
37296 } else if (rotate == FB_ROTATE_UD) {
37297- for (x = 0; x < num && image->dx >= 0; x++) {
37298+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
37299 info->fbops->fb_imageblit(info, image);
37300 image->dx -= image->width + 8;
37301 }
37302@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
37303 image->dy += image->height + 8;
37304 }
37305 } else if (rotate == FB_ROTATE_CCW) {
37306- for (x = 0; x < num && image->dy >= 0; x++) {
37307+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
37308 info->fbops->fb_imageblit(info, image);
37309 image->dy -= image->height + 8;
37310 }
37311@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
37312 int flags = info->flags;
37313 int ret = 0;
37314
37315+ pax_track_stack();
37316+
37317 if (var->activate & FB_ACTIVATE_INV_MODE) {
37318 struct fb_videomode mode1, mode2;
37319
37320@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
37321 void __user *argp = (void __user *)arg;
37322 long ret = 0;
37323
37324+ pax_track_stack();
37325+
37326 switch (cmd) {
37327 case FBIOGET_VSCREENINFO:
37328 if (!lock_fb_info(info))
37329@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
37330 return -EFAULT;
37331 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
37332 return -EINVAL;
37333- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
37334+ if (con2fb.framebuffer >= FB_MAX)
37335 return -EINVAL;
37336 if (!registered_fb[con2fb.framebuffer])
37337 request_module("fb%d", con2fb.framebuffer);
37338diff -urNp linux-3.1.4/drivers/video/geode/gx1fb_core.c linux-3.1.4/drivers/video/geode/gx1fb_core.c
37339--- linux-3.1.4/drivers/video/geode/gx1fb_core.c 2011-11-11 15:19:27.000000000 -0500
37340+++ linux-3.1.4/drivers/video/geode/gx1fb_core.c 2011-11-16 18:39:08.000000000 -0500
37341@@ -29,7 +29,7 @@ static int crt_option = 1;
37342 static char panel_option[32] = "";
37343
37344 /* Modes relevant to the GX1 (taken from modedb.c) */
37345-static const struct fb_videomode __devinitdata gx1_modedb[] = {
37346+static const struct fb_videomode __devinitconst gx1_modedb[] = {
37347 /* 640x480-60 VESA */
37348 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
37349 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
37350diff -urNp linux-3.1.4/drivers/video/gxt4500.c linux-3.1.4/drivers/video/gxt4500.c
37351--- linux-3.1.4/drivers/video/gxt4500.c 2011-11-11 15:19:27.000000000 -0500
37352+++ linux-3.1.4/drivers/video/gxt4500.c 2011-11-16 18:39:08.000000000 -0500
37353@@ -156,7 +156,7 @@ struct gxt4500_par {
37354 static char *mode_option;
37355
37356 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
37357-static const struct fb_videomode defaultmode __devinitdata = {
37358+static const struct fb_videomode defaultmode __devinitconst = {
37359 .refresh = 60,
37360 .xres = 1280,
37361 .yres = 1024,
37362@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
37363 return 0;
37364 }
37365
37366-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
37367+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
37368 .id = "IBM GXT4500P",
37369 .type = FB_TYPE_PACKED_PIXELS,
37370 .visual = FB_VISUAL_PSEUDOCOLOR,
37371diff -urNp linux-3.1.4/drivers/video/i810/i810_accel.c linux-3.1.4/drivers/video/i810/i810_accel.c
37372--- linux-3.1.4/drivers/video/i810/i810_accel.c 2011-11-11 15:19:27.000000000 -0500
37373+++ linux-3.1.4/drivers/video/i810/i810_accel.c 2011-11-16 18:39:08.000000000 -0500
37374@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
37375 }
37376 }
37377 printk("ringbuffer lockup!!!\n");
37378+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
37379 i810_report_error(mmio);
37380 par->dev_flags |= LOCKUP;
37381 info->pixmap.scan_align = 1;
37382diff -urNp linux-3.1.4/drivers/video/i810/i810_main.c linux-3.1.4/drivers/video/i810/i810_main.c
37383--- linux-3.1.4/drivers/video/i810/i810_main.c 2011-11-11 15:19:27.000000000 -0500
37384+++ linux-3.1.4/drivers/video/i810/i810_main.c 2011-11-16 18:39:08.000000000 -0500
37385@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
37386 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
37387
37388 /* PCI */
37389-static const char *i810_pci_list[] __devinitdata = {
37390+static const char *i810_pci_list[] __devinitconst = {
37391 "Intel(R) 810 Framebuffer Device" ,
37392 "Intel(R) 810-DC100 Framebuffer Device" ,
37393 "Intel(R) 810E Framebuffer Device" ,
37394diff -urNp linux-3.1.4/drivers/video/jz4740_fb.c linux-3.1.4/drivers/video/jz4740_fb.c
37395--- linux-3.1.4/drivers/video/jz4740_fb.c 2011-11-11 15:19:27.000000000 -0500
37396+++ linux-3.1.4/drivers/video/jz4740_fb.c 2011-11-16 18:39:08.000000000 -0500
37397@@ -136,7 +136,7 @@ struct jzfb {
37398 uint32_t pseudo_palette[16];
37399 };
37400
37401-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
37402+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
37403 .id = "JZ4740 FB",
37404 .type = FB_TYPE_PACKED_PIXELS,
37405 .visual = FB_VISUAL_TRUECOLOR,
37406diff -urNp linux-3.1.4/drivers/video/logo/logo_linux_clut224.ppm linux-3.1.4/drivers/video/logo/logo_linux_clut224.ppm
37407--- linux-3.1.4/drivers/video/logo/logo_linux_clut224.ppm 2011-11-11 15:19:27.000000000 -0500
37408+++ linux-3.1.4/drivers/video/logo/logo_linux_clut224.ppm 2011-11-16 18:40:29.000000000 -0500
37409@@ -1,1604 +1,1123 @@
37410 P3
37411-# Standard 224-color Linux logo
37412 80 80
37413 255
[pixel raster elided: the remaining removal lines of this hunk delete the standard 224-color 80x80 Linux logo pixel data (1,604 lines replaced by 1,123 per the hunk header above); the raw RGB triplets carry no further reviewable content]
38705-253 253 253 253 253 253 253 253 253 253 253 253
38706-253 253 253 221 221 221 86 86 86 156 107 11
38707-216 158 10 236 178 12 242 186 14 246 186 14
38708-242 186 14 239 182 13 239 182 13 242 186 14
38709-242 186 14 246 186 14 246 190 14 246 190 14
38710-246 190 14 246 190 14 246 190 14 246 190 14
38711-242 186 14 225 175 15 142 122 72 66 66 66
38712- 30 30 30 10 10 10 0 0 0 0 0 0
38713- 0 0 0 0 0 0 0 0 0 0 0 0
38714- 0 0 0 0 0 0 0 0 0 0 0 0
38715- 0 0 0 0 0 0 0 0 0 6 6 6
38716- 26 26 26 70 70 70 163 133 67 210 150 10
38717-236 178 12 246 190 14 246 190 14 246 190 14
38718-246 190 14 246 190 14 246 190 14 246 190 14
38719-246 190 14 246 190 14 246 190 14 246 190 14
38720-246 190 14 246 190 14 246 190 14 246 190 14
38721-232 195 16 121 92 8 34 34 34 106 106 106
38722-221 221 221 253 253 253 253 253 253 253 253 253
38723-253 253 253 253 253 253 253 253 253 253 253 253
38724-253 253 253 253 253 253 253 253 253 253 253 253
38725-253 253 253 253 253 253 253 253 253 253 253 253
38726-242 242 242 82 82 82 18 14 6 163 110 8
38727-216 158 10 236 178 12 242 186 14 246 190 14
38728-246 190 14 246 190 14 246 190 14 246 190 14
38729-246 190 14 246 190 14 246 190 14 246 190 14
38730-246 190 14 246 190 14 246 190 14 246 190 14
38731-246 190 14 246 190 14 242 186 14 163 133 67
38732- 46 46 46 18 18 18 6 6 6 0 0 0
38733- 0 0 0 0 0 0 0 0 0 0 0 0
38734- 0 0 0 0 0 0 0 0 0 0 0 0
38735- 0 0 0 0 0 0 0 0 0 10 10 10
38736- 30 30 30 78 78 78 163 133 67 210 150 10
38737-236 178 12 246 186 14 246 190 14 246 190 14
38738-246 190 14 246 190 14 246 190 14 246 190 14
38739-246 190 14 246 190 14 246 190 14 246 190 14
38740-246 190 14 246 190 14 246 190 14 246 190 14
38741-241 196 14 215 174 15 190 178 144 253 253 253
38742-253 253 253 253 253 253 253 253 253 253 253 253
38743-253 253 253 253 253 253 253 253 253 253 253 253
38744-253 253 253 253 253 253 253 253 253 253 253 253
38745-253 253 253 253 253 253 253 253 253 218 218 218
38746- 58 58 58 2 2 6 22 18 6 167 114 7
38747-216 158 10 236 178 12 246 186 14 246 190 14
38748-246 190 14 246 190 14 246 190 14 246 190 14
38749-246 190 14 246 190 14 246 190 14 246 190 14
38750-246 190 14 246 190 14 246 190 14 246 190 14
38751-246 190 14 246 186 14 242 186 14 190 150 46
38752- 54 54 54 22 22 22 6 6 6 0 0 0
38753- 0 0 0 0 0 0 0 0 0 0 0 0
38754- 0 0 0 0 0 0 0 0 0 0 0 0
38755- 0 0 0 0 0 0 0 0 0 14 14 14
38756- 38 38 38 86 86 86 180 133 36 213 154 11
38757-236 178 12 246 186 14 246 190 14 246 190 14
38758-246 190 14 246 190 14 246 190 14 246 190 14
38759-246 190 14 246 190 14 246 190 14 246 190 14
38760-246 190 14 246 190 14 246 190 14 246 190 14
38761-246 190 14 232 195 16 190 146 13 214 214 214
38762-253 253 253 253 253 253 253 253 253 253 253 253
38763-253 253 253 253 253 253 253 253 253 253 253 253
38764-253 253 253 253 253 253 253 253 253 253 253 253
38765-253 253 253 250 250 250 170 170 170 26 26 26
38766- 2 2 6 2 2 6 37 26 9 163 110 8
38767-219 162 10 239 182 13 246 186 14 246 190 14
38768-246 190 14 246 190 14 246 190 14 246 190 14
38769-246 190 14 246 190 14 246 190 14 246 190 14
38770-246 190 14 246 190 14 246 190 14 246 190 14
38771-246 186 14 236 178 12 224 166 10 142 122 72
38772- 46 46 46 18 18 18 6 6 6 0 0 0
38773- 0 0 0 0 0 0 0 0 0 0 0 0
38774- 0 0 0 0 0 0 0 0 0 0 0 0
38775- 0 0 0 0 0 0 6 6 6 18 18 18
38776- 50 50 50 109 106 95 192 133 9 224 166 10
38777-242 186 14 246 190 14 246 190 14 246 190 14
38778-246 190 14 246 190 14 246 190 14 246 190 14
38779-246 190 14 246 190 14 246 190 14 246 190 14
38780-246 190 14 246 190 14 246 190 14 246 190 14
38781-242 186 14 226 184 13 210 162 10 142 110 46
38782-226 226 226 253 253 253 253 253 253 253 253 253
38783-253 253 253 253 253 253 253 253 253 253 253 253
38784-253 253 253 253 253 253 253 253 253 253 253 253
38785-198 198 198 66 66 66 2 2 6 2 2 6
38786- 2 2 6 2 2 6 50 34 6 156 107 11
38787-219 162 10 239 182 13 246 186 14 246 190 14
38788-246 190 14 246 190 14 246 190 14 246 190 14
38789-246 190 14 246 190 14 246 190 14 246 190 14
38790-246 190 14 246 190 14 246 190 14 242 186 14
38791-234 174 13 213 154 11 154 122 46 66 66 66
38792- 30 30 30 10 10 10 0 0 0 0 0 0
38793- 0 0 0 0 0 0 0 0 0 0 0 0
38794- 0 0 0 0 0 0 0 0 0 0 0 0
38795- 0 0 0 0 0 0 6 6 6 22 22 22
38796- 58 58 58 154 121 60 206 145 10 234 174 13
38797-242 186 14 246 186 14 246 190 14 246 190 14
38798-246 190 14 246 190 14 246 190 14 246 190 14
38799-246 190 14 246 190 14 246 190 14 246 190 14
38800-246 190 14 246 190 14 246 190 14 246 190 14
38801-246 186 14 236 178 12 210 162 10 163 110 8
38802- 61 42 6 138 138 138 218 218 218 250 250 250
38803-253 253 253 253 253 253 253 253 253 250 250 250
38804-242 242 242 210 210 210 144 144 144 66 66 66
38805- 6 6 6 2 2 6 2 2 6 2 2 6
38806- 2 2 6 2 2 6 61 42 6 163 110 8
38807-216 158 10 236 178 12 246 190 14 246 190 14
38808-246 190 14 246 190 14 246 190 14 246 190 14
38809-246 190 14 246 190 14 246 190 14 246 190 14
38810-246 190 14 239 182 13 230 174 11 216 158 10
38811-190 142 34 124 112 88 70 70 70 38 38 38
38812- 18 18 18 6 6 6 0 0 0 0 0 0
38813- 0 0 0 0 0 0 0 0 0 0 0 0
38814- 0 0 0 0 0 0 0 0 0 0 0 0
38815- 0 0 0 0 0 0 6 6 6 22 22 22
38816- 62 62 62 168 124 44 206 145 10 224 166 10
38817-236 178 12 239 182 13 242 186 14 242 186 14
38818-246 186 14 246 190 14 246 190 14 246 190 14
38819-246 190 14 246 190 14 246 190 14 246 190 14
38820-246 190 14 246 190 14 246 190 14 246 190 14
38821-246 190 14 236 178 12 216 158 10 175 118 6
38822- 80 54 7 2 2 6 6 6 6 30 30 30
38823- 54 54 54 62 62 62 50 50 50 38 38 38
38824- 14 14 14 2 2 6 2 2 6 2 2 6
38825- 2 2 6 2 2 6 2 2 6 2 2 6
38826- 2 2 6 6 6 6 80 54 7 167 114 7
38827-213 154 11 236 178 12 246 190 14 246 190 14
38828-246 190 14 246 190 14 246 190 14 246 190 14
38829-246 190 14 242 186 14 239 182 13 239 182 13
38830-230 174 11 210 150 10 174 135 50 124 112 88
38831- 82 82 82 54 54 54 34 34 34 18 18 18
38832- 6 6 6 0 0 0 0 0 0 0 0 0
38833- 0 0 0 0 0 0 0 0 0 0 0 0
38834- 0 0 0 0 0 0 0 0 0 0 0 0
38835- 0 0 0 0 0 0 6 6 6 18 18 18
38836- 50 50 50 158 118 36 192 133 9 200 144 11
38837-216 158 10 219 162 10 224 166 10 226 170 11
38838-230 174 11 236 178 12 239 182 13 239 182 13
38839-242 186 14 246 186 14 246 190 14 246 190 14
38840-246 190 14 246 190 14 246 190 14 246 190 14
38841-246 186 14 230 174 11 210 150 10 163 110 8
38842-104 69 6 10 10 10 2 2 6 2 2 6
38843- 2 2 6 2 2 6 2 2 6 2 2 6
38844- 2 2 6 2 2 6 2 2 6 2 2 6
38845- 2 2 6 2 2 6 2 2 6 2 2 6
38846- 2 2 6 6 6 6 91 60 6 167 114 7
38847-206 145 10 230 174 11 242 186 14 246 190 14
38848-246 190 14 246 190 14 246 186 14 242 186 14
38849-239 182 13 230 174 11 224 166 10 213 154 11
38850-180 133 36 124 112 88 86 86 86 58 58 58
38851- 38 38 38 22 22 22 10 10 10 6 6 6
38852- 0 0 0 0 0 0 0 0 0 0 0 0
38853- 0 0 0 0 0 0 0 0 0 0 0 0
38854- 0 0 0 0 0 0 0 0 0 0 0 0
38855- 0 0 0 0 0 0 0 0 0 14 14 14
38856- 34 34 34 70 70 70 138 110 50 158 118 36
38857-167 114 7 180 123 7 192 133 9 197 138 11
38858-200 144 11 206 145 10 213 154 11 219 162 10
38859-224 166 10 230 174 11 239 182 13 242 186 14
38860-246 186 14 246 186 14 246 186 14 246 186 14
38861-239 182 13 216 158 10 185 133 11 152 99 6
38862-104 69 6 18 14 6 2 2 6 2 2 6
38863- 2 2 6 2 2 6 2 2 6 2 2 6
38864- 2 2 6 2 2 6 2 2 6 2 2 6
38865- 2 2 6 2 2 6 2 2 6 2 2 6
38866- 2 2 6 6 6 6 80 54 7 152 99 6
38867-192 133 9 219 162 10 236 178 12 239 182 13
38868-246 186 14 242 186 14 239 182 13 236 178 12
38869-224 166 10 206 145 10 192 133 9 154 121 60
38870- 94 94 94 62 62 62 42 42 42 22 22 22
38871- 14 14 14 6 6 6 0 0 0 0 0 0
38872- 0 0 0 0 0 0 0 0 0 0 0 0
38873- 0 0 0 0 0 0 0 0 0 0 0 0
38874- 0 0 0 0 0 0 0 0 0 0 0 0
38875- 0 0 0 0 0 0 0 0 0 6 6 6
38876- 18 18 18 34 34 34 58 58 58 78 78 78
38877-101 98 89 124 112 88 142 110 46 156 107 11
38878-163 110 8 167 114 7 175 118 6 180 123 7
38879-185 133 11 197 138 11 210 150 10 219 162 10
38880-226 170 11 236 178 12 236 178 12 234 174 13
38881-219 162 10 197 138 11 163 110 8 130 83 6
38882- 91 60 6 10 10 10 2 2 6 2 2 6
38883- 18 18 18 38 38 38 38 38 38 38 38 38
38884- 38 38 38 38 38 38 38 38 38 38 38 38
38885- 38 38 38 38 38 38 26 26 26 2 2 6
38886- 2 2 6 6 6 6 70 47 6 137 92 6
38887-175 118 6 200 144 11 219 162 10 230 174 11
38888-234 174 13 230 174 11 219 162 10 210 150 10
38889-192 133 9 163 110 8 124 112 88 82 82 82
38890- 50 50 50 30 30 30 14 14 14 6 6 6
38891- 0 0 0 0 0 0 0 0 0 0 0 0
38892- 0 0 0 0 0 0 0 0 0 0 0 0
38893- 0 0 0 0 0 0 0 0 0 0 0 0
38894- 0 0 0 0 0 0 0 0 0 0 0 0
38895- 0 0 0 0 0 0 0 0 0 0 0 0
38896- 6 6 6 14 14 14 22 22 22 34 34 34
38897- 42 42 42 58 58 58 74 74 74 86 86 86
38898-101 98 89 122 102 70 130 98 46 121 87 25
38899-137 92 6 152 99 6 163 110 8 180 123 7
38900-185 133 11 197 138 11 206 145 10 200 144 11
38901-180 123 7 156 107 11 130 83 6 104 69 6
38902- 50 34 6 54 54 54 110 110 110 101 98 89
38903- 86 86 86 82 82 82 78 78 78 78 78 78
38904- 78 78 78 78 78 78 78 78 78 78 78 78
38905- 78 78 78 82 82 82 86 86 86 94 94 94
38906-106 106 106 101 101 101 86 66 34 124 80 6
38907-156 107 11 180 123 7 192 133 9 200 144 11
38908-206 145 10 200 144 11 192 133 9 175 118 6
38909-139 102 15 109 106 95 70 70 70 42 42 42
38910- 22 22 22 10 10 10 0 0 0 0 0 0
38911- 0 0 0 0 0 0 0 0 0 0 0 0
38912- 0 0 0 0 0 0 0 0 0 0 0 0
38913- 0 0 0 0 0 0 0 0 0 0 0 0
38914- 0 0 0 0 0 0 0 0 0 0 0 0
38915- 0 0 0 0 0 0 0 0 0 0 0 0
38916- 0 0 0 0 0 0 6 6 6 10 10 10
38917- 14 14 14 22 22 22 30 30 30 38 38 38
38918- 50 50 50 62 62 62 74 74 74 90 90 90
38919-101 98 89 112 100 78 121 87 25 124 80 6
38920-137 92 6 152 99 6 152 99 6 152 99 6
38921-138 86 6 124 80 6 98 70 6 86 66 30
38922-101 98 89 82 82 82 58 58 58 46 46 46
38923- 38 38 38 34 34 34 34 34 34 34 34 34
38924- 34 34 34 34 34 34 34 34 34 34 34 34
38925- 34 34 34 34 34 34 38 38 38 42 42 42
38926- 54 54 54 82 82 82 94 86 76 91 60 6
38927-134 86 6 156 107 11 167 114 7 175 118 6
38928-175 118 6 167 114 7 152 99 6 121 87 25
38929-101 98 89 62 62 62 34 34 34 18 18 18
38930- 6 6 6 0 0 0 0 0 0 0 0 0
38931- 0 0 0 0 0 0 0 0 0 0 0 0
38932- 0 0 0 0 0 0 0 0 0 0 0 0
38933- 0 0 0 0 0 0 0 0 0 0 0 0
38934- 0 0 0 0 0 0 0 0 0 0 0 0
38935- 0 0 0 0 0 0 0 0 0 0 0 0
38936- 0 0 0 0 0 0 0 0 0 0 0 0
38937- 0 0 0 6 6 6 6 6 6 10 10 10
38938- 18 18 18 22 22 22 30 30 30 42 42 42
38939- 50 50 50 66 66 66 86 86 86 101 98 89
38940-106 86 58 98 70 6 104 69 6 104 69 6
38941-104 69 6 91 60 6 82 62 34 90 90 90
38942- 62 62 62 38 38 38 22 22 22 14 14 14
38943- 10 10 10 10 10 10 10 10 10 10 10 10
38944- 10 10 10 10 10 10 6 6 6 10 10 10
38945- 10 10 10 10 10 10 10 10 10 14 14 14
38946- 22 22 22 42 42 42 70 70 70 89 81 66
38947- 80 54 7 104 69 6 124 80 6 137 92 6
38948-134 86 6 116 81 8 100 82 52 86 86 86
38949- 58 58 58 30 30 30 14 14 14 6 6 6
38950- 0 0 0 0 0 0 0 0 0 0 0 0
38951- 0 0 0 0 0 0 0 0 0 0 0 0
38952- 0 0 0 0 0 0 0 0 0 0 0 0
38953- 0 0 0 0 0 0 0 0 0 0 0 0
38954- 0 0 0 0 0 0 0 0 0 0 0 0
38955- 0 0 0 0 0 0 0 0 0 0 0 0
38956- 0 0 0 0 0 0 0 0 0 0 0 0
38957- 0 0 0 0 0 0 0 0 0 0 0 0
38958- 0 0 0 6 6 6 10 10 10 14 14 14
38959- 18 18 18 26 26 26 38 38 38 54 54 54
38960- 70 70 70 86 86 86 94 86 76 89 81 66
38961- 89 81 66 86 86 86 74 74 74 50 50 50
38962- 30 30 30 14 14 14 6 6 6 0 0 0
38963- 0 0 0 0 0 0 0 0 0 0 0 0
38964- 0 0 0 0 0 0 0 0 0 0 0 0
38965- 0 0 0 0 0 0 0 0 0 0 0 0
38966- 6 6 6 18 18 18 34 34 34 58 58 58
38967- 82 82 82 89 81 66 89 81 66 89 81 66
38968- 94 86 66 94 86 76 74 74 74 50 50 50
38969- 26 26 26 14 14 14 6 6 6 0 0 0
38970- 0 0 0 0 0 0 0 0 0 0 0 0
38971- 0 0 0 0 0 0 0 0 0 0 0 0
38972- 0 0 0 0 0 0 0 0 0 0 0 0
38973- 0 0 0 0 0 0 0 0 0 0 0 0
38974- 0 0 0 0 0 0 0 0 0 0 0 0
38975- 0 0 0 0 0 0 0 0 0 0 0 0
38976- 0 0 0 0 0 0 0 0 0 0 0 0
38977- 0 0 0 0 0 0 0 0 0 0 0 0
38978- 0 0 0 0 0 0 0 0 0 0 0 0
38979- 6 6 6 6 6 6 14 14 14 18 18 18
38980- 30 30 30 38 38 38 46 46 46 54 54 54
38981- 50 50 50 42 42 42 30 30 30 18 18 18
38982- 10 10 10 0 0 0 0 0 0 0 0 0
38983- 0 0 0 0 0 0 0 0 0 0 0 0
38984- 0 0 0 0 0 0 0 0 0 0 0 0
38985- 0 0 0 0 0 0 0 0 0 0 0 0
38986- 0 0 0 6 6 6 14 14 14 26 26 26
38987- 38 38 38 50 50 50 58 58 58 58 58 58
38988- 54 54 54 42 42 42 30 30 30 18 18 18
38989- 10 10 10 0 0 0 0 0 0 0 0 0
38990- 0 0 0 0 0 0 0 0 0 0 0 0
38991- 0 0 0 0 0 0 0 0 0 0 0 0
38992- 0 0 0 0 0 0 0 0 0 0 0 0
38993- 0 0 0 0 0 0 0 0 0 0 0 0
38994- 0 0 0 0 0 0 0 0 0 0 0 0
38995- 0 0 0 0 0 0 0 0 0 0 0 0
38996- 0 0 0 0 0 0 0 0 0 0 0 0
38997- 0 0 0 0 0 0 0 0 0 0 0 0
38998- 0 0 0 0 0 0 0 0 0 0 0 0
38999- 0 0 0 0 0 0 0 0 0 6 6 6
39000- 6 6 6 10 10 10 14 14 14 18 18 18
39001- 18 18 18 14 14 14 10 10 10 6 6 6
39002- 0 0 0 0 0 0 0 0 0 0 0 0
39003- 0 0 0 0 0 0 0 0 0 0 0 0
39004- 0 0 0 0 0 0 0 0 0 0 0 0
39005- 0 0 0 0 0 0 0 0 0 0 0 0
39006- 0 0 0 0 0 0 0 0 0 6 6 6
39007- 14 14 14 18 18 18 22 22 22 22 22 22
39008- 18 18 18 14 14 14 10 10 10 6 6 6
39009- 0 0 0 0 0 0 0 0 0 0 0 0
39010- 0 0 0 0 0 0 0 0 0 0 0 0
39011- 0 0 0 0 0 0 0 0 0 0 0 0
39012- 0 0 0 0 0 0 0 0 0 0 0 0
39013- 0 0 0 0 0 0 0 0 0 0 0 0
39014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39027+4 4 4 4 4 4
39028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39041+4 4 4 4 4 4
39042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39055+4 4 4 4 4 4
39056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39069+4 4 4 4 4 4
39070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39083+4 4 4 4 4 4
39084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39097+4 4 4 4 4 4
39098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39102+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
39103+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
39104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39107+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
39108+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39109+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
39110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39111+4 4 4 4 4 4
39112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39116+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
39117+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
39118+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39121+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
39122+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
39123+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
39124+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39125+4 4 4 4 4 4
39126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39130+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
39131+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
39132+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39135+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
39136+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
39137+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
39138+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
39139+4 4 4 4 4 4
39140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39143+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
39144+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
39145+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
39146+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
39147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39148+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39149+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
39150+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
39151+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
39152+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
39153+4 4 4 4 4 4
39154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
39158+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
39159+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
39160+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
39161+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39162+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
39163+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
39164+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
39165+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
39166+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
39167+4 4 4 4 4 4
39168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39171+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
39172+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
39173+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
39174+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
39175+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39176+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
39177+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
39178+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
39179+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
39180+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
39181+4 4 4 4 4 4
39182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39184+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
39185+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
39186+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
39187+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
39188+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
39189+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
39190+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
39191+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
39192+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
39193+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
39194+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
39195+4 4 4 4 4 4
39196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39198+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
39199+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
39200+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
39201+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
39202+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
39203+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
39204+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
39205+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
39206+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
39207+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
39208+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
39209+4 4 4 4 4 4
39210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39212+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
39213+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
39214+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
39215+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
39216+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
39217+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
39218+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
39219+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
39220+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
39221+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
39222+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39223+4 4 4 4 4 4
39224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39226+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
39227+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
39228+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
39229+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
39230+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
39231+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
39232+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
39233+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
39234+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
39235+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
39236+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
39237+4 4 4 4 4 4
39238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39239+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
39240+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
39241+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
39242+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
39243+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
39244+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
39245+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
39246+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
39247+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
39248+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
39249+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
39250+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
39251+4 4 4 4 4 4
39252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39253+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
39254+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
39255+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
39256+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39257+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
39258+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
39259+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
39260+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
39261+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
39262+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
39263+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
39264+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
39265+0 0 0 4 4 4
39266+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39267+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
39268+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
39269+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
39270+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
39271+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
39272+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
39273+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
39274+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
39275+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
39276+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
39277+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
39278+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
39279+2 0 0 0 0 0
39280+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
39281+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
39282+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
39283+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
39284+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
39285+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
39286+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
39287+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
39288+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
39289+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
39290+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
39291+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
39292+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
39293+37 38 37 0 0 0
39294+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39295+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
39296+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
39297+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
39298+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
39299+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
39300+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
39301+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
39302+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
39303+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
39304+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
39305+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
39306+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
39307+85 115 134 4 0 0
39308+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
39309+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
39310+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
39311+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
39312+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
39313+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
39314+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
39315+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
39316+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
39317+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
39318+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
39319+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
39320+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
39321+60 73 81 4 0 0
39322+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
39323+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
39324+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
39325+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
39326+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
39327+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
39328+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
39329+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
39330+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
39331+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
39332+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
39333+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
39334+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
39335+16 19 21 4 0 0
39336+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
39337+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
39338+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
39339+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
39340+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
39341+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
39342+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
39343+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
39344+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
39345+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
39346+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
39347+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
39348+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
39349+4 0 0 4 3 3
39350+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
39351+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
39352+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
39353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
39354+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
39355+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
39356+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
39357+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
39358+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
39359+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
39360+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
39361+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
39362+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
39363+3 2 2 4 4 4
39364+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
39365+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
39366+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
39367+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39368+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
39369+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
39370+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
39371+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
39372+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
39373+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
39374+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
39375+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
39376+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
39377+4 4 4 4 4 4
39378+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
39379+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
39380+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
39381+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
39382+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
39383+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
39384+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
39385+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
39386+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
39387+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
39388+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
39389+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
39390+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
39391+4 4 4 4 4 4
39392+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
39393+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
39394+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
39395+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
39396+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
39397+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
39398+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
39399+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
39400+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
39401+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
39402+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
39403+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
39404+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
39405+5 5 5 5 5 5
39406+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
39407+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
39408+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
39409+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
39410+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
39411+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39412+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
39413+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
39414+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
39415+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
39416+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
39417+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
39418+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39419+5 5 5 4 4 4
39420+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
39421+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
39422+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
39423+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
39424+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39425+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
39426+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
39427+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
39428+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
39429+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
39430+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
39431+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39433+4 4 4 4 4 4
39434+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
39435+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
39436+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
39437+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
39438+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
39439+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39440+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39441+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
39442+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
39443+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
39444+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
39445+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
39446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39447+4 4 4 4 4 4
39448+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
39449+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
39450+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
39451+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
39452+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39453+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
39454+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
39455+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
39456+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
39457+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
39458+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
39459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39461+4 4 4 4 4 4
39462+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
39463+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
39464+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
39465+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
39466+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39467+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39468+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39469+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
39470+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
39471+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
39472+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
39473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39475+4 4 4 4 4 4
39476+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
39477+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
39478+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
39479+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
39480+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39481+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
39482+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
39483+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
39484+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
39485+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
39486+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39489+4 4 4 4 4 4
39490+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
39491+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
39492+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
39493+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
39494+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
39495+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
39496+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
39497+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
39498+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
39499+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
39500+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
39501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39503+4 4 4 4 4 4
39504+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
39505+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
39506+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
39507+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
39508+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39509+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
39510+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
39511+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
39512+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
39513+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
39514+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
39515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39517+4 4 4 4 4 4
39518+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
39519+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
39520+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
39521+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
39522+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
39523+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
39524+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
39525+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
39526+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
39527+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
39528+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39531+4 4 4 4 4 4
39532+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
39533+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
39534+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
39535+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
39536+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39537+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
39538+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
39539+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
39540+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
39541+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
39542+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39545+4 4 4 4 4 4
39546+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
39547+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
39548+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
39549+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
39550+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39551+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
39552+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
39553+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
39554+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
39555+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
39556+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39559+4 4 4 4 4 4
39560+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
39561+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
39562+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
39563+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
39564+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39565+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
39566+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
39567+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
39568+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
39569+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39570+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39573+4 4 4 4 4 4
39574+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
39575+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
39576+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
39577+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
39578+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
39579+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
39580+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
39581+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
39582+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
39583+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39584+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39587+4 4 4 4 4 4
39588+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
39589+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
39590+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
39591+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
39592+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39593+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
39594+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
39595+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
39596+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
39597+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39598+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39601+4 4 4 4 4 4
39602+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
39603+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
39604+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
39605+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
39606+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
39607+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
39608+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
39609+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
39610+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
39611+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39612+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39615+4 4 4 4 4 4
39616+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
39617+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
39618+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
39619+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
39620+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
39621+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
39622+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
39623+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
39624+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
39625+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39626+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39629+4 4 4 4 4 4
39630+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
39631+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
39632+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
39633+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
39634+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
39635+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
39636+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
39637+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
39638+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
39639+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39640+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39643+4 4 4 4 4 4
39644+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
39645+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
39646+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
39647+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
39648+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
39649+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
39650+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
39651+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
39652+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
39653+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39654+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39657+4 4 4 4 4 4
39658+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
39659+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
39660+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
39661+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
39662+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
39663+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
39664+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
39665+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
39666+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
39667+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39668+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39671+4 4 4 4 4 4
39672+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
39673+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
39674+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
39675+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
39676+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
39677+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
39678+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
39679+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
39680+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
39681+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39682+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39685+4 4 4 4 4 4
39686+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
39687+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
39688+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
39689+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
39690+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
39691+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
39692+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39693+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
39694+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
39695+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39696+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39699+4 4 4 4 4 4
39700+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
39701+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
39702+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
39703+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
39704+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
39705+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
39706+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
39707+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
39708+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
39709+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39710+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39713+4 4 4 4 4 4
39714+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
39715+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
39716+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
39717+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
39718+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
39719+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
39720+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
39721+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
39722+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
39723+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39724+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39727+4 4 4 4 4 4
39728+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
39729+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
39730+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
39731+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
39732+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
39733+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
39734+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
39735+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
39736+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
39737+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39738+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39741+4 4 4 4 4 4
39742+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
39743+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
39744+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
39745+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
39746+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
39747+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
39748+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
39749+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
39750+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
39751+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39752+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39755+4 4 4 4 4 4
39756+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
39757+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
39758+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
39759+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
39760+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
39761+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
39762+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
39763+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
39764+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
39765+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39766+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39769+4 4 4 4 4 4
39770+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
39771+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
39772+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
39773+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
39774+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
39775+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
39776+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
39777+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
39778+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
39779+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
39780+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39783+4 4 4 4 4 4
39784+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
39785+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39786+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
39787+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
39788+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
39789+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
39790+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
39791+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
39792+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
39793+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
39794+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39797+4 4 4 4 4 4
39798+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
39799+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39800+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
39801+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
39802+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
39803+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
39804+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39805+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
39806+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
39807+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
39808+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39811+4 4 4 4 4 4
39812+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
39813+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
39814+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
39815+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
39816+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
39817+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
39818+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
39819+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
39820+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
39821+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
39822+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39825+4 4 4 4 4 4
39826+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
39827+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
39828+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39829+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
39830+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
39831+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
39832+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
39833+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
39834+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
39835+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
39836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39839+4 4 4 4 4 4
39840+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39841+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
39842+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
39843+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
39844+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
39845+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
39846+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
39847+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
39848+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
39849+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39853+4 4 4 4 4 4
39854+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
39855+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
39856+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
39857+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
39858+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
39859+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
39860+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
39861+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
39862+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
39863+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39867+4 4 4 4 4 4
39868+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
39869+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
39870+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
39871+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
39872+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
39873+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
39874+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
39875+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
39876+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39877+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39881+4 4 4 4 4 4
39882+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
39883+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39884+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
39885+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39886+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
39887+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
39888+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
39889+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
39890+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
39891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39895+4 4 4 4 4 4
39896+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
39897+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
39898+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
39899+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
39900+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
39901+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
39902+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
39903+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
39904+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
39905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39909+4 4 4 4 4 4
39910+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39911+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
39912+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
39913+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
39914+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
39915+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
39916+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
39917+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
39918+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39923+4 4 4 4 4 4
39924+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
39925+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
39926+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39927+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
39928+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
39929+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
39930+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
39931+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
39932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39937+4 4 4 4 4 4
39938+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39939+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
39940+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
39941+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
39942+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
39943+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
39944+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
39945+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39951+4 4 4 4 4 4
39952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39953+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
39954+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39955+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
39956+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
39957+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
39958+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
39959+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
39960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39965+4 4 4 4 4 4
39966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39967+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39968+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39969+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39970+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39971+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39972+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39973+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39979+4 4 4 4 4 4
39980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39981+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39982+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39983+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39984+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39985+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39986+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39987+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39993+4 4 4 4 4 4
39994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39996+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39997+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39998+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39999+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40000+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40001+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40007+4 4 4 4 4 4
40008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40011+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40012+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40013+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40014+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40021+4 4 4 4 4 4
40022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40025+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40026+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40027+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40028+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40035+4 4 4 4 4 4
40036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40039+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40040+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40041+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40042+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
40043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40049+4 4 4 4 4 4
40050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40053+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
40054+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
40055+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
40056+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
40057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40063+4 4 4 4 4 4
40064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40068+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
40069+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40070+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40077+4 4 4 4 4 4
40078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40082+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
40083+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
40084+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40091+4 4 4 4 4 4
40092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40096+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
40097+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
40098+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40105+4 4 4 4 4 4
40106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40110+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
40111+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
40112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40119+4 4 4 4 4 4
40120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40124+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40125+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
40126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40133+4 4 4 4 4 4
40134diff -urNp linux-3.1.4/drivers/video/udlfb.c linux-3.1.4/drivers/video/udlfb.c
40135--- linux-3.1.4/drivers/video/udlfb.c 2011-11-11 15:19:27.000000000 -0500
40136+++ linux-3.1.4/drivers/video/udlfb.c 2011-11-16 18:39:08.000000000 -0500
40137@@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data
40138 dlfb_urb_completion(urb);
40139
40140 error:
40141- atomic_add(bytes_sent, &dev->bytes_sent);
40142- atomic_add(bytes_identical, &dev->bytes_identical);
40143- atomic_add(width*height*2, &dev->bytes_rendered);
40144+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40145+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40146+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
40147 end_cycles = get_cycles();
40148- atomic_add(((unsigned int) ((end_cycles - start_cycles)
40149+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40150 >> 10)), /* Kcycles */
40151 &dev->cpu_kcycles_used);
40152
40153@@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct
40154 dlfb_urb_completion(urb);
40155
40156 error:
40157- atomic_add(bytes_sent, &dev->bytes_sent);
40158- atomic_add(bytes_identical, &dev->bytes_identical);
40159- atomic_add(bytes_rendered, &dev->bytes_rendered);
40160+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40161+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40162+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
40163 end_cycles = get_cycles();
40164- atomic_add(((unsigned int) ((end_cycles - start_cycles)
40165+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40166 >> 10)), /* Kcycles */
40167 &dev->cpu_kcycles_used);
40168 }
40169@@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_sh
40170 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40171 struct dlfb_data *dev = fb_info->par;
40172 return snprintf(buf, PAGE_SIZE, "%u\n",
40173- atomic_read(&dev->bytes_rendered));
40174+ atomic_read_unchecked(&dev->bytes_rendered));
40175 }
40176
40177 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40178@@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_s
40179 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40180 struct dlfb_data *dev = fb_info->par;
40181 return snprintf(buf, PAGE_SIZE, "%u\n",
40182- atomic_read(&dev->bytes_identical));
40183+ atomic_read_unchecked(&dev->bytes_identical));
40184 }
40185
40186 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40187@@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(s
40188 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40189 struct dlfb_data *dev = fb_info->par;
40190 return snprintf(buf, PAGE_SIZE, "%u\n",
40191- atomic_read(&dev->bytes_sent));
40192+ atomic_read_unchecked(&dev->bytes_sent));
40193 }
40194
40195 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40196@@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_
40197 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40198 struct dlfb_data *dev = fb_info->par;
40199 return snprintf(buf, PAGE_SIZE, "%u\n",
40200- atomic_read(&dev->cpu_kcycles_used));
40201+ atomic_read_unchecked(&dev->cpu_kcycles_used));
40202 }
40203
40204 static ssize_t edid_show(
40205@@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struc
40206 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40207 struct dlfb_data *dev = fb_info->par;
40208
40209- atomic_set(&dev->bytes_rendered, 0);
40210- atomic_set(&dev->bytes_identical, 0);
40211- atomic_set(&dev->bytes_sent, 0);
40212- atomic_set(&dev->cpu_kcycles_used, 0);
40213+ atomic_set_unchecked(&dev->bytes_rendered, 0);
40214+ atomic_set_unchecked(&dev->bytes_identical, 0);
40215+ atomic_set_unchecked(&dev->bytes_sent, 0);
40216+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
40217
40218 return count;
40219 }
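
The udlfb hunks above convert the driver's performance counters (bytes_sent, bytes_identical, bytes_rendered, cpu_kcycles_used) from atomic_add/atomic_read/atomic_set to the PaX *_unchecked variants, marking them as plain statistics so PAX_REFCOUNT's overflow detection does not treat a wrap of these counters as a reference-count bug. The atomic_*_unchecked API only exists in PaX/grsecurity kernels; as a rough, hedged userspace analogue, the sketch below shows the same idea with C11 atomics, counters that are allowed to wrap, updated with relaxed ordering, and read back for a metrics report. All names are illustrative.

/* Sketch: statistics counters that may legitimately wrap (userspace
 * analogue of the atomic_*_unchecked pattern; illustrative names). */
#include <stdatomic.h>
#include <stdio.h>

struct dl_stats {
	atomic_uint bytes_sent;
	atomic_uint bytes_rendered;
};

static void stats_add(struct dl_stats *s, unsigned sent, unsigned rendered)
{
	/* relaxed ordering: these are metrics, not reference counts, so
	 * wrap-around is acceptable and needs no detection */
	atomic_fetch_add_explicit(&s->bytes_sent, sent, memory_order_relaxed);
	atomic_fetch_add_explicit(&s->bytes_rendered, rendered, memory_order_relaxed);
}

int main(void)
{
	struct dl_stats s = { 0 };
	stats_add(&s, 512, 1024);
	printf("%u\n%u\n",
	       atomic_load_explicit(&s.bytes_sent, memory_order_relaxed),
	       atomic_load_explicit(&s.bytes_rendered, memory_order_relaxed));
	return 0;
}
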
40220diff -urNp linux-3.1.4/drivers/video/uvesafb.c linux-3.1.4/drivers/video/uvesafb.c
40221--- linux-3.1.4/drivers/video/uvesafb.c 2011-11-11 15:19:27.000000000 -0500
40222+++ linux-3.1.4/drivers/video/uvesafb.c 2011-11-16 18:39:08.000000000 -0500
40223@@ -19,6 +19,7 @@
40224 #include <linux/io.h>
40225 #include <linux/mutex.h>
40226 #include <linux/slab.h>
40227+#include <linux/moduleloader.h>
40228 #include <video/edid.h>
40229 #include <video/uvesafb.h>
40230 #ifdef CONFIG_X86
40231@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
40232 NULL,
40233 };
40234
40235- return call_usermodehelper(v86d_path, argv, envp, 1);
40236+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
40237 }
40238
40239 /*
40240@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
40241 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
40242 par->pmi_setpal = par->ypan = 0;
40243 } else {
40244+
40245+#ifdef CONFIG_PAX_KERNEXEC
40246+#ifdef CONFIG_MODULES
40247+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
40248+#endif
40249+ if (!par->pmi_code) {
40250+ par->pmi_setpal = par->ypan = 0;
40251+ return 0;
40252+ }
40253+#endif
40254+
40255 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
40256 + task->t.regs.edi);
40257+
40258+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40259+ pax_open_kernel();
40260+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
40261+ pax_close_kernel();
40262+
40263+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
40264+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
40265+#else
40266 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
40267 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
40268+#endif
40269+
40270 printk(KERN_INFO "uvesafb: protected mode interface info at "
40271 "%04x:%04x\n",
40272 (u16)task->t.regs.es, (u16)task->t.regs.edi);
40273@@ -1821,6 +1844,11 @@ out:
40274 if (par->vbe_modes)
40275 kfree(par->vbe_modes);
40276
40277+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40278+ if (par->pmi_code)
40279+ module_free_exec(NULL, par->pmi_code);
40280+#endif
40281+
40282 framebuffer_release(info);
40283 return err;
40284 }
40285@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
40286 kfree(par->vbe_state_orig);
40287 if (par->vbe_state_saved)
40288 kfree(par->vbe_state_saved);
40289+
40290+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40291+ if (par->pmi_code)
40292+ module_free_exec(NULL, par->pmi_code);
40293+#endif
40294+
40295 }
40296
40297 framebuffer_release(info);
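
Under CONFIG_PAX_KERNEXEC the BIOS-provided protected mode interface (PMI) code cannot be executed from the writable mapping returned by phys_to_virt(), so the uvesafb hunk above allocates executable module memory with module_alloc_exec(), copies the PMI blob into it inside a pax_open_kernel()/pax_close_kernel() window, and recomputes the entry points from the 16-bit offset table at the start of the blob. The sketch below is only a userspace analogue of the offset arithmetic (the PaX allocation and ktva_ktla() translation are not modelled, and all names are illustrative): given a relocated copy of the blob, derive the "set display start" and "set palette" entry pointers from the copy rather than from the original mapping.

/* Sketch: recompute PMI entry points from a relocated copy of the blob
 * (userspace analogue; module_alloc_exec()/pax_open_kernel() are PaX APIs). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* stand-in for the BIOS blob: words [1] and [2] hold the offsets of
	 * the "set display start" and "set palette" entries */
	uint16_t pmi_base[32] = { 0, 0x10, 0x20 };
	size_t blob_size = sizeof(pmi_base);

	/* copy the blob out of its original mapping, as the patch does into
	 * executable module memory */
	unsigned char *pmi_code = malloc(blob_size);
	if (!pmi_code)
		return 1;
	memcpy(pmi_code, pmi_base, blob_size);

	/* entry points = base of the copy + offsets read from the table */
	void *pmi_start = pmi_code + pmi_base[1];
	void *pmi_pal   = pmi_code + pmi_base[2];

	printf("pmi_start=%p pmi_pal=%p\n", pmi_start, pmi_pal);
	free(pmi_code);
	return 0;
}
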
40298diff -urNp linux-3.1.4/drivers/video/vesafb.c linux-3.1.4/drivers/video/vesafb.c
40299--- linux-3.1.4/drivers/video/vesafb.c 2011-11-11 15:19:27.000000000 -0500
40300+++ linux-3.1.4/drivers/video/vesafb.c 2011-11-16 18:39:08.000000000 -0500
40301@@ -9,6 +9,7 @@
40302 */
40303
40304 #include <linux/module.h>
40305+#include <linux/moduleloader.h>
40306 #include <linux/kernel.h>
40307 #include <linux/errno.h>
40308 #include <linux/string.h>
40309@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
40310 static int vram_total __initdata; /* Set total amount of memory */
40311 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
40312 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
40313-static void (*pmi_start)(void) __read_mostly;
40314-static void (*pmi_pal) (void) __read_mostly;
40315+static void (*pmi_start)(void) __read_only;
40316+static void (*pmi_pal) (void) __read_only;
40317 static int depth __read_mostly;
40318 static int vga_compat __read_mostly;
40319 /* --------------------------------------------------------------------- */
40320@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
40321 unsigned int size_vmode;
40322 unsigned int size_remap;
40323 unsigned int size_total;
40324+ void *pmi_code = NULL;
40325
40326 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
40327 return -ENODEV;
40328@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
40329 size_remap = size_total;
40330 vesafb_fix.smem_len = size_remap;
40331
40332-#ifndef __i386__
40333- screen_info.vesapm_seg = 0;
40334-#endif
40335-
40336 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
40337 printk(KERN_WARNING
40338 "vesafb: cannot reserve video memory at 0x%lx\n",
40339@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
40340 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
40341 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
40342
40343+#ifdef __i386__
40344+
40345+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40346+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
40347+ if (!pmi_code)
40348+#elif !defined(CONFIG_PAX_KERNEXEC)
40349+ if (0)
40350+#endif
40351+
40352+#endif
40353+ screen_info.vesapm_seg = 0;
40354+
40355 if (screen_info.vesapm_seg) {
40356- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
40357- screen_info.vesapm_seg,screen_info.vesapm_off);
40358+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
40359+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
40360 }
40361
40362 if (screen_info.vesapm_seg < 0xc000)
40363@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
40364
40365 if (ypan || pmi_setpal) {
40366 unsigned short *pmi_base;
40367+
40368 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
40369- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
40370- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
40371+
40372+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40373+ pax_open_kernel();
40374+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
40375+#else
40376+ pmi_code = pmi_base;
40377+#endif
40378+
40379+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
40380+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
40381+
40382+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40383+ pmi_start = ktva_ktla(pmi_start);
40384+ pmi_pal = ktva_ktla(pmi_pal);
40385+ pax_close_kernel();
40386+#endif
40387+
40388 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
40389 if (pmi_base[3]) {
40390 printk(KERN_INFO "vesafb: pmi: ports = ");
40391@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
40392 info->node, info->fix.id);
40393 return 0;
40394 err:
40395+
40396+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40397+ module_free_exec(NULL, pmi_code);
40398+#endif
40399+
40400 if (info->screen_base)
40401 iounmap(info->screen_base);
40402 framebuffer_release(info);
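
The vesafb hunks apply the same KERNEXEC treatment to the VESA PMI code and also move the pmi_start/pmi_pal function pointers from __read_mostly to __read_only, i.e. data that is written once during probe and then lives in a read-only section so it cannot be retargeted afterwards. A very rough userspace analogue of that "initialize, then seal" idea is to place the data in a page that is flipped to PROT_READ after setup; this is only a sketch of the concept, not the kernel mechanism, and the names are illustrative.

/* Sketch: write-once data sealed read-only after initialization
 * (loose userspace analogue of the __read_only idea). */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	void *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;

	/* one-time initialization, comparable to probe time resolving the
	 * pmi_start/pmi_pal pointers */
	strcpy(page, "pmi entry points resolved");

	/* seal: later stray writes now fault instead of silently
	 * redirecting the "function pointers" */
	if (mprotect(page, pagesz, PROT_READ) != 0)
		return 1;

	puts((const char *)page);
	return 0;
}
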
40403diff -urNp linux-3.1.4/drivers/video/via/via_clock.h linux-3.1.4/drivers/video/via/via_clock.h
40404--- linux-3.1.4/drivers/video/via/via_clock.h 2011-11-11 15:19:27.000000000 -0500
40405+++ linux-3.1.4/drivers/video/via/via_clock.h 2011-11-16 18:39:08.000000000 -0500
40406@@ -56,7 +56,7 @@ struct via_clock {
40407
40408 void (*set_engine_pll_state)(u8 state);
40409 void (*set_engine_pll)(struct via_pll_config config);
40410-};
40411+} __no_const;
40412
40413
40414 static inline u32 get_pll_internal_frequency(u32 ref_freq,
40415diff -urNp linux-3.1.4/drivers/virtio/virtio_balloon.c linux-3.1.4/drivers/virtio/virtio_balloon.c
40416--- linux-3.1.4/drivers/virtio/virtio_balloon.c 2011-11-11 15:19:27.000000000 -0500
40417+++ linux-3.1.4/drivers/virtio/virtio_balloon.c 2011-11-16 18:40:29.000000000 -0500
40418@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
40419 struct sysinfo i;
40420 int idx = 0;
40421
40422+ pax_track_stack();
40423+
40424 all_vm_events(events);
40425 si_meminfo(&i);
40426
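
pax_track_stack() is a PaX helper, added here to update_balloon_stats() because it keeps a struct sysinfo and a vm_event array on the stack; to the best of my understanding it is part of PaX's kernel-stack tracking/sanitization work and records how deep the stack grew in functions with large on-stack footprints. A loose userspace analogue of "track the lowest stack address reached" is sketched below; the variable and helper names are invented, and the real helper is per task, not a single static.

/* Sketch: track the deepest stack address a thread has touched
 * (loose analogue of pax_track_stack(); assumes a downward-growing stack). */
#include <stdint.h>
#include <stdio.h>

static uintptr_t stack_low_watermark = UINTPTR_MAX;	/* per-task in the real thing */

static void track_stack(void)
{
	uintptr_t sp = (uintptr_t)__builtin_frame_address(0);
	if (sp < stack_low_watermark)
		stack_low_watermark = sp;
}

static void stack_heavy_function(void)
{
	volatile char scratch[4096];	/* large on-stack footprint */
	scratch[0] = 0;
	track_stack();
}

int main(void)
{
	track_stack();
	uintptr_t before = stack_low_watermark;
	stack_heavy_function();
	printf("lowest stack address dropped by %lu bytes\n",
	       (unsigned long)(before - stack_low_watermark));
	return 0;
}
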
40427diff -urNp linux-3.1.4/drivers/xen/xen-pciback/conf_space.h linux-3.1.4/drivers/xen/xen-pciback/conf_space.h
40428--- linux-3.1.4/drivers/xen/xen-pciback/conf_space.h 2011-11-11 15:19:27.000000000 -0500
40429+++ linux-3.1.4/drivers/xen/xen-pciback/conf_space.h 2011-11-16 18:39:08.000000000 -0500
40430@@ -44,15 +44,15 @@ struct config_field {
40431 struct {
40432 conf_dword_write write;
40433 conf_dword_read read;
40434- } dw;
40435+ } __no_const dw;
40436 struct {
40437 conf_word_write write;
40438 conf_word_read read;
40439- } w;
40440+ } __no_const w;
40441 struct {
40442 conf_byte_write write;
40443 conf_byte_read read;
40444- } b;
40445+ } __no_const b;
40446 } u;
40447 struct list_head list;
40448 };
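
Both the via_clock.h and the conf_space.h hunks tag structures that consist purely of function pointers with __no_const. Under grsecurity's constify GCC plugin such ops-style structures are normally made const automatically; __no_const opts a structure out when its handlers genuinely have to be assigned at run time, as the per-field read/write callbacks in xen-pciback's config_field are. The fragment below only illustrates why that matters: a dispatch struct whose members are picked per instance cannot live in read-only memory. (__no_const itself expands to a plugin attribute; it is reduced to a comment here, and all names are illustrative.)

/* Sketch: an ops struct whose callbacks are chosen per instance, hence
 * must stay writable ("__no_const" in the patch; illustrative names). */
#include <stdint.h>
#include <stdio.h>

typedef int (*conf_byte_read)(unsigned offset, uint8_t *value);
typedef int (*conf_byte_write)(unsigned offset, uint8_t value);

struct byte_ops {		/* would carry __no_const in the patch */
	conf_byte_read read;
	conf_byte_write write;
};

static int plain_read(unsigned offset, uint8_t *value) { (void)offset; *value = 0x42; return 0; }
static int deny_write(unsigned offset, uint8_t value) { (void)offset; (void)value; return -1; }

int main(void)
{
	struct byte_ops ops;
	uint8_t v;

	/* run-time assignment: impossible if the struct were const-ified */
	ops.read = plain_read;
	ops.write = deny_write;

	ops.read(0, &v);
	printf("read 0x%02x, write -> %d\n", v, ops.write(0, v));
	return 0;
}
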
40449diff -urNp linux-3.1.4/fs/9p/vfs_inode.c linux-3.1.4/fs/9p/vfs_inode.c
40450--- linux-3.1.4/fs/9p/vfs_inode.c 2011-11-11 15:19:27.000000000 -0500
40451+++ linux-3.1.4/fs/9p/vfs_inode.c 2011-11-16 18:39:08.000000000 -0500
40452@@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct
40453 void
40454 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40455 {
40456- char *s = nd_get_link(nd);
40457+ const char *s = nd_get_link(nd);
40458
40459 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
40460 IS_ERR(s) ? "<error>" : s);
40461diff -urNp linux-3.1.4/fs/aio.c linux-3.1.4/fs/aio.c
40462--- linux-3.1.4/fs/aio.c 2011-11-11 15:19:27.000000000 -0500
40463+++ linux-3.1.4/fs/aio.c 2011-11-16 18:40:29.000000000 -0500
40464@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
40465 size += sizeof(struct io_event) * nr_events;
40466 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
40467
40468- if (nr_pages < 0)
40469+ if (nr_pages <= 0)
40470 return -EINVAL;
40471
40472 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
40473@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
40474 struct aio_timeout to;
40475 int retry = 0;
40476
40477+ pax_track_stack();
40478+
40479 /* needed to zero any padding within an entry (there shouldn't be
40480 * any, but C is fun!
40481 */
40482@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
40483 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
40484 {
40485 ssize_t ret;
40486+ struct iovec iovstack;
40487
40488 #ifdef CONFIG_COMPAT
40489 if (compat)
40490 ret = compat_rw_copy_check_uvector(type,
40491 (struct compat_iovec __user *)kiocb->ki_buf,
40492- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
40493+ kiocb->ki_nbytes, 1, &iovstack,
40494 &kiocb->ki_iovec);
40495 else
40496 #endif
40497 ret = rw_copy_check_uvector(type,
40498 (struct iovec __user *)kiocb->ki_buf,
40499- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
40500+ kiocb->ki_nbytes, 1, &iovstack,
40501 &kiocb->ki_iovec);
40502 if (ret < 0)
40503 goto out;
40504
40505+ if (kiocb->ki_iovec == &iovstack) {
40506+ kiocb->ki_inline_vec = iovstack;
40507+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
40508+ }
40509 kiocb->ki_nr_segs = kiocb->ki_nbytes;
40510 kiocb->ki_cur_seg = 0;
40511 /* ki_nbytes/left now reflect bytes instead of segs */
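
The aio.c hunk rejects nr_pages == 0 and, more subtly, stops passing &kiocb->ki_inline_vec straight into rw_copy_check_uvector(): the helper is handed a local iovec on the stack, and only if it actually used that stack slot (rather than allocating a larger array) is the result copied back into the kiocb's inline vector and ki_iovec re-pointed at it. The sketch below shows that general "stack buffer or heap allocation, then re-home" pattern in userspace; the helper and field names are invented for illustration.

/* Sketch: a helper may use a caller-supplied stack slot or allocate;
 * the caller re-homes the stack case into long-lived storage. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

struct request {
	struct iovec inline_vec;	/* storage that outlives the caller's frame */
	struct iovec *iovec;		/* points at inline_vec or a heap array */
};

/* fills either *fast (for nr == 1) or a freshly allocated array */
static int copy_check_uvector(struct iovec *fast, struct iovec **ret, int nr)
{
	struct iovec *v = fast;
	if (nr > 1) {
		v = calloc(nr, sizeof(*v));
		if (!v)
			return -1;
	}
	for (int i = 0; i < nr; i++) {
		v[i].iov_base = NULL;
		v[i].iov_len = 4096;
	}
	*ret = v;
	return 0;
}

int main(void)
{
	struct request req;
	struct iovec iovstack;		/* lives only in this frame */

	if (copy_check_uvector(&iovstack, &req.iovec, 1) < 0)
		return 1;

	/* mirror of the patch: if the helper used the stack slot, copy it
	 * into the request before the stack frame goes away */
	if (req.iovec == &iovstack) {
		req.inline_vec = iovstack;
		req.iovec = &req.inline_vec;
	}

	printf("iov_len=%zu heap=%d\n", req.iovec->iov_len, req.iovec != &req.inline_vec);
	return 0;
}
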
40512diff -urNp linux-3.1.4/fs/attr.c linux-3.1.4/fs/attr.c
40513--- linux-3.1.4/fs/attr.c 2011-11-11 15:19:27.000000000 -0500
40514+++ linux-3.1.4/fs/attr.c 2011-11-16 18:40:29.000000000 -0500
40515@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
40516 unsigned long limit;
40517
40518 limit = rlimit(RLIMIT_FSIZE);
40519+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
40520 if (limit != RLIM_INFINITY && offset > limit)
40521 goto out_sig;
40522 if (offset > inode->i_sb->s_maxbytes)
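
inode_newsize_ok() refuses to extend a file beyond RLIMIT_FSIZE, and the attr.c hunk adds a gr_learn_resource() call (grsecurity's RBAC learning hook) just before that check so learning mode can record the offset a process actually needed. The check itself is the ordinary rlimit pattern; a userspace equivalent using getrlimit() is sketched below for context, with invented function names.

/* Sketch: the RLIMIT_FSIZE comparison that inode_newsize_ok() performs,
 * expressed with the userspace getrlimit() API. */
#include <stdio.h>
#include <sys/resource.h>
#include <sys/types.h>

static int newsize_ok(off_t offset)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_FSIZE, &rl) != 0)
		return -1;
	/* grsecurity's learning hook would record (RLIMIT_FSIZE, offset) here */
	if (rl.rlim_cur != RLIM_INFINITY && (rlim_t)offset > rl.rlim_cur)
		return -1;	/* -EFBIG / SIGXFSZ territory in the kernel */
	return 0;
}

int main(void)
{
	printf("1 MiB extend allowed: %s\n",
	       newsize_ok(1 << 20) == 0 ? "yes" : "no");
	return 0;
}
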
40523diff -urNp linux-3.1.4/fs/autofs4/waitq.c linux-3.1.4/fs/autofs4/waitq.c
40524--- linux-3.1.4/fs/autofs4/waitq.c 2011-11-11 15:19:27.000000000 -0500
40525+++ linux-3.1.4/fs/autofs4/waitq.c 2011-11-16 18:39:08.000000000 -0500
40526@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
40527 {
40528 unsigned long sigpipe, flags;
40529 mm_segment_t fs;
40530- const char *data = (const char *)addr;
40531+ const char __user *data = (const char __force_user *)addr;
40532 ssize_t wr = 0;
40533
40534 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
40535diff -urNp linux-3.1.4/fs/befs/linuxvfs.c linux-3.1.4/fs/befs/linuxvfs.c
40536--- linux-3.1.4/fs/befs/linuxvfs.c 2011-11-11 15:19:27.000000000 -0500
40537+++ linux-3.1.4/fs/befs/linuxvfs.c 2011-11-16 18:39:08.000000000 -0500
40538@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
40539 {
40540 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
40541 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
40542- char *link = nd_get_link(nd);
40543+ const char *link = nd_get_link(nd);
40544 if (!IS_ERR(link))
40545 kfree(link);
40546 }
40547diff -urNp linux-3.1.4/fs/binfmt_aout.c linux-3.1.4/fs/binfmt_aout.c
40548--- linux-3.1.4/fs/binfmt_aout.c 2011-11-11 15:19:27.000000000 -0500
40549+++ linux-3.1.4/fs/binfmt_aout.c 2011-11-16 18:40:29.000000000 -0500
40550@@ -16,6 +16,7 @@
40551 #include <linux/string.h>
40552 #include <linux/fs.h>
40553 #include <linux/file.h>
40554+#include <linux/security.h>
40555 #include <linux/stat.h>
40556 #include <linux/fcntl.h>
40557 #include <linux/ptrace.h>
40558@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
40559 #endif
40560 # define START_STACK(u) ((void __user *)u.start_stack)
40561
40562+ memset(&dump, 0, sizeof(dump));
40563+
40564 fs = get_fs();
40565 set_fs(KERNEL_DS);
40566 has_dumped = 1;
40567@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
40568
40569 /* If the size of the dump file exceeds the rlimit, then see what would happen
40570 if we wrote the stack, but not the data area. */
40571+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
40572 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
40573 dump.u_dsize = 0;
40574
40575 /* Make sure we have enough room to write the stack and data areas. */
40576+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
40577 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
40578 dump.u_ssize = 0;
40579
40580@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
40581 rlim = rlimit(RLIMIT_DATA);
40582 if (rlim >= RLIM_INFINITY)
40583 rlim = ~0;
40584+
40585+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
40586 if (ex.a_data + ex.a_bss > rlim)
40587 return -ENOMEM;
40588
40589@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
40590 install_exec_creds(bprm);
40591 current->flags &= ~PF_FORKNOEXEC;
40592
40593+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40594+ current->mm->pax_flags = 0UL;
40595+#endif
40596+
40597+#ifdef CONFIG_PAX_PAGEEXEC
40598+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
40599+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
40600+
40601+#ifdef CONFIG_PAX_EMUTRAMP
40602+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
40603+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
40604+#endif
40605+
40606+#ifdef CONFIG_PAX_MPROTECT
40607+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
40608+ current->mm->pax_flags |= MF_PAX_MPROTECT;
40609+#endif
40610+
40611+ }
40612+#endif
40613+
40614 if (N_MAGIC(ex) == OMAGIC) {
40615 unsigned long text_addr, map_size;
40616 loff_t pos;
40617@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
40618
40619 down_write(&current->mm->mmap_sem);
40620 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
40621- PROT_READ | PROT_WRITE | PROT_EXEC,
40622+ PROT_READ | PROT_WRITE,
40623 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
40624 fd_offset + ex.a_text);
40625 up_write(&current->mm->mmap_sem);
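
For a.out binaries the loader hunks add RLIMIT learning calls, drop PROT_EXEC from the data/bss mapping, and derive the per-mm PaX flags from the executable's N_FLAGS: PAGEEXEC and MPROTECT default to on unless the corresponding opt-out bit is set, while EMUTRAMP is opt-in and only meaningful when PAGEEXEC is active. The small sketch below restates that decision logic on its own; the F_PAX_*/MF_PAX_* bit values are invented stand-ins, not the real header constants, and the config #ifdefs of the patch are dropped.

/* Sketch: deriving PaX mm flags from a.out header flags, mirroring the
 * opt-out/opt-in logic of the patch (bit values are placeholders). */
#include <stdio.h>

#define F_PAX_PAGEEXEC	0x01	/* opt OUT of PAGEEXEC */
#define F_PAX_EMUTRAMP	0x02	/* opt IN  to EMUTRAMP */
#define F_PAX_MPROTECT	0x04	/* opt OUT of MPROTECT */

#define MF_PAX_PAGEEXEC	0x01
#define MF_PAX_EMUTRAMP	0x02
#define MF_PAX_MPROTECT	0x04

static unsigned long aout_pax_flags(unsigned long n_flags)
{
	unsigned long pax_flags = 0;

	if (!(n_flags & F_PAX_PAGEEXEC)) {	/* enabled unless opted out */
		pax_flags |= MF_PAX_PAGEEXEC;
		if (n_flags & F_PAX_EMUTRAMP)	/* opt-in, under PAGEEXEC */
			pax_flags |= MF_PAX_EMUTRAMP;
		if (!(n_flags & F_PAX_MPROTECT))
			pax_flags |= MF_PAX_MPROTECT;
	}
	return pax_flags;
}

int main(void)
{
	printf("default: %#lx, EMUTRAMP requested: %#lx, PAGEEXEC disabled: %#lx\n",
	       aout_pax_flags(0),
	       aout_pax_flags(F_PAX_EMUTRAMP),
	       aout_pax_flags(F_PAX_PAGEEXEC));
	return 0;
}
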
40626diff -urNp linux-3.1.4/fs/binfmt_elf.c linux-3.1.4/fs/binfmt_elf.c
40627--- linux-3.1.4/fs/binfmt_elf.c 2011-11-11 15:19:27.000000000 -0500
40628+++ linux-3.1.4/fs/binfmt_elf.c 2011-11-16 18:40:29.000000000 -0500
40629@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
40630 #define elf_core_dump NULL
40631 #endif
40632
40633+#ifdef CONFIG_PAX_MPROTECT
40634+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
40635+#endif
40636+
40637 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
40638 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
40639 #else
40640@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
40641 .load_binary = load_elf_binary,
40642 .load_shlib = load_elf_library,
40643 .core_dump = elf_core_dump,
40644+
40645+#ifdef CONFIG_PAX_MPROTECT
40646+ .handle_mprotect= elf_handle_mprotect,
40647+#endif
40648+
40649 .min_coredump = ELF_EXEC_PAGESIZE,
40650 };
40651
40652@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
40653
40654 static int set_brk(unsigned long start, unsigned long end)
40655 {
40656+ unsigned long e = end;
40657+
40658 start = ELF_PAGEALIGN(start);
40659 end = ELF_PAGEALIGN(end);
40660 if (end > start) {
40661@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
40662 if (BAD_ADDR(addr))
40663 return addr;
40664 }
40665- current->mm->start_brk = current->mm->brk = end;
40666+ current->mm->start_brk = current->mm->brk = e;
40667 return 0;
40668 }
40669
40670@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
40671 elf_addr_t __user *u_rand_bytes;
40672 const char *k_platform = ELF_PLATFORM;
40673 const char *k_base_platform = ELF_BASE_PLATFORM;
40674- unsigned char k_rand_bytes[16];
40675+ u32 k_rand_bytes[4];
40676 int items;
40677 elf_addr_t *elf_info;
40678 int ei_index = 0;
40679 const struct cred *cred = current_cred();
40680 struct vm_area_struct *vma;
40681+ unsigned long saved_auxv[AT_VECTOR_SIZE];
40682+
40683+ pax_track_stack();
40684
40685 /*
40686 * In some cases (e.g. Hyper-Threading), we want to avoid L1
40687@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
40688 * Generate 16 random bytes for userspace PRNG seeding.
40689 */
40690 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
40691- u_rand_bytes = (elf_addr_t __user *)
40692- STACK_ALLOC(p, sizeof(k_rand_bytes));
40693+ srandom32(k_rand_bytes[0] ^ random32());
40694+ srandom32(k_rand_bytes[1] ^ random32());
40695+ srandom32(k_rand_bytes[2] ^ random32());
40696+ srandom32(k_rand_bytes[3] ^ random32());
40697+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
40698+ u_rand_bytes = (elf_addr_t __user *) p;
40699 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
40700 return -EFAULT;
40701
40702@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
40703 return -EFAULT;
40704 current->mm->env_end = p;
40705
40706+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
40707+
40708 /* Put the elf_info on the stack in the right place. */
40709 sp = (elf_addr_t __user *)envp + 1;
40710- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
40711+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
40712 return -EFAULT;
40713 return 0;
40714 }
40715@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
40716 {
40717 struct elf_phdr *elf_phdata;
40718 struct elf_phdr *eppnt;
40719- unsigned long load_addr = 0;
40720+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
40721 int load_addr_set = 0;
40722 unsigned long last_bss = 0, elf_bss = 0;
40723- unsigned long error = ~0UL;
40724+ unsigned long error = -EINVAL;
40725 unsigned long total_size;
40726 int retval, i, size;
40727
40728@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
40729 goto out_close;
40730 }
40731
40732+#ifdef CONFIG_PAX_SEGMEXEC
40733+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
40734+ pax_task_size = SEGMEXEC_TASK_SIZE;
40735+#endif
40736+
40737 eppnt = elf_phdata;
40738 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
40739 if (eppnt->p_type == PT_LOAD) {
40740@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
40741 k = load_addr + eppnt->p_vaddr;
40742 if (BAD_ADDR(k) ||
40743 eppnt->p_filesz > eppnt->p_memsz ||
40744- eppnt->p_memsz > TASK_SIZE ||
40745- TASK_SIZE - eppnt->p_memsz < k) {
40746+ eppnt->p_memsz > pax_task_size ||
40747+ pax_task_size - eppnt->p_memsz < k) {
40748 error = -ENOMEM;
40749 goto out_close;
40750 }
40751@@ -528,6 +553,193 @@ out:
40752 return error;
40753 }
40754
40755+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
40756+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
40757+{
40758+ unsigned long pax_flags = 0UL;
40759+
40760+#ifdef CONFIG_PAX_PAGEEXEC
40761+ if (elf_phdata->p_flags & PF_PAGEEXEC)
40762+ pax_flags |= MF_PAX_PAGEEXEC;
40763+#endif
40764+
40765+#ifdef CONFIG_PAX_SEGMEXEC
40766+ if (elf_phdata->p_flags & PF_SEGMEXEC)
40767+ pax_flags |= MF_PAX_SEGMEXEC;
40768+#endif
40769+
40770+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40771+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40772+ if ((__supported_pte_mask & _PAGE_NX))
40773+ pax_flags &= ~MF_PAX_SEGMEXEC;
40774+ else
40775+ pax_flags &= ~MF_PAX_PAGEEXEC;
40776+ }
40777+#endif
40778+
40779+#ifdef CONFIG_PAX_EMUTRAMP
40780+ if (elf_phdata->p_flags & PF_EMUTRAMP)
40781+ pax_flags |= MF_PAX_EMUTRAMP;
40782+#endif
40783+
40784+#ifdef CONFIG_PAX_MPROTECT
40785+ if (elf_phdata->p_flags & PF_MPROTECT)
40786+ pax_flags |= MF_PAX_MPROTECT;
40787+#endif
40788+
40789+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40790+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
40791+ pax_flags |= MF_PAX_RANDMMAP;
40792+#endif
40793+
40794+ return pax_flags;
40795+}
40796+#endif
40797+
40798+#ifdef CONFIG_PAX_PT_PAX_FLAGS
40799+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
40800+{
40801+ unsigned long pax_flags = 0UL;
40802+
40803+#ifdef CONFIG_PAX_PAGEEXEC
40804+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
40805+ pax_flags |= MF_PAX_PAGEEXEC;
40806+#endif
40807+
40808+#ifdef CONFIG_PAX_SEGMEXEC
40809+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
40810+ pax_flags |= MF_PAX_SEGMEXEC;
40811+#endif
40812+
40813+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40814+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40815+ if ((__supported_pte_mask & _PAGE_NX))
40816+ pax_flags &= ~MF_PAX_SEGMEXEC;
40817+ else
40818+ pax_flags &= ~MF_PAX_PAGEEXEC;
40819+ }
40820+#endif
40821+
40822+#ifdef CONFIG_PAX_EMUTRAMP
40823+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
40824+ pax_flags |= MF_PAX_EMUTRAMP;
40825+#endif
40826+
40827+#ifdef CONFIG_PAX_MPROTECT
40828+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
40829+ pax_flags |= MF_PAX_MPROTECT;
40830+#endif
40831+
40832+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40833+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
40834+ pax_flags |= MF_PAX_RANDMMAP;
40835+#endif
40836+
40837+ return pax_flags;
40838+}
40839+#endif
40840+
40841+#ifdef CONFIG_PAX_EI_PAX
40842+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
40843+{
40844+ unsigned long pax_flags = 0UL;
40845+
40846+#ifdef CONFIG_PAX_PAGEEXEC
40847+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
40848+ pax_flags |= MF_PAX_PAGEEXEC;
40849+#endif
40850+
40851+#ifdef CONFIG_PAX_SEGMEXEC
40852+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
40853+ pax_flags |= MF_PAX_SEGMEXEC;
40854+#endif
40855+
40856+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40857+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40858+ if ((__supported_pte_mask & _PAGE_NX))
40859+ pax_flags &= ~MF_PAX_SEGMEXEC;
40860+ else
40861+ pax_flags &= ~MF_PAX_PAGEEXEC;
40862+ }
40863+#endif
40864+
40865+#ifdef CONFIG_PAX_EMUTRAMP
40866+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
40867+ pax_flags |= MF_PAX_EMUTRAMP;
40868+#endif
40869+
40870+#ifdef CONFIG_PAX_MPROTECT
40871+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
40872+ pax_flags |= MF_PAX_MPROTECT;
40873+#endif
40874+
40875+#ifdef CONFIG_PAX_ASLR
40876+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
40877+ pax_flags |= MF_PAX_RANDMMAP;
40878+#endif
40879+
40880+ return pax_flags;
40881+}
40882+#endif
40883+
40884+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
40885+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
40886+{
40887+ unsigned long pax_flags = 0UL;
40888+
40889+#ifdef CONFIG_PAX_PT_PAX_FLAGS
40890+ unsigned long i;
40891+ int found_flags = 0;
40892+#endif
40893+
40894+#ifdef CONFIG_PAX_EI_PAX
40895+ pax_flags = pax_parse_ei_pax(elf_ex);
40896+#endif
40897+
40898+#ifdef CONFIG_PAX_PT_PAX_FLAGS
40899+ for (i = 0UL; i < elf_ex->e_phnum; i++)
40900+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
40901+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
40902+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
40903+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
40904+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
40905+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
40906+ return -EINVAL;
40907+
40908+#ifdef CONFIG_PAX_SOFTMODE
40909+ if (pax_softmode)
40910+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
40911+ else
40912+#endif
40913+
40914+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
40915+ found_flags = 1;
40916+ break;
40917+ }
40918+#endif
40919+
40920+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
40921+ if (found_flags == 0) {
40922+ struct elf_phdr phdr;
40923+ memset(&phdr, 0, sizeof(phdr));
40924+ phdr.p_flags = PF_NOEMUTRAMP;
40925+#ifdef CONFIG_PAX_SOFTMODE
40926+ if (pax_softmode)
40927+ pax_flags = pax_parse_softmode(&phdr);
40928+ else
40929+#endif
40930+ pax_flags = pax_parse_hardmode(&phdr);
40931+ }
40932+#endif
40933+
40934+ if (0 > pax_check_flags(&pax_flags))
40935+ return -EINVAL;
40936+
40937+ current->mm->pax_flags = pax_flags;
40938+ return 0;
40939+}
40940+#endif
40941+
40942 /*
40943 * These are the functions used to load ELF style executables and shared
40944 * libraries. There is no binary dependent code anywhere else.
40945@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
40946 {
40947 unsigned int random_variable = 0;
40948
40949+#ifdef CONFIG_PAX_RANDUSTACK
40950+ if (randomize_va_space)
40951+ return stack_top - current->mm->delta_stack;
40952+#endif
40953+
40954 if ((current->flags & PF_RANDOMIZE) &&
40955 !(current->personality & ADDR_NO_RANDOMIZE)) {
40956 random_variable = get_random_int() & STACK_RND_MASK;
40957@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
40958 unsigned long load_addr = 0, load_bias = 0;
40959 int load_addr_set = 0;
40960 char * elf_interpreter = NULL;
40961- unsigned long error;
40962+ unsigned long error = 0;
40963 struct elf_phdr *elf_ppnt, *elf_phdata;
40964 unsigned long elf_bss, elf_brk;
40965 int retval, i;
40966@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
40967 unsigned long start_code, end_code, start_data, end_data;
40968 unsigned long reloc_func_desc __maybe_unused = 0;
40969 int executable_stack = EXSTACK_DEFAULT;
40970- unsigned long def_flags = 0;
40971 struct {
40972 struct elfhdr elf_ex;
40973 struct elfhdr interp_elf_ex;
40974 } *loc;
40975+ unsigned long pax_task_size = TASK_SIZE;
40976
40977 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40978 if (!loc) {
40979@@ -713,11 +930,81 @@ static int load_elf_binary(struct linux_
40980
40981 /* OK, This is the point of no return */
40982 current->flags &= ~PF_FORKNOEXEC;
40983- current->mm->def_flags = def_flags;
40984+
40985+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40986+ current->mm->pax_flags = 0UL;
40987+#endif
40988+
40989+#ifdef CONFIG_PAX_DLRESOLVE
40990+ current->mm->call_dl_resolve = 0UL;
40991+#endif
40992+
40993+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40994+ current->mm->call_syscall = 0UL;
40995+#endif
40996+
40997+#ifdef CONFIG_PAX_ASLR
40998+ current->mm->delta_mmap = 0UL;
40999+ current->mm->delta_stack = 0UL;
41000+#endif
41001+
41002+ current->mm->def_flags = 0;
41003+
41004+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
41005+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
41006+ send_sig(SIGKILL, current, 0);
41007+ goto out_free_dentry;
41008+ }
41009+#endif
41010+
41011+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
41012+ pax_set_initial_flags(bprm);
41013+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
41014+ if (pax_set_initial_flags_func)
41015+ (pax_set_initial_flags_func)(bprm);
41016+#endif
41017+
41018+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
41019+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
41020+ current->mm->context.user_cs_limit = PAGE_SIZE;
41021+ current->mm->def_flags |= VM_PAGEEXEC;
41022+ }
41023+#endif
41024+
41025+#ifdef CONFIG_PAX_SEGMEXEC
41026+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
41027+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
41028+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
41029+ pax_task_size = SEGMEXEC_TASK_SIZE;
41030+ current->mm->def_flags |= VM_NOHUGEPAGE;
41031+ }
41032+#endif
41033+
41034+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
41035+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41036+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
41037+ put_cpu();
41038+ }
41039+#endif
41040
41041 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
41042 may depend on the personality. */
41043 SET_PERSONALITY(loc->elf_ex);
41044+
41045+#ifdef CONFIG_PAX_ASLR
41046+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
41047+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
41048+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
41049+ }
41050+#endif
41051+
41052+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41053+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41054+ executable_stack = EXSTACK_DISABLE_X;
41055+ current->personality &= ~READ_IMPLIES_EXEC;
41056+ } else
41057+#endif
41058+
41059 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
41060 current->personality |= READ_IMPLIES_EXEC;
41061
41062@@ -808,6 +1095,20 @@ static int load_elf_binary(struct linux_
41063 #else
41064 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
41065 #endif
41066+
41067+#ifdef CONFIG_PAX_RANDMMAP
41068+ /* PaX: randomize base address at the default exe base if requested */
41069+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
41070+#ifdef CONFIG_SPARC64
41071+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
41072+#else
41073+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
41074+#endif
41075+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
41076+ elf_flags |= MAP_FIXED;
41077+ }
41078+#endif
41079+
41080 }
41081
41082 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
41083@@ -840,9 +1141,9 @@ static int load_elf_binary(struct linux_
41084 * allowed task size. Note that p_filesz must always be
41085 * <= p_memsz so it is only necessary to check p_memsz.
41086 */
41087- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41088- elf_ppnt->p_memsz > TASK_SIZE ||
41089- TASK_SIZE - elf_ppnt->p_memsz < k) {
41090+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41091+ elf_ppnt->p_memsz > pax_task_size ||
41092+ pax_task_size - elf_ppnt->p_memsz < k) {
41093 /* set_brk can never work. Avoid overflows. */
41094 send_sig(SIGKILL, current, 0);
41095 retval = -EINVAL;
41096@@ -870,6 +1171,11 @@ static int load_elf_binary(struct linux_
41097 start_data += load_bias;
41098 end_data += load_bias;
41099
41100+#ifdef CONFIG_PAX_RANDMMAP
41101+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
41102+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
41103+#endif
41104+
41105 /* Calling set_brk effectively mmaps the pages that we need
41106 * for the bss and break sections. We must do this before
41107 * mapping in the interpreter, to make sure it doesn't wind
41108@@ -881,9 +1187,11 @@ static int load_elf_binary(struct linux_
41109 goto out_free_dentry;
41110 }
41111 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
41112- send_sig(SIGSEGV, current, 0);
41113- retval = -EFAULT; /* Nobody gets to see this, but.. */
41114- goto out_free_dentry;
41115+ /*
41116+ * This bss-zeroing can fail if the ELF
41117+ * file specifies odd protections. So
41118+ * we don't check the return value
41119+ */
41120 }
41121
41122 if (elf_interpreter) {
41123@@ -1098,7 +1406,7 @@ out:
41124 * Decide what to dump of a segment, part, all or none.
41125 */
41126 static unsigned long vma_dump_size(struct vm_area_struct *vma,
41127- unsigned long mm_flags)
41128+ unsigned long mm_flags, long signr)
41129 {
41130 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
41131
41132@@ -1132,7 +1440,7 @@ static unsigned long vma_dump_size(struc
41133 if (vma->vm_file == NULL)
41134 return 0;
41135
41136- if (FILTER(MAPPED_PRIVATE))
41137+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
41138 goto whole;
41139
41140 /*
41141@@ -1354,9 +1662,9 @@ static void fill_auxv_note(struct memelf
41142 {
41143 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
41144 int i = 0;
41145- do
41146+ do {
41147 i += 2;
41148- while (auxv[i - 2] != AT_NULL);
41149+ } while (auxv[i - 2] != AT_NULL);
41150 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
41151 }
41152
41153@@ -1862,14 +2170,14 @@ static void fill_extnum_info(struct elfh
41154 }
41155
41156 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
41157- unsigned long mm_flags)
41158+ struct coredump_params *cprm)
41159 {
41160 struct vm_area_struct *vma;
41161 size_t size = 0;
41162
41163 for (vma = first_vma(current, gate_vma); vma != NULL;
41164 vma = next_vma(vma, gate_vma))
41165- size += vma_dump_size(vma, mm_flags);
41166+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41167 return size;
41168 }
41169
41170@@ -1963,7 +2271,7 @@ static int elf_core_dump(struct coredump
41171
41172 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
41173
41174- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
41175+ offset += elf_core_vma_data_size(gate_vma, cprm);
41176 offset += elf_core_extra_data_size();
41177 e_shoff = offset;
41178
41179@@ -1977,10 +2285,12 @@ static int elf_core_dump(struct coredump
41180 offset = dataoff;
41181
41182 size += sizeof(*elf);
41183+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
41184 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
41185 goto end_coredump;
41186
41187 size += sizeof(*phdr4note);
41188+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
41189 if (size > cprm->limit
41190 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
41191 goto end_coredump;
41192@@ -1994,7 +2304,7 @@ static int elf_core_dump(struct coredump
41193 phdr.p_offset = offset;
41194 phdr.p_vaddr = vma->vm_start;
41195 phdr.p_paddr = 0;
41196- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
41197+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41198 phdr.p_memsz = vma->vm_end - vma->vm_start;
41199 offset += phdr.p_filesz;
41200 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
41201@@ -2005,6 +2315,7 @@ static int elf_core_dump(struct coredump
41202 phdr.p_align = ELF_EXEC_PAGESIZE;
41203
41204 size += sizeof(phdr);
41205+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
41206 if (size > cprm->limit
41207 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
41208 goto end_coredump;
41209@@ -2029,7 +2340,7 @@ static int elf_core_dump(struct coredump
41210 unsigned long addr;
41211 unsigned long end;
41212
41213- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
41214+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41215
41216 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
41217 struct page *page;
41218@@ -2038,6 +2349,7 @@ static int elf_core_dump(struct coredump
41219 page = get_dump_page(addr);
41220 if (page) {
41221 void *kaddr = kmap(page);
41222+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
41223 stop = ((size += PAGE_SIZE) > cprm->limit) ||
41224 !dump_write(cprm->file, kaddr,
41225 PAGE_SIZE);
41226@@ -2055,6 +2367,7 @@ static int elf_core_dump(struct coredump
41227
41228 if (e_phnum == PN_XNUM) {
41229 size += sizeof(*shdr4extnum);
41230+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
41231 if (size > cprm->limit
41232 || !dump_write(cprm->file, shdr4extnum,
41233 sizeof(*shdr4extnum)))
41234@@ -2075,6 +2388,97 @@ out:
41235
41236 #endif /* CONFIG_ELF_CORE */
41237
41238+#ifdef CONFIG_PAX_MPROTECT
41239+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
41240+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
41241+ * we'll remove VM_MAYWRITE for good on RELRO segments.
41242+ *
41243+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
41244+ * basis because we want to allow the common case and not the special ones.
41245+ */
41246+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
41247+{
41248+ struct elfhdr elf_h;
41249+ struct elf_phdr elf_p;
41250+ unsigned long i;
41251+ unsigned long oldflags;
41252+ bool is_textrel_rw, is_textrel_rx, is_relro;
41253+
41254+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
41255+ return;
41256+
41257+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
41258+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
41259+
41260+#ifdef CONFIG_PAX_ELFRELOCS
41261+ /* possible TEXTREL */
41262+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
41263+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
41264+#else
41265+ is_textrel_rw = false;
41266+ is_textrel_rx = false;
41267+#endif
41268+
41269+ /* possible RELRO */
41270+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
41271+
41272+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
41273+ return;
41274+
41275+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
41276+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
41277+
41278+#ifdef CONFIG_PAX_ETEXECRELOCS
41279+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
41280+#else
41281+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
41282+#endif
41283+
41284+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
41285+ !elf_check_arch(&elf_h) ||
41286+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
41287+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
41288+ return;
41289+
41290+ for (i = 0UL; i < elf_h.e_phnum; i++) {
41291+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
41292+ return;
41293+ switch (elf_p.p_type) {
41294+ case PT_DYNAMIC:
41295+ if (!is_textrel_rw && !is_textrel_rx)
41296+ continue;
41297+ i = 0UL;
41298+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
41299+ elf_dyn dyn;
41300+
41301+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
41302+ return;
41303+ if (dyn.d_tag == DT_NULL)
41304+ return;
41305+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
41306+ gr_log_textrel(vma);
41307+ if (is_textrel_rw)
41308+ vma->vm_flags |= VM_MAYWRITE;
41309+ else
41310+				/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
41311+ vma->vm_flags &= ~VM_MAYWRITE;
41312+ return;
41313+ }
41314+ i++;
41315+ }
41316+ return;
41317+
41318+ case PT_GNU_RELRO:
41319+ if (!is_relro)
41320+ continue;
41321+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
41322+ vma->vm_flags &= ~VM_MAYWRITE;
41323+ return;
41324+ }
41325+ }
41326+}
41327+#endif
41328+
41329 static int __init init_elf_binfmt(void)
41330 {
41331 return register_binfmt(&elf_format);
41332diff -urNp linux-3.1.4/fs/binfmt_flat.c linux-3.1.4/fs/binfmt_flat.c
41333--- linux-3.1.4/fs/binfmt_flat.c 2011-11-11 15:19:27.000000000 -0500
41334+++ linux-3.1.4/fs/binfmt_flat.c 2011-11-16 18:39:08.000000000 -0500
41335@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
41336 realdatastart = (unsigned long) -ENOMEM;
41337 printk("Unable to allocate RAM for process data, errno %d\n",
41338 (int)-realdatastart);
41339+ down_write(&current->mm->mmap_sem);
41340 do_munmap(current->mm, textpos, text_len);
41341+ up_write(&current->mm->mmap_sem);
41342 ret = realdatastart;
41343 goto err;
41344 }
41345@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
41346 }
41347 if (IS_ERR_VALUE(result)) {
41348 printk("Unable to read data+bss, errno %d\n", (int)-result);
41349+ down_write(&current->mm->mmap_sem);
41350 do_munmap(current->mm, textpos, text_len);
41351 do_munmap(current->mm, realdatastart, len);
41352+ up_write(&current->mm->mmap_sem);
41353 ret = result;
41354 goto err;
41355 }
41356@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
41357 }
41358 if (IS_ERR_VALUE(result)) {
41359 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
41360+ down_write(&current->mm->mmap_sem);
41361 do_munmap(current->mm, textpos, text_len + data_len + extra +
41362 MAX_SHARED_LIBS * sizeof(unsigned long));
41363+ up_write(&current->mm->mmap_sem);
41364 ret = result;
41365 goto err;
41366 }
41367diff -urNp linux-3.1.4/fs/bio.c linux-3.1.4/fs/bio.c
41368--- linux-3.1.4/fs/bio.c 2011-11-11 15:19:27.000000000 -0500
41369+++ linux-3.1.4/fs/bio.c 2011-11-16 18:39:08.000000000 -0500
41370@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
41371 const int read = bio_data_dir(bio) == READ;
41372 struct bio_map_data *bmd = bio->bi_private;
41373 int i;
41374- char *p = bmd->sgvecs[0].iov_base;
41375+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
41376
41377 __bio_for_each_segment(bvec, bio, i, 0) {
41378 char *addr = page_address(bvec->bv_page);
41379diff -urNp linux-3.1.4/fs/block_dev.c linux-3.1.4/fs/block_dev.c
41380--- linux-3.1.4/fs/block_dev.c 2011-11-11 15:19:27.000000000 -0500
41381+++ linux-3.1.4/fs/block_dev.c 2011-11-16 18:39:08.000000000 -0500
41382@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_de
41383 else if (bdev->bd_contains == bdev)
41384 return true; /* is a whole device which isn't held */
41385
41386- else if (whole->bd_holder == bd_may_claim)
41387+ else if (whole->bd_holder == (void *)bd_may_claim)
41388 return true; /* is a partition of a device that is being partitioned */
41389 else if (whole->bd_holder != NULL)
41390 return false; /* is a partition of a held device */
41391diff -urNp linux-3.1.4/fs/btrfs/ctree.c linux-3.1.4/fs/btrfs/ctree.c
41392--- linux-3.1.4/fs/btrfs/ctree.c 2011-11-11 15:19:27.000000000 -0500
41393+++ linux-3.1.4/fs/btrfs/ctree.c 2011-11-16 18:39:08.000000000 -0500
41394@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(st
41395 free_extent_buffer(buf);
41396 add_root_to_dirty_list(root);
41397 } else {
41398- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
41399- parent_start = parent->start;
41400- else
41401+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
41402+ if (parent)
41403+ parent_start = parent->start;
41404+ else
41405+ parent_start = 0;
41406+ } else
41407 parent_start = 0;
41408
41409 WARN_ON(trans->transid != btrfs_header_generation(parent));
41410diff -urNp linux-3.1.4/fs/btrfs/inode.c linux-3.1.4/fs/btrfs/inode.c
41411--- linux-3.1.4/fs/btrfs/inode.c 2011-11-11 15:19:27.000000000 -0500
41412+++ linux-3.1.4/fs/btrfs/inode.c 2011-11-17 18:12:11.000000000 -0500
41413@@ -6922,7 +6922,7 @@ fail:
41414 return -ENOMEM;
41415 }
41416
41417-static int btrfs_getattr(struct vfsmount *mnt,
41418+int btrfs_getattr(struct vfsmount *mnt,
41419 struct dentry *dentry, struct kstat *stat)
41420 {
41421 struct inode *inode = dentry->d_inode;
41422@@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount
41423 return 0;
41424 }
41425
41426+EXPORT_SYMBOL(btrfs_getattr);
41427+
41428+dev_t get_btrfs_dev_from_inode(struct inode *inode)
41429+{
41430+ return BTRFS_I(inode)->root->anon_dev;
41431+}
41432+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
41433+
41434 /*
41435 * If a file is moved, it will inherit the cow and compression flags of the new
41436 * directory.
41437diff -urNp linux-3.1.4/fs/btrfs/ioctl.c linux-3.1.4/fs/btrfs/ioctl.c
41438--- linux-3.1.4/fs/btrfs/ioctl.c 2011-11-11 15:19:27.000000000 -0500
41439+++ linux-3.1.4/fs/btrfs/ioctl.c 2011-11-16 18:40:29.000000000 -0500
41440@@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs
41441 for (i = 0; i < num_types; i++) {
41442 struct btrfs_space_info *tmp;
41443
41444+ /* Don't copy in more than we allocated */
41445 if (!slot_count)
41446 break;
41447
41448+ slot_count--;
41449+
41450 info = NULL;
41451 rcu_read_lock();
41452 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
41453@@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs
41454 memcpy(dest, &space, sizeof(space));
41455 dest++;
41456 space_args.total_spaces++;
41457- slot_count--;
41458 }
41459- if (!slot_count)
41460- break;
41461 }
41462 up_read(&info->groups_sem);
41463 }
41464
41465- user_dest = (struct btrfs_ioctl_space_info *)
41466+ user_dest = (struct btrfs_ioctl_space_info __user *)
41467 (arg + sizeof(struct btrfs_ioctl_space_args));
41468
41469 if (copy_to_user(user_dest, dest_orig, alloc_size))
41470diff -urNp linux-3.1.4/fs/btrfs/relocation.c linux-3.1.4/fs/btrfs/relocation.c
41471--- linux-3.1.4/fs/btrfs/relocation.c 2011-11-11 15:19:27.000000000 -0500
41472+++ linux-3.1.4/fs/btrfs/relocation.c 2011-11-16 18:39:08.000000000 -0500
41473@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
41474 }
41475 spin_unlock(&rc->reloc_root_tree.lock);
41476
41477- BUG_ON((struct btrfs_root *)node->data != root);
41478+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
41479
41480 if (!del) {
41481 spin_lock(&rc->reloc_root_tree.lock);
41482diff -urNp linux-3.1.4/fs/cachefiles/bind.c linux-3.1.4/fs/cachefiles/bind.c
41483--- linux-3.1.4/fs/cachefiles/bind.c 2011-11-11 15:19:27.000000000 -0500
41484+++ linux-3.1.4/fs/cachefiles/bind.c 2011-11-16 18:39:08.000000000 -0500
41485@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
41486 args);
41487
41488 /* start by checking things over */
41489- ASSERT(cache->fstop_percent >= 0 &&
41490- cache->fstop_percent < cache->fcull_percent &&
41491+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
41492 cache->fcull_percent < cache->frun_percent &&
41493 cache->frun_percent < 100);
41494
41495- ASSERT(cache->bstop_percent >= 0 &&
41496- cache->bstop_percent < cache->bcull_percent &&
41497+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
41498 cache->bcull_percent < cache->brun_percent &&
41499 cache->brun_percent < 100);
41500
41501diff -urNp linux-3.1.4/fs/cachefiles/daemon.c linux-3.1.4/fs/cachefiles/daemon.c
41502--- linux-3.1.4/fs/cachefiles/daemon.c 2011-11-11 15:19:27.000000000 -0500
41503+++ linux-3.1.4/fs/cachefiles/daemon.c 2011-11-16 18:39:08.000000000 -0500
41504@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
41505 if (n > buflen)
41506 return -EMSGSIZE;
41507
41508- if (copy_to_user(_buffer, buffer, n) != 0)
41509+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
41510 return -EFAULT;
41511
41512 return n;
41513@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
41514 if (test_bit(CACHEFILES_DEAD, &cache->flags))
41515 return -EIO;
41516
41517- if (datalen < 0 || datalen > PAGE_SIZE - 1)
41518+ if (datalen > PAGE_SIZE - 1)
41519 return -EOPNOTSUPP;
41520
41521 /* drag the command string into the kernel so we can parse it */
41522@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
41523 if (args[0] != '%' || args[1] != '\0')
41524 return -EINVAL;
41525
41526- if (fstop < 0 || fstop >= cache->fcull_percent)
41527+ if (fstop >= cache->fcull_percent)
41528 return cachefiles_daemon_range_error(cache, args);
41529
41530 cache->fstop_percent = fstop;
41531@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
41532 if (args[0] != '%' || args[1] != '\0')
41533 return -EINVAL;
41534
41535- if (bstop < 0 || bstop >= cache->bcull_percent)
41536+ if (bstop >= cache->bcull_percent)
41537 return cachefiles_daemon_range_error(cache, args);
41538
41539 cache->bstop_percent = bstop;
41540diff -urNp linux-3.1.4/fs/cachefiles/internal.h linux-3.1.4/fs/cachefiles/internal.h
41541--- linux-3.1.4/fs/cachefiles/internal.h 2011-11-11 15:19:27.000000000 -0500
41542+++ linux-3.1.4/fs/cachefiles/internal.h 2011-11-16 18:39:08.000000000 -0500
41543@@ -57,7 +57,7 @@ struct cachefiles_cache {
41544 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
41545 struct rb_root active_nodes; /* active nodes (can't be culled) */
41546 rwlock_t active_lock; /* lock for active_nodes */
41547- atomic_t gravecounter; /* graveyard uniquifier */
41548+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
41549 unsigned frun_percent; /* when to stop culling (% files) */
41550 unsigned fcull_percent; /* when to start culling (% files) */
41551 unsigned fstop_percent; /* when to stop allocating (% files) */
41552@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
41553 * proc.c
41554 */
41555 #ifdef CONFIG_CACHEFILES_HISTOGRAM
41556-extern atomic_t cachefiles_lookup_histogram[HZ];
41557-extern atomic_t cachefiles_mkdir_histogram[HZ];
41558-extern atomic_t cachefiles_create_histogram[HZ];
41559+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
41560+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
41561+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
41562
41563 extern int __init cachefiles_proc_init(void);
41564 extern void cachefiles_proc_cleanup(void);
41565 static inline
41566-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
41567+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
41568 {
41569 unsigned long jif = jiffies - start_jif;
41570 if (jif >= HZ)
41571 jif = HZ - 1;
41572- atomic_inc(&histogram[jif]);
41573+ atomic_inc_unchecked(&histogram[jif]);
41574 }
41575
41576 #else
41577diff -urNp linux-3.1.4/fs/cachefiles/namei.c linux-3.1.4/fs/cachefiles/namei.c
41578--- linux-3.1.4/fs/cachefiles/namei.c 2011-11-11 15:19:27.000000000 -0500
41579+++ linux-3.1.4/fs/cachefiles/namei.c 2011-11-16 18:39:08.000000000 -0500
41580@@ -318,7 +318,7 @@ try_again:
41581 /* first step is to make up a grave dentry in the graveyard */
41582 sprintf(nbuffer, "%08x%08x",
41583 (uint32_t) get_seconds(),
41584- (uint32_t) atomic_inc_return(&cache->gravecounter));
41585+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
41586
41587 /* do the multiway lock magic */
41588 trap = lock_rename(cache->graveyard, dir);
41589diff -urNp linux-3.1.4/fs/cachefiles/proc.c linux-3.1.4/fs/cachefiles/proc.c
41590--- linux-3.1.4/fs/cachefiles/proc.c 2011-11-11 15:19:27.000000000 -0500
41591+++ linux-3.1.4/fs/cachefiles/proc.c 2011-11-16 18:39:08.000000000 -0500
41592@@ -14,9 +14,9 @@
41593 #include <linux/seq_file.h>
41594 #include "internal.h"
41595
41596-atomic_t cachefiles_lookup_histogram[HZ];
41597-atomic_t cachefiles_mkdir_histogram[HZ];
41598-atomic_t cachefiles_create_histogram[HZ];
41599+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
41600+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
41601+atomic_unchecked_t cachefiles_create_histogram[HZ];
41602
41603 /*
41604 * display the latency histogram
41605@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
41606 return 0;
41607 default:
41608 index = (unsigned long) v - 3;
41609- x = atomic_read(&cachefiles_lookup_histogram[index]);
41610- y = atomic_read(&cachefiles_mkdir_histogram[index]);
41611- z = atomic_read(&cachefiles_create_histogram[index]);
41612+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
41613+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
41614+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
41615 if (x == 0 && y == 0 && z == 0)
41616 return 0;
41617
41618diff -urNp linux-3.1.4/fs/cachefiles/rdwr.c linux-3.1.4/fs/cachefiles/rdwr.c
41619--- linux-3.1.4/fs/cachefiles/rdwr.c 2011-11-11 15:19:27.000000000 -0500
41620+++ linux-3.1.4/fs/cachefiles/rdwr.c 2011-11-16 18:39:08.000000000 -0500
41621@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
41622 old_fs = get_fs();
41623 set_fs(KERNEL_DS);
41624 ret = file->f_op->write(
41625- file, (const void __user *) data, len, &pos);
41626+ file, (const void __force_user *) data, len, &pos);
41627 set_fs(old_fs);
41628 kunmap(page);
41629 if (ret != len)
41630diff -urNp linux-3.1.4/fs/ceph/dir.c linux-3.1.4/fs/ceph/dir.c
41631--- linux-3.1.4/fs/ceph/dir.c 2011-11-11 15:19:27.000000000 -0500
41632+++ linux-3.1.4/fs/ceph/dir.c 2011-11-16 18:39:08.000000000 -0500
41633@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *fil
41634 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
41635 struct ceph_mds_client *mdsc = fsc->mdsc;
41636 unsigned frag = fpos_frag(filp->f_pos);
41637- int off = fpos_off(filp->f_pos);
41638+ unsigned int off = fpos_off(filp->f_pos);
41639 int err;
41640 u32 ftype;
41641 struct ceph_mds_reply_info_parsed *rinfo;
41642diff -urNp linux-3.1.4/fs/cifs/cifs_debug.c linux-3.1.4/fs/cifs/cifs_debug.c
41643--- linux-3.1.4/fs/cifs/cifs_debug.c 2011-11-11 15:19:27.000000000 -0500
41644+++ linux-3.1.4/fs/cifs/cifs_debug.c 2011-11-16 18:39:08.000000000 -0500
41645@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
41646
41647 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
41648 #ifdef CONFIG_CIFS_STATS2
41649- atomic_set(&totBufAllocCount, 0);
41650- atomic_set(&totSmBufAllocCount, 0);
41651+ atomic_set_unchecked(&totBufAllocCount, 0);
41652+ atomic_set_unchecked(&totSmBufAllocCount, 0);
41653 #endif /* CONFIG_CIFS_STATS2 */
41654 spin_lock(&cifs_tcp_ses_lock);
41655 list_for_each(tmp1, &cifs_tcp_ses_list) {
41656@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
41657 tcon = list_entry(tmp3,
41658 struct cifs_tcon,
41659 tcon_list);
41660- atomic_set(&tcon->num_smbs_sent, 0);
41661- atomic_set(&tcon->num_writes, 0);
41662- atomic_set(&tcon->num_reads, 0);
41663- atomic_set(&tcon->num_oplock_brks, 0);
41664- atomic_set(&tcon->num_opens, 0);
41665- atomic_set(&tcon->num_posixopens, 0);
41666- atomic_set(&tcon->num_posixmkdirs, 0);
41667- atomic_set(&tcon->num_closes, 0);
41668- atomic_set(&tcon->num_deletes, 0);
41669- atomic_set(&tcon->num_mkdirs, 0);
41670- atomic_set(&tcon->num_rmdirs, 0);
41671- atomic_set(&tcon->num_renames, 0);
41672- atomic_set(&tcon->num_t2renames, 0);
41673- atomic_set(&tcon->num_ffirst, 0);
41674- atomic_set(&tcon->num_fnext, 0);
41675- atomic_set(&tcon->num_fclose, 0);
41676- atomic_set(&tcon->num_hardlinks, 0);
41677- atomic_set(&tcon->num_symlinks, 0);
41678- atomic_set(&tcon->num_locks, 0);
41679+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
41680+ atomic_set_unchecked(&tcon->num_writes, 0);
41681+ atomic_set_unchecked(&tcon->num_reads, 0);
41682+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
41683+ atomic_set_unchecked(&tcon->num_opens, 0);
41684+ atomic_set_unchecked(&tcon->num_posixopens, 0);
41685+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
41686+ atomic_set_unchecked(&tcon->num_closes, 0);
41687+ atomic_set_unchecked(&tcon->num_deletes, 0);
41688+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
41689+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
41690+ atomic_set_unchecked(&tcon->num_renames, 0);
41691+ atomic_set_unchecked(&tcon->num_t2renames, 0);
41692+ atomic_set_unchecked(&tcon->num_ffirst, 0);
41693+ atomic_set_unchecked(&tcon->num_fnext, 0);
41694+ atomic_set_unchecked(&tcon->num_fclose, 0);
41695+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
41696+ atomic_set_unchecked(&tcon->num_symlinks, 0);
41697+ atomic_set_unchecked(&tcon->num_locks, 0);
41698 }
41699 }
41700 }
41701@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
41702 smBufAllocCount.counter, cifs_min_small);
41703 #ifdef CONFIG_CIFS_STATS2
41704 seq_printf(m, "Total Large %d Small %d Allocations\n",
41705- atomic_read(&totBufAllocCount),
41706- atomic_read(&totSmBufAllocCount));
41707+ atomic_read_unchecked(&totBufAllocCount),
41708+ atomic_read_unchecked(&totSmBufAllocCount));
41709 #endif /* CONFIG_CIFS_STATS2 */
41710
41711 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
41712@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
41713 if (tcon->need_reconnect)
41714 seq_puts(m, "\tDISCONNECTED ");
41715 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
41716- atomic_read(&tcon->num_smbs_sent),
41717- atomic_read(&tcon->num_oplock_brks));
41718+ atomic_read_unchecked(&tcon->num_smbs_sent),
41719+ atomic_read_unchecked(&tcon->num_oplock_brks));
41720 seq_printf(m, "\nReads: %d Bytes: %lld",
41721- atomic_read(&tcon->num_reads),
41722+ atomic_read_unchecked(&tcon->num_reads),
41723 (long long)(tcon->bytes_read));
41724 seq_printf(m, "\nWrites: %d Bytes: %lld",
41725- atomic_read(&tcon->num_writes),
41726+ atomic_read_unchecked(&tcon->num_writes),
41727 (long long)(tcon->bytes_written));
41728 seq_printf(m, "\nFlushes: %d",
41729- atomic_read(&tcon->num_flushes));
41730+ atomic_read_unchecked(&tcon->num_flushes));
41731 seq_printf(m, "\nLocks: %d HardLinks: %d "
41732 "Symlinks: %d",
41733- atomic_read(&tcon->num_locks),
41734- atomic_read(&tcon->num_hardlinks),
41735- atomic_read(&tcon->num_symlinks));
41736+ atomic_read_unchecked(&tcon->num_locks),
41737+ atomic_read_unchecked(&tcon->num_hardlinks),
41738+ atomic_read_unchecked(&tcon->num_symlinks));
41739 seq_printf(m, "\nOpens: %d Closes: %d "
41740 "Deletes: %d",
41741- atomic_read(&tcon->num_opens),
41742- atomic_read(&tcon->num_closes),
41743- atomic_read(&tcon->num_deletes));
41744+ atomic_read_unchecked(&tcon->num_opens),
41745+ atomic_read_unchecked(&tcon->num_closes),
41746+ atomic_read_unchecked(&tcon->num_deletes));
41747 seq_printf(m, "\nPosix Opens: %d "
41748 "Posix Mkdirs: %d",
41749- atomic_read(&tcon->num_posixopens),
41750- atomic_read(&tcon->num_posixmkdirs));
41751+ atomic_read_unchecked(&tcon->num_posixopens),
41752+ atomic_read_unchecked(&tcon->num_posixmkdirs));
41753 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
41754- atomic_read(&tcon->num_mkdirs),
41755- atomic_read(&tcon->num_rmdirs));
41756+ atomic_read_unchecked(&tcon->num_mkdirs),
41757+ atomic_read_unchecked(&tcon->num_rmdirs));
41758 seq_printf(m, "\nRenames: %d T2 Renames %d",
41759- atomic_read(&tcon->num_renames),
41760- atomic_read(&tcon->num_t2renames));
41761+ atomic_read_unchecked(&tcon->num_renames),
41762+ atomic_read_unchecked(&tcon->num_t2renames));
41763 seq_printf(m, "\nFindFirst: %d FNext %d "
41764 "FClose %d",
41765- atomic_read(&tcon->num_ffirst),
41766- atomic_read(&tcon->num_fnext),
41767- atomic_read(&tcon->num_fclose));
41768+ atomic_read_unchecked(&tcon->num_ffirst),
41769+ atomic_read_unchecked(&tcon->num_fnext),
41770+ atomic_read_unchecked(&tcon->num_fclose));
41771 }
41772 }
41773 }
41774diff -urNp linux-3.1.4/fs/cifs/cifsfs.c linux-3.1.4/fs/cifs/cifsfs.c
41775--- linux-3.1.4/fs/cifs/cifsfs.c 2011-11-11 15:19:27.000000000 -0500
41776+++ linux-3.1.4/fs/cifs/cifsfs.c 2011-11-16 18:39:08.000000000 -0500
41777@@ -981,7 +981,7 @@ cifs_init_request_bufs(void)
41778 cifs_req_cachep = kmem_cache_create("cifs_request",
41779 CIFSMaxBufSize +
41780 MAX_CIFS_HDR_SIZE, 0,
41781- SLAB_HWCACHE_ALIGN, NULL);
41782+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
41783 if (cifs_req_cachep == NULL)
41784 return -ENOMEM;
41785
41786@@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void)
41787 efficient to alloc 1 per page off the slab compared to 17K (5page)
41788 alloc of large cifs buffers even when page debugging is on */
41789 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
41790- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
41791+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
41792 NULL);
41793 if (cifs_sm_req_cachep == NULL) {
41794 mempool_destroy(cifs_req_poolp);
41795@@ -1093,8 +1093,8 @@ init_cifs(void)
41796 atomic_set(&bufAllocCount, 0);
41797 atomic_set(&smBufAllocCount, 0);
41798 #ifdef CONFIG_CIFS_STATS2
41799- atomic_set(&totBufAllocCount, 0);
41800- atomic_set(&totSmBufAllocCount, 0);
41801+ atomic_set_unchecked(&totBufAllocCount, 0);
41802+ atomic_set_unchecked(&totSmBufAllocCount, 0);
41803 #endif /* CONFIG_CIFS_STATS2 */
41804
41805 atomic_set(&midCount, 0);
41806diff -urNp linux-3.1.4/fs/cifs/cifsglob.h linux-3.1.4/fs/cifs/cifsglob.h
41807--- linux-3.1.4/fs/cifs/cifsglob.h 2011-11-11 15:19:27.000000000 -0500
41808+++ linux-3.1.4/fs/cifs/cifsglob.h 2011-11-16 18:39:08.000000000 -0500
41809@@ -381,28 +381,28 @@ struct cifs_tcon {
41810 __u16 Flags; /* optional support bits */
41811 enum statusEnum tidStatus;
41812 #ifdef CONFIG_CIFS_STATS
41813- atomic_t num_smbs_sent;
41814- atomic_t num_writes;
41815- atomic_t num_reads;
41816- atomic_t num_flushes;
41817- atomic_t num_oplock_brks;
41818- atomic_t num_opens;
41819- atomic_t num_closes;
41820- atomic_t num_deletes;
41821- atomic_t num_mkdirs;
41822- atomic_t num_posixopens;
41823- atomic_t num_posixmkdirs;
41824- atomic_t num_rmdirs;
41825- atomic_t num_renames;
41826- atomic_t num_t2renames;
41827- atomic_t num_ffirst;
41828- atomic_t num_fnext;
41829- atomic_t num_fclose;
41830- atomic_t num_hardlinks;
41831- atomic_t num_symlinks;
41832- atomic_t num_locks;
41833- atomic_t num_acl_get;
41834- atomic_t num_acl_set;
41835+ atomic_unchecked_t num_smbs_sent;
41836+ atomic_unchecked_t num_writes;
41837+ atomic_unchecked_t num_reads;
41838+ atomic_unchecked_t num_flushes;
41839+ atomic_unchecked_t num_oplock_brks;
41840+ atomic_unchecked_t num_opens;
41841+ atomic_unchecked_t num_closes;
41842+ atomic_unchecked_t num_deletes;
41843+ atomic_unchecked_t num_mkdirs;
41844+ atomic_unchecked_t num_posixopens;
41845+ atomic_unchecked_t num_posixmkdirs;
41846+ atomic_unchecked_t num_rmdirs;
41847+ atomic_unchecked_t num_renames;
41848+ atomic_unchecked_t num_t2renames;
41849+ atomic_unchecked_t num_ffirst;
41850+ atomic_unchecked_t num_fnext;
41851+ atomic_unchecked_t num_fclose;
41852+ atomic_unchecked_t num_hardlinks;
41853+ atomic_unchecked_t num_symlinks;
41854+ atomic_unchecked_t num_locks;
41855+ atomic_unchecked_t num_acl_get;
41856+ atomic_unchecked_t num_acl_set;
41857 #ifdef CONFIG_CIFS_STATS2
41858 unsigned long long time_writes;
41859 unsigned long long time_reads;
41860@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
41861 }
41862
41863 #ifdef CONFIG_CIFS_STATS
41864-#define cifs_stats_inc atomic_inc
41865+#define cifs_stats_inc atomic_inc_unchecked
41866
41867 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41868 unsigned int bytes)
41869@@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
41870 /* Various Debug counters */
41871 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41872 #ifdef CONFIG_CIFS_STATS2
41873-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41874-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41875+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41876+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41877 #endif
41878 GLOBAL_EXTERN atomic_t smBufAllocCount;
41879 GLOBAL_EXTERN atomic_t midCount;
41880diff -urNp linux-3.1.4/fs/cifs/link.c linux-3.1.4/fs/cifs/link.c
41881--- linux-3.1.4/fs/cifs/link.c 2011-11-11 15:19:27.000000000 -0500
41882+++ linux-3.1.4/fs/cifs/link.c 2011-11-16 18:39:08.000000000 -0500
41883@@ -593,7 +593,7 @@ symlink_exit:
41884
41885 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41886 {
41887- char *p = nd_get_link(nd);
41888+ const char *p = nd_get_link(nd);
41889 if (!IS_ERR(p))
41890 kfree(p);
41891 }
41892diff -urNp linux-3.1.4/fs/cifs/misc.c linux-3.1.4/fs/cifs/misc.c
41893--- linux-3.1.4/fs/cifs/misc.c 2011-11-11 15:19:27.000000000 -0500
41894+++ linux-3.1.4/fs/cifs/misc.c 2011-11-16 18:39:08.000000000 -0500
41895@@ -156,7 +156,7 @@ cifs_buf_get(void)
41896 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41897 atomic_inc(&bufAllocCount);
41898 #ifdef CONFIG_CIFS_STATS2
41899- atomic_inc(&totBufAllocCount);
41900+ atomic_inc_unchecked(&totBufAllocCount);
41901 #endif /* CONFIG_CIFS_STATS2 */
41902 }
41903
41904@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41905 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41906 atomic_inc(&smBufAllocCount);
41907 #ifdef CONFIG_CIFS_STATS2
41908- atomic_inc(&totSmBufAllocCount);
41909+ atomic_inc_unchecked(&totSmBufAllocCount);
41910 #endif /* CONFIG_CIFS_STATS2 */
41911
41912 }
41913diff -urNp linux-3.1.4/fs/coda/cache.c linux-3.1.4/fs/coda/cache.c
41914--- linux-3.1.4/fs/coda/cache.c 2011-11-11 15:19:27.000000000 -0500
41915+++ linux-3.1.4/fs/coda/cache.c 2011-11-16 18:39:08.000000000 -0500
41916@@ -24,7 +24,7 @@
41917 #include "coda_linux.h"
41918 #include "coda_cache.h"
41919
41920-static atomic_t permission_epoch = ATOMIC_INIT(0);
41921+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41922
41923 /* replace or extend an acl cache hit */
41924 void coda_cache_enter(struct inode *inode, int mask)
41925@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
41926 struct coda_inode_info *cii = ITOC(inode);
41927
41928 spin_lock(&cii->c_lock);
41929- cii->c_cached_epoch = atomic_read(&permission_epoch);
41930+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41931 if (cii->c_uid != current_fsuid()) {
41932 cii->c_uid = current_fsuid();
41933 cii->c_cached_perm = mask;
41934@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
41935 {
41936 struct coda_inode_info *cii = ITOC(inode);
41937 spin_lock(&cii->c_lock);
41938- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41939+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41940 spin_unlock(&cii->c_lock);
41941 }
41942
41943 /* remove all acl caches */
41944 void coda_cache_clear_all(struct super_block *sb)
41945 {
41946- atomic_inc(&permission_epoch);
41947+ atomic_inc_unchecked(&permission_epoch);
41948 }
41949
41950
41951@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
41952 spin_lock(&cii->c_lock);
41953 hit = (mask & cii->c_cached_perm) == mask &&
41954 cii->c_uid == current_fsuid() &&
41955- cii->c_cached_epoch == atomic_read(&permission_epoch);
41956+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41957 spin_unlock(&cii->c_lock);
41958
41959 return hit;
41960diff -urNp linux-3.1.4/fs/compat_binfmt_elf.c linux-3.1.4/fs/compat_binfmt_elf.c
41961--- linux-3.1.4/fs/compat_binfmt_elf.c 2011-11-11 15:19:27.000000000 -0500
41962+++ linux-3.1.4/fs/compat_binfmt_elf.c 2011-11-16 18:39:08.000000000 -0500
41963@@ -30,11 +30,13 @@
41964 #undef elf_phdr
41965 #undef elf_shdr
41966 #undef elf_note
41967+#undef elf_dyn
41968 #undef elf_addr_t
41969 #define elfhdr elf32_hdr
41970 #define elf_phdr elf32_phdr
41971 #define elf_shdr elf32_shdr
41972 #define elf_note elf32_note
41973+#define elf_dyn Elf32_Dyn
41974 #define elf_addr_t Elf32_Addr
41975
41976 /*
41977diff -urNp linux-3.1.4/fs/compat.c linux-3.1.4/fs/compat.c
41978--- linux-3.1.4/fs/compat.c 2011-11-11 15:19:27.000000000 -0500
41979+++ linux-3.1.4/fs/compat.c 2011-11-16 18:40:29.000000000 -0500
41980@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
41981 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41982 {
41983 compat_ino_t ino = stat->ino;
41984- typeof(ubuf->st_uid) uid = 0;
41985- typeof(ubuf->st_gid) gid = 0;
41986+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41987+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41988 int err;
41989
41990 SET_UID(uid, stat->uid);
41991@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
41992
41993 set_fs(KERNEL_DS);
41994 /* The __user pointer cast is valid because of the set_fs() */
41995- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41996+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41997 set_fs(oldfs);
41998 /* truncating is ok because it's a user address */
41999 if (!ret)
42000@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
42001 goto out;
42002
42003 ret = -EINVAL;
42004- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
42005+ if (nr_segs > UIO_MAXIOV)
42006 goto out;
42007 if (nr_segs > fast_segs) {
42008 ret = -ENOMEM;
42009@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
42010
42011 struct compat_readdir_callback {
42012 struct compat_old_linux_dirent __user *dirent;
42013+ struct file * file;
42014 int result;
42015 };
42016
42017@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
42018 buf->result = -EOVERFLOW;
42019 return -EOVERFLOW;
42020 }
42021+
42022+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42023+ return 0;
42024+
42025 buf->result++;
42026 dirent = buf->dirent;
42027 if (!access_ok(VERIFY_WRITE, dirent,
42028@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
42029
42030 buf.result = 0;
42031 buf.dirent = dirent;
42032+ buf.file = file;
42033
42034 error = vfs_readdir(file, compat_fillonedir, &buf);
42035 if (buf.result)
42036@@ -917,6 +923,7 @@ struct compat_linux_dirent {
42037 struct compat_getdents_callback {
42038 struct compat_linux_dirent __user *current_dir;
42039 struct compat_linux_dirent __user *previous;
42040+ struct file * file;
42041 int count;
42042 int error;
42043 };
42044@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
42045 buf->error = -EOVERFLOW;
42046 return -EOVERFLOW;
42047 }
42048+
42049+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42050+ return 0;
42051+
42052 dirent = buf->previous;
42053 if (dirent) {
42054 if (__put_user(offset, &dirent->d_off))
42055@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
42056 buf.previous = NULL;
42057 buf.count = count;
42058 buf.error = 0;
42059+ buf.file = file;
42060
42061 error = vfs_readdir(file, compat_filldir, &buf);
42062 if (error >= 0)
42063@@ -1006,6 +1018,7 @@ out:
42064 struct compat_getdents_callback64 {
42065 struct linux_dirent64 __user *current_dir;
42066 struct linux_dirent64 __user *previous;
42067+ struct file * file;
42068 int count;
42069 int error;
42070 };
42071@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
42072 buf->error = -EINVAL; /* only used if we fail.. */
42073 if (reclen > buf->count)
42074 return -EINVAL;
42075+
42076+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42077+ return 0;
42078+
42079 dirent = buf->previous;
42080
42081 if (dirent) {
42082@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
42083 buf.previous = NULL;
42084 buf.count = count;
42085 buf.error = 0;
42086+ buf.file = file;
42087
42088 error = vfs_readdir(file, compat_filldir64, &buf);
42089 if (error >= 0)
42090 error = buf.error;
42091 lastdirent = buf.previous;
42092 if (lastdirent) {
42093- typeof(lastdirent->d_off) d_off = file->f_pos;
42094+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
42095 if (__put_user_unaligned(d_off, &lastdirent->d_off))
42096 error = -EFAULT;
42097 else
42098@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
42099 struct fdtable *fdt;
42100 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
42101
42102+ pax_track_stack();
42103+
42104 if (n < 0)
42105 goto out_nofds;
42106
42107diff -urNp linux-3.1.4/fs/compat_ioctl.c linux-3.1.4/fs/compat_ioctl.c
42108--- linux-3.1.4/fs/compat_ioctl.c 2011-11-11 15:19:27.000000000 -0500
42109+++ linux-3.1.4/fs/compat_ioctl.c 2011-11-16 18:39:08.000000000 -0500
42110@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsi
42111
42112 err = get_user(palp, &up->palette);
42113 err |= get_user(length, &up->length);
42114+ if (err)
42115+ return -EFAULT;
42116
42117 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
42118 err = put_user(compat_ptr(palp), &up_native->palette);
42119@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned
42120 return -EFAULT;
42121 if (__get_user(udata, &ss32->iomem_base))
42122 return -EFAULT;
42123- ss.iomem_base = compat_ptr(udata);
42124+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
42125 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
42126 __get_user(ss.port_high, &ss32->port_high))
42127 return -EFAULT;
42128@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(stru
42129 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
42130 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
42131 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
42132- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42133+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42134 return -EFAULT;
42135
42136 return ioctl_preallocate(file, p);
42137@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigne
42138 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
42139 {
42140 unsigned int a, b;
42141- a = *(unsigned int *)p;
42142- b = *(unsigned int *)q;
42143+ a = *(const unsigned int *)p;
42144+ b = *(const unsigned int *)q;
42145 if (a > b)
42146 return 1;
42147 if (a < b)
42148diff -urNp linux-3.1.4/fs/configfs/dir.c linux-3.1.4/fs/configfs/dir.c
42149--- linux-3.1.4/fs/configfs/dir.c 2011-11-11 15:19:27.000000000 -0500
42150+++ linux-3.1.4/fs/configfs/dir.c 2011-11-16 18:39:08.000000000 -0500
42151@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
42152 }
42153 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
42154 struct configfs_dirent *next;
42155- const char * name;
42156+ const unsigned char * name;
42157+ char d_name[sizeof(next->s_dentry->d_iname)];
42158 int len;
42159 struct inode *inode = NULL;
42160
42161@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
42162 continue;
42163
42164 name = configfs_get_name(next);
42165- len = strlen(name);
42166+ if (next->s_dentry && name == next->s_dentry->d_iname) {
42167+ len = next->s_dentry->d_name.len;
42168+ memcpy(d_name, name, len);
42169+ name = d_name;
42170+ } else
42171+ len = strlen(name);
42172
42173 /*
42174 * We'll have a dentry and an inode for
42175diff -urNp linux-3.1.4/fs/dcache.c linux-3.1.4/fs/dcache.c
42176--- linux-3.1.4/fs/dcache.c 2011-11-11 15:19:27.000000000 -0500
42177+++ linux-3.1.4/fs/dcache.c 2011-11-16 18:39:08.000000000 -0500
42178@@ -2998,7 +2998,7 @@ void __init vfs_caches_init(unsigned lon
42179 mempages -= reserve;
42180
42181 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
42182- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
42183+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
42184
42185 dcache_init();
42186 inode_init();
42187diff -urNp linux-3.1.4/fs/ecryptfs/inode.c linux-3.1.4/fs/ecryptfs/inode.c
42188--- linux-3.1.4/fs/ecryptfs/inode.c 2011-11-11 15:19:27.000000000 -0500
42189+++ linux-3.1.4/fs/ecryptfs/inode.c 2011-11-16 18:39:08.000000000 -0500
42190@@ -681,7 +681,7 @@ static int ecryptfs_readlink_lower(struc
42191 old_fs = get_fs();
42192 set_fs(get_ds());
42193 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
42194- (char __user *)lower_buf,
42195+ (char __force_user *)lower_buf,
42196 lower_bufsiz);
42197 set_fs(old_fs);
42198 if (rc < 0)
42199@@ -727,7 +727,7 @@ static void *ecryptfs_follow_link(struct
42200 }
42201 old_fs = get_fs();
42202 set_fs(get_ds());
42203- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
42204+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
42205 set_fs(old_fs);
42206 if (rc < 0) {
42207 kfree(buf);
42208@@ -742,7 +742,7 @@ out:
42209 static void
42210 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
42211 {
42212- char *buf = nd_get_link(nd);
42213+ const char *buf = nd_get_link(nd);
42214 if (!IS_ERR(buf)) {
42215 /* Free the char* */
42216 kfree(buf);
42217diff -urNp linux-3.1.4/fs/ecryptfs/miscdev.c linux-3.1.4/fs/ecryptfs/miscdev.c
42218--- linux-3.1.4/fs/ecryptfs/miscdev.c 2011-11-11 15:19:27.000000000 -0500
42219+++ linux-3.1.4/fs/ecryptfs/miscdev.c 2011-11-16 18:39:08.000000000 -0500
42220@@ -328,7 +328,7 @@ check_list:
42221 goto out_unlock_msg_ctx;
42222 i = 5;
42223 if (msg_ctx->msg) {
42224- if (copy_to_user(&buf[i], packet_length, packet_length_size))
42225+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
42226 goto out_unlock_msg_ctx;
42227 i += packet_length_size;
42228 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
42229diff -urNp linux-3.1.4/fs/ecryptfs/read_write.c linux-3.1.4/fs/ecryptfs/read_write.c
42230--- linux-3.1.4/fs/ecryptfs/read_write.c 2011-11-11 15:19:27.000000000 -0500
42231+++ linux-3.1.4/fs/ecryptfs/read_write.c 2011-11-16 18:39:08.000000000 -0500
42232@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
42233 return -EIO;
42234 fs_save = get_fs();
42235 set_fs(get_ds());
42236- rc = vfs_write(lower_file, data, size, &offset);
42237+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
42238 set_fs(fs_save);
42239 mark_inode_dirty_sync(ecryptfs_inode);
42240 return rc;
42241@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
42242 return -EIO;
42243 fs_save = get_fs();
42244 set_fs(get_ds());
42245- rc = vfs_read(lower_file, data, size, &offset);
42246+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
42247 set_fs(fs_save);
42248 return rc;
42249 }
42250diff -urNp linux-3.1.4/fs/exec.c linux-3.1.4/fs/exec.c
42251--- linux-3.1.4/fs/exec.c 2011-11-11 15:19:27.000000000 -0500
42252+++ linux-3.1.4/fs/exec.c 2011-11-17 18:40:47.000000000 -0500
42253@@ -55,12 +55,24 @@
42254 #include <linux/pipe_fs_i.h>
42255 #include <linux/oom.h>
42256 #include <linux/compat.h>
42257+#include <linux/random.h>
42258+#include <linux/seq_file.h>
42259+
42260+#ifdef CONFIG_PAX_REFCOUNT
42261+#include <linux/kallsyms.h>
42262+#include <linux/kdebug.h>
42263+#endif
42264
42265 #include <asm/uaccess.h>
42266 #include <asm/mmu_context.h>
42267 #include <asm/tlb.h>
42268 #include "internal.h"
42269
42270+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
42271+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
42272+EXPORT_SYMBOL(pax_set_initial_flags_func);
42273+#endif
42274+
42275 int core_uses_pid;
42276 char core_pattern[CORENAME_MAX_SIZE] = "core";
42277 unsigned int core_pipe_limit;
42278@@ -70,7 +82,7 @@ struct core_name {
42279 char *corename;
42280 int used, size;
42281 };
42282-static atomic_t call_count = ATOMIC_INIT(1);
42283+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
42284
42285 /* The maximal length of core_pattern is also specified in sysctl.c */
42286
42287@@ -188,18 +200,10 @@ static struct page *get_arg_page(struct
42288 int write)
42289 {
42290 struct page *page;
42291- int ret;
42292
42293-#ifdef CONFIG_STACK_GROWSUP
42294- if (write) {
42295- ret = expand_downwards(bprm->vma, pos);
42296- if (ret < 0)
42297- return NULL;
42298- }
42299-#endif
42300- ret = get_user_pages(current, bprm->mm, pos,
42301- 1, write, 1, &page, NULL);
42302- if (ret <= 0)
42303+ if (0 > expand_downwards(bprm->vma, pos))
42304+ return NULL;
42305+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
42306 return NULL;
42307
42308 if (write) {
42309@@ -274,6 +278,11 @@ static int __bprm_mm_init(struct linux_b
42310 vma->vm_end = STACK_TOP_MAX;
42311 vma->vm_start = vma->vm_end - PAGE_SIZE;
42312 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
42313+
42314+#ifdef CONFIG_PAX_SEGMEXEC
42315+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
42316+#endif
42317+
42318 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
42319 INIT_LIST_HEAD(&vma->anon_vma_chain);
42320
42321@@ -288,6 +297,12 @@ static int __bprm_mm_init(struct linux_b
42322 mm->stack_vm = mm->total_vm = 1;
42323 up_write(&mm->mmap_sem);
42324 bprm->p = vma->vm_end - sizeof(void *);
42325+
42326+#ifdef CONFIG_PAX_RANDUSTACK
42327+ if (randomize_va_space)
42328+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
42329+#endif
42330+
42331 return 0;
42332 err:
42333 up_write(&mm->mmap_sem);
42334@@ -396,19 +411,7 @@ err:
42335 return err;
42336 }
42337
42338-struct user_arg_ptr {
42339-#ifdef CONFIG_COMPAT
42340- bool is_compat;
42341-#endif
42342- union {
42343- const char __user *const __user *native;
42344-#ifdef CONFIG_COMPAT
42345- compat_uptr_t __user *compat;
42346-#endif
42347- } ptr;
42348-};
42349-
42350-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
42351+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
42352 {
42353 const char __user *native;
42354
42355@@ -417,14 +420,14 @@ static const char __user *get_user_arg_p
42356 compat_uptr_t compat;
42357
42358 if (get_user(compat, argv.ptr.compat + nr))
42359- return ERR_PTR(-EFAULT);
42360+ return (const char __force_user *)ERR_PTR(-EFAULT);
42361
42362 return compat_ptr(compat);
42363 }
42364 #endif
42365
42366 if (get_user(native, argv.ptr.native + nr))
42367- return ERR_PTR(-EFAULT);
42368+ return (const char __force_user *)ERR_PTR(-EFAULT);
42369
42370 return native;
42371 }
42372@@ -443,7 +446,7 @@ static int count(struct user_arg_ptr arg
42373 if (!p)
42374 break;
42375
42376- if (IS_ERR(p))
42377+ if (IS_ERR((const char __force_kernel *)p))
42378 return -EFAULT;
42379
42380 if (i++ >= max)
42381@@ -477,7 +480,7 @@ static int copy_strings(int argc, struct
42382
42383 ret = -EFAULT;
42384 str = get_user_arg_ptr(argv, argc);
42385- if (IS_ERR(str))
42386+ if (IS_ERR((const char __force_kernel *)str))
42387 goto out;
42388
42389 len = strnlen_user(str, MAX_ARG_STRLEN);
42390@@ -559,7 +562,7 @@ int copy_strings_kernel(int argc, const
42391 int r;
42392 mm_segment_t oldfs = get_fs();
42393 struct user_arg_ptr argv = {
42394- .ptr.native = (const char __user *const __user *)__argv,
42395+ .ptr.native = (const char __force_user *const __force_user *)__argv,
42396 };
42397
42398 set_fs(KERNEL_DS);
42399@@ -594,7 +597,8 @@ static int shift_arg_pages(struct vm_are
42400 unsigned long new_end = old_end - shift;
42401 struct mmu_gather tlb;
42402
42403- BUG_ON(new_start > new_end);
42404+ if (new_start >= new_end || new_start < mmap_min_addr)
42405+ return -ENOMEM;
42406
42407 /*
42408 * ensure there are no vmas between where we want to go
42409@@ -603,6 +607,10 @@ static int shift_arg_pages(struct vm_are
42410 if (vma != find_vma(mm, new_start))
42411 return -EFAULT;
42412
42413+#ifdef CONFIG_PAX_SEGMEXEC
42414+ BUG_ON(pax_find_mirror_vma(vma));
42415+#endif
42416+
42417 /*
42418 * cover the whole range: [new_start, old_end)
42419 */
42420@@ -683,10 +691,6 @@ int setup_arg_pages(struct linux_binprm
42421 stack_top = arch_align_stack(stack_top);
42422 stack_top = PAGE_ALIGN(stack_top);
42423
42424- if (unlikely(stack_top < mmap_min_addr) ||
42425- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
42426- return -ENOMEM;
42427-
42428 stack_shift = vma->vm_end - stack_top;
42429
42430 bprm->p -= stack_shift;
42431@@ -698,8 +702,28 @@ int setup_arg_pages(struct linux_binprm
42432 bprm->exec -= stack_shift;
42433
42434 down_write(&mm->mmap_sem);
42435+
42436+ /* Move stack pages down in memory. */
42437+ if (stack_shift) {
42438+ ret = shift_arg_pages(vma, stack_shift);
42439+ if (ret)
42440+ goto out_unlock;
42441+ }
42442+
42443 vm_flags = VM_STACK_FLAGS;
42444
42445+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42446+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42447+ vm_flags &= ~VM_EXEC;
42448+
42449+#ifdef CONFIG_PAX_MPROTECT
42450+ if (mm->pax_flags & MF_PAX_MPROTECT)
42451+ vm_flags &= ~VM_MAYEXEC;
42452+#endif
42453+
42454+ }
42455+#endif
42456+
42457 /*
42458 * Adjust stack execute permissions; explicitly enable for
42459 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
42460@@ -718,13 +742,6 @@ int setup_arg_pages(struct linux_binprm
42461 goto out_unlock;
42462 BUG_ON(prev != vma);
42463
42464- /* Move stack pages down in memory. */
42465- if (stack_shift) {
42466- ret = shift_arg_pages(vma, stack_shift);
42467- if (ret)
42468- goto out_unlock;
42469- }
42470-
42471 /* mprotect_fixup is overkill to remove the temporary stack flags */
42472 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
42473
42474@@ -805,7 +822,7 @@ int kernel_read(struct file *file, loff_
42475 old_fs = get_fs();
42476 set_fs(get_ds());
42477 /* The cast to a user pointer is valid due to the set_fs() */
42478- result = vfs_read(file, (void __user *)addr, count, &pos);
42479+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
42480 set_fs(old_fs);
42481 return result;
42482 }
42483@@ -1251,7 +1268,7 @@ int check_unsafe_exec(struct linux_binpr
42484 }
42485 rcu_read_unlock();
42486
42487- if (p->fs->users > n_fs) {
42488+ if (atomic_read(&p->fs->users) > n_fs) {
42489 bprm->unsafe |= LSM_UNSAFE_SHARE;
42490 } else {
42491 res = -EAGAIN;
42492@@ -1454,6 +1471,11 @@ static int do_execve_common(const char *
42493 struct user_arg_ptr envp,
42494 struct pt_regs *regs)
42495 {
42496+#ifdef CONFIG_GRKERNSEC
42497+ struct file *old_exec_file;
42498+ struct acl_subject_label *old_acl;
42499+ struct rlimit old_rlim[RLIM_NLIMITS];
42500+#endif
42501 struct linux_binprm *bprm;
42502 struct file *file;
42503 struct files_struct *displaced;
42504@@ -1461,6 +1483,8 @@ static int do_execve_common(const char *
42505 int retval;
42506 const struct cred *cred = current_cred();
42507
42508+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
42509+
42510 /*
42511 * We move the actual failure in case of RLIMIT_NPROC excess from
42512 * set*uid() to execve() because too many poorly written programs
42513@@ -1507,6 +1531,16 @@ static int do_execve_common(const char *
42514 bprm->filename = filename;
42515 bprm->interp = filename;
42516
42517+ if (gr_process_user_ban()) {
42518+ retval = -EPERM;
42519+ goto out_file;
42520+ }
42521+
42522+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
42523+ retval = -EACCES;
42524+ goto out_file;
42525+ }
42526+
42527 retval = bprm_mm_init(bprm);
42528 if (retval)
42529 goto out_file;
42530@@ -1536,9 +1570,40 @@ static int do_execve_common(const char *
42531 if (retval < 0)
42532 goto out;
42533
42534+ if (!gr_tpe_allow(file)) {
42535+ retval = -EACCES;
42536+ goto out;
42537+ }
42538+
42539+ if (gr_check_crash_exec(file)) {
42540+ retval = -EACCES;
42541+ goto out;
42542+ }
42543+
42544+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
42545+
42546+ gr_handle_exec_args(bprm, argv);
42547+
42548+#ifdef CONFIG_GRKERNSEC
42549+ old_acl = current->acl;
42550+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
42551+ old_exec_file = current->exec_file;
42552+ get_file(file);
42553+ current->exec_file = file;
42554+#endif
42555+
42556+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
42557+ bprm->unsafe & LSM_UNSAFE_SHARE);
42558+ if (retval < 0)
42559+ goto out_fail;
42560+
42561 retval = search_binary_handler(bprm,regs);
42562 if (retval < 0)
42563- goto out;
42564+ goto out_fail;
42565+#ifdef CONFIG_GRKERNSEC
42566+ if (old_exec_file)
42567+ fput(old_exec_file);
42568+#endif
42569
42570 /* execve succeeded */
42571 current->fs->in_exec = 0;
42572@@ -1549,6 +1614,14 @@ static int do_execve_common(const char *
42573 put_files_struct(displaced);
42574 return retval;
42575
42576+out_fail:
42577+#ifdef CONFIG_GRKERNSEC
42578+ current->acl = old_acl;
42579+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
42580+ fput(current->exec_file);
42581+ current->exec_file = old_exec_file;
42582+#endif
42583+
42584 out:
42585 if (bprm->mm) {
42586 acct_arg_size(bprm, 0);
42587@@ -1622,7 +1695,7 @@ static int expand_corename(struct core_n
42588 {
42589 char *old_corename = cn->corename;
42590
42591- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
42592+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
42593 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
42594
42595 if (!cn->corename) {
42596@@ -1719,7 +1792,7 @@ static int format_corename(struct core_n
42597 int pid_in_pattern = 0;
42598 int err = 0;
42599
42600- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
42601+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
42602 cn->corename = kmalloc(cn->size, GFP_KERNEL);
42603 cn->used = 0;
42604
42605@@ -1816,6 +1889,218 @@ out:
42606 return ispipe;
42607 }
42608
42609+int pax_check_flags(unsigned long *flags)
42610+{
42611+ int retval = 0;
42612+
42613+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
42614+ if (*flags & MF_PAX_SEGMEXEC)
42615+ {
42616+ *flags &= ~MF_PAX_SEGMEXEC;
42617+ retval = -EINVAL;
42618+ }
42619+#endif
42620+
42621+ if ((*flags & MF_PAX_PAGEEXEC)
42622+
42623+#ifdef CONFIG_PAX_PAGEEXEC
42624+ && (*flags & MF_PAX_SEGMEXEC)
42625+#endif
42626+
42627+ )
42628+ {
42629+ *flags &= ~MF_PAX_PAGEEXEC;
42630+ retval = -EINVAL;
42631+ }
42632+
42633+ if ((*flags & MF_PAX_MPROTECT)
42634+
42635+#ifdef CONFIG_PAX_MPROTECT
42636+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42637+#endif
42638+
42639+ )
42640+ {
42641+ *flags &= ~MF_PAX_MPROTECT;
42642+ retval = -EINVAL;
42643+ }
42644+
42645+ if ((*flags & MF_PAX_EMUTRAMP)
42646+
42647+#ifdef CONFIG_PAX_EMUTRAMP
42648+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42649+#endif
42650+
42651+ )
42652+ {
42653+ *flags &= ~MF_PAX_EMUTRAMP;
42654+ retval = -EINVAL;
42655+ }
42656+
42657+ return retval;
42658+}
42659+
42660+EXPORT_SYMBOL(pax_check_flags);
42661+
42662+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42663+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
42664+{
42665+ struct task_struct *tsk = current;
42666+ struct mm_struct *mm = current->mm;
42667+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
42668+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
42669+ char *path_exec = NULL;
42670+ char *path_fault = NULL;
42671+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
42672+
42673+ if (buffer_exec && buffer_fault) {
42674+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
42675+
42676+ down_read(&mm->mmap_sem);
42677+ vma = mm->mmap;
42678+ while (vma && (!vma_exec || !vma_fault)) {
42679+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
42680+ vma_exec = vma;
42681+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
42682+ vma_fault = vma;
42683+ vma = vma->vm_next;
42684+ }
42685+ if (vma_exec) {
42686+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
42687+ if (IS_ERR(path_exec))
42688+ path_exec = "<path too long>";
42689+ else {
42690+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42691+ if (path_exec) {
42692+ *path_exec = 0;
42693+ path_exec = buffer_exec;
42694+ } else
42695+ path_exec = "<path too long>";
42696+ }
42697+ }
42698+ if (vma_fault) {
42699+ start = vma_fault->vm_start;
42700+ end = vma_fault->vm_end;
42701+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42702+ if (vma_fault->vm_file) {
42703+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42704+ if (IS_ERR(path_fault))
42705+ path_fault = "<path too long>";
42706+ else {
42707+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42708+ if (path_fault) {
42709+ *path_fault = 0;
42710+ path_fault = buffer_fault;
42711+ } else
42712+ path_fault = "<path too long>";
42713+ }
42714+ } else
42715+ path_fault = "<anonymous mapping>";
42716+ }
42717+ up_read(&mm->mmap_sem);
42718+ }
42719+ if (tsk->signal->curr_ip)
42720+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42721+ else
42722+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42723+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42724+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42725+ task_uid(tsk), task_euid(tsk), pc, sp);
42726+ free_page((unsigned long)buffer_exec);
42727+ free_page((unsigned long)buffer_fault);
42728+ pax_report_insns(regs, pc, sp);
42729+ do_coredump(SIGKILL, SIGKILL, regs);
42730+}
42731+#endif
42732+
42733+#ifdef CONFIG_PAX_REFCOUNT
42734+void pax_report_refcount_overflow(struct pt_regs *regs)
42735+{
42736+ if (current->signal->curr_ip)
42737+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42738+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42739+ else
42740+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42741+ current->comm, task_pid_nr(current), current_uid(), current_euid());
42742+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42743+ show_regs(regs);
42744+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42745+}
42746+#endif
42747+
42748+#ifdef CONFIG_PAX_USERCOPY
42749+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42750+int object_is_on_stack(const void *obj, unsigned long len)
42751+{
42752+ const void * const stack = task_stack_page(current);
42753+ const void * const stackend = stack + THREAD_SIZE;
42754+
42755+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42756+ const void *frame = NULL;
42757+ const void *oldframe;
42758+#endif
42759+
42760+ if (obj + len < obj)
42761+ return -1;
42762+
42763+ if (obj + len <= stack || stackend <= obj)
42764+ return 0;
42765+
42766+ if (obj < stack || stackend < obj + len)
42767+ return -1;
42768+
42769+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42770+ oldframe = __builtin_frame_address(1);
42771+ if (oldframe)
42772+ frame = __builtin_frame_address(2);
42773+ /*
42774+ low ----------------------------------------------> high
42775+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
42776+ ^----------------^
42777+ allow copies only within here
42778+ */
42779+ while (stack <= frame && frame < stackend) {
42780+ /* if obj + len extends past the last frame, this
42781+ check won't pass and the next frame will be 0,
42782+ causing us to bail out and correctly report
42783+ the copy as invalid
42784+ */
42785+ if (obj + len <= frame)
42786+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42787+ oldframe = frame;
42788+ frame = *(const void * const *)frame;
42789+ }
42790+ return -1;
42791+#else
42792+ return 1;
42793+#endif
42794+}
42795+
42796+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42797+{
42798+ if (current->signal->curr_ip)
42799+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42800+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42801+ else
42802+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42803+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42804+ dump_stack();
42805+ gr_handle_kernel_exploit();
42806+ do_group_exit(SIGKILL);
42807+}
42808+#endif
42809+
42810+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42811+void pax_track_stack(void)
42812+{
42813+ unsigned long sp = (unsigned long)&sp;
42814+ if (sp < current_thread_info()->lowest_stack &&
42815+ sp > (unsigned long)task_stack_page(current))
42816+ current_thread_info()->lowest_stack = sp;
42817+}
42818+EXPORT_SYMBOL(pax_track_stack);
42819+#endif
42820+
42821 static int zap_process(struct task_struct *start, int exit_code)
42822 {
42823 struct task_struct *t;
42824@@ -2027,17 +2312,17 @@ static void wait_for_dump_helpers(struct
42825 pipe = file->f_path.dentry->d_inode->i_pipe;
42826
42827 pipe_lock(pipe);
42828- pipe->readers++;
42829- pipe->writers--;
42830+ atomic_inc(&pipe->readers);
42831+ atomic_dec(&pipe->writers);
42832
42833- while ((pipe->readers > 1) && (!signal_pending(current))) {
42834+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42835 wake_up_interruptible_sync(&pipe->wait);
42836 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42837 pipe_wait(pipe);
42838 }
42839
42840- pipe->readers--;
42841- pipe->writers++;
42842+ atomic_dec(&pipe->readers);
42843+ atomic_inc(&pipe->writers);
42844 pipe_unlock(pipe);
42845
42846 }
42847@@ -2098,7 +2383,7 @@ void do_coredump(long signr, int exit_co
42848 int retval = 0;
42849 int flag = 0;
42850 int ispipe;
42851- static atomic_t core_dump_count = ATOMIC_INIT(0);
42852+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42853 struct coredump_params cprm = {
42854 .signr = signr,
42855 .regs = regs,
42856@@ -2113,6 +2398,9 @@ void do_coredump(long signr, int exit_co
42857
42858 audit_core_dumps(signr);
42859
42860+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42861+ gr_handle_brute_attach(current, cprm.mm_flags);
42862+
42863 binfmt = mm->binfmt;
42864 if (!binfmt || !binfmt->core_dump)
42865 goto fail;
42866@@ -2180,7 +2468,7 @@ void do_coredump(long signr, int exit_co
42867 }
42868 cprm.limit = RLIM_INFINITY;
42869
42870- dump_count = atomic_inc_return(&core_dump_count);
42871+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
42872 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42873 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42874 task_tgid_vnr(current), current->comm);
42875@@ -2207,6 +2495,8 @@ void do_coredump(long signr, int exit_co
42876 } else {
42877 struct inode *inode;
42878
42879+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42880+
42881 if (cprm.limit < binfmt->min_coredump)
42882 goto fail_unlock;
42883
42884@@ -2250,7 +2540,7 @@ close_fail:
42885 filp_close(cprm.file, NULL);
42886 fail_dropcount:
42887 if (ispipe)
42888- atomic_dec(&core_dump_count);
42889+ atomic_dec_unchecked(&core_dump_count);
42890 fail_unlock:
42891 kfree(cn.corename);
42892 fail_corename:
42893@@ -2269,7 +2559,7 @@ fail:
42894 */
42895 int dump_write(struct file *file, const void *addr, int nr)
42896 {
42897- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42898+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42899 }
42900 EXPORT_SYMBOL(dump_write);
42901
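The fs/exec.c hunks above add the PaX reporting helpers and, under CONFIG_PAX_USERCOPY, object_is_on_stack(), which classifies a usercopy buffer against the current task's stack (0: not on the stack, 1: fully on it, 2: fully inside the active frames, -1: partial overlap, an error) before the copy proceeds. The stand-alone sketch below reproduces only the bounds classification; the stack limits are placeholders for task_stack_page()/THREAD_SIZE and the x86 frame-pointer walk that refines 1 into 2 is omitted, so this illustrates the overlap logic rather than the in-kernel code.

#include <stdint.h>
#include <stdio.h>

/* Classify a buffer against a [stack, stackend) range, mirroring the
 * return convention in the patch: 0 = not on the stack, 1 = fully on
 * the stack, -1 = wrap-around or partial overlap (treated as an error). */
static int obj_on_stack(uintptr_t stack, uintptr_t stackend,
                        uintptr_t obj, unsigned long len)
{
	if (obj + len < obj)			/* length wraps the address space */
		return -1;
	if (obj + len <= stack || stackend <= obj)
		return 0;			/* entirely outside the stack */
	if (obj < stack || stackend < obj + len)
		return -1;			/* straddles a stack boundary */
	return 1;				/* fully inside the stack */
}

int main(void)
{
	char stack[8192];			/* stands in for the task stack */
	uintptr_t lo = (uintptr_t)stack, hi = lo + sizeof(stack);

	printf("%d %d %d\n",
	       obj_on_stack(lo, hi, lo + 100, 32),	/* 1: fully inside */
	       obj_on_stack(lo, hi, lo + 8100, 200),	/* -1: partial     */
	       obj_on_stack(lo, hi, hi + 4096, 16));	/* 0: not on stack */
	return 0;
}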
42902diff -urNp linux-3.1.4/fs/ext2/balloc.c linux-3.1.4/fs/ext2/balloc.c
42903--- linux-3.1.4/fs/ext2/balloc.c 2011-11-11 15:19:27.000000000 -0500
42904+++ linux-3.1.4/fs/ext2/balloc.c 2011-11-16 18:40:29.000000000 -0500
42905@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
42906
42907 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42908 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42909- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42910+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42911 sbi->s_resuid != current_fsuid() &&
42912 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42913 return 0;
42914diff -urNp linux-3.1.4/fs/ext3/balloc.c linux-3.1.4/fs/ext3/balloc.c
42915--- linux-3.1.4/fs/ext3/balloc.c 2011-11-11 15:19:27.000000000 -0500
42916+++ linux-3.1.4/fs/ext3/balloc.c 2011-11-16 18:40:29.000000000 -0500
42917@@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct e
42918
42919 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42920 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42921- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42922+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42923 sbi->s_resuid != current_fsuid() &&
42924 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42925 return 0;
42926diff -urNp linux-3.1.4/fs/ext4/balloc.c linux-3.1.4/fs/ext4/balloc.c
42927--- linux-3.1.4/fs/ext4/balloc.c 2011-11-11 15:19:27.000000000 -0500
42928+++ linux-3.1.4/fs/ext4/balloc.c 2011-11-16 18:40:29.000000000 -0500
42929@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
42930 /* Hm, nope. Are (enough) root reserved blocks available? */
42931 if (sbi->s_resuid == current_fsuid() ||
42932 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42933- capable(CAP_SYS_RESOURCE) ||
42934- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42935+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42936+ capable_nolog(CAP_SYS_RESOURCE)) {
42937
42938 if (free_blocks >= (nblocks + dirty_blocks))
42939 return 1;
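The ext2, ext3 and ext4 balloc hunks above make the same change to the "has free blocks" test: the privilege check becomes capable_nolog(CAP_SYS_RESOURCE), the grsecurity variant that performs the same capability check without generating log or learning entries, since this path runs on ordinary allocations; the ext4 hunk additionally moves the capability test after the cheap flag test. Reduced to a hedged stand-alone sketch with invented field names (the real code reads the superblock's free-block counter and s_r_blocks_count), the reserved-block logic looks like this:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, much-simplified stand-in for ext*_has_free_blocks(). */
struct fs_state {
	long long free_blocks;		/* blocks currently free                 */
	long long root_blocks;		/* blocks reserved for the superuser     */
	bool is_reserved_owner;		/* fsuid/fsgid matches s_resuid/s_resgid */
	bool cap_sys_resource;		/* capable_nolog(CAP_SYS_RESOURCE)       */
};

static bool has_free_blocks(const struct fs_state *s)
{
	/* Once only the reserve would remain, refuse unless the caller is
	 * privileged or owns the reservation. */
	if (s->free_blocks < s->root_blocks + 1 &&
	    !s->cap_sys_resource && !s->is_reserved_owner)
		return false;
	return true;
}

int main(void)
{
	struct fs_state plain = { 10, 50, false, false };
	struct fs_state privd = { 10, 50, false, true };

	printf("plain user: %d, CAP_SYS_RESOURCE holder: %d\n",
	       has_free_blocks(&plain), has_free_blocks(&privd));
	return 0;
}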
42940diff -urNp linux-3.1.4/fs/ext4/ext4.h linux-3.1.4/fs/ext4/ext4.h
42941--- linux-3.1.4/fs/ext4/ext4.h 2011-11-11 15:19:27.000000000 -0500
42942+++ linux-3.1.4/fs/ext4/ext4.h 2011-11-16 18:39:08.000000000 -0500
42943@@ -1180,19 +1180,19 @@ struct ext4_sb_info {
42944 unsigned long s_mb_last_start;
42945
42946 /* stats for buddy allocator */
42947- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42948- atomic_t s_bal_success; /* we found long enough chunks */
42949- atomic_t s_bal_allocated; /* in blocks */
42950- atomic_t s_bal_ex_scanned; /* total extents scanned */
42951- atomic_t s_bal_goals; /* goal hits */
42952- atomic_t s_bal_breaks; /* too long searches */
42953- atomic_t s_bal_2orders; /* 2^order hits */
42954+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42955+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42956+ atomic_unchecked_t s_bal_allocated; /* in blocks */
42957+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42958+ atomic_unchecked_t s_bal_goals; /* goal hits */
42959+ atomic_unchecked_t s_bal_breaks; /* too long searches */
42960+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42961 spinlock_t s_bal_lock;
42962 unsigned long s_mb_buddies_generated;
42963 unsigned long long s_mb_generation_time;
42964- atomic_t s_mb_lost_chunks;
42965- atomic_t s_mb_preallocated;
42966- atomic_t s_mb_discarded;
42967+ atomic_unchecked_t s_mb_lost_chunks;
42968+ atomic_unchecked_t s_mb_preallocated;
42969+ atomic_unchecked_t s_mb_discarded;
42970 atomic_t s_lock_busy;
42971
42972 /* locality groups */
42973diff -urNp linux-3.1.4/fs/ext4/file.c linux-3.1.4/fs/ext4/file.c
42974--- linux-3.1.4/fs/ext4/file.c 2011-11-11 15:19:27.000000000 -0500
42975+++ linux-3.1.4/fs/ext4/file.c 2011-11-16 18:40:29.000000000 -0500
42976@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode *
42977 path.dentry = mnt->mnt_root;
42978 cp = d_path(&path, buf, sizeof(buf));
42979 if (!IS_ERR(cp)) {
42980- memcpy(sbi->s_es->s_last_mounted, cp,
42981- sizeof(sbi->s_es->s_last_mounted));
42982+ strlcpy(sbi->s_es->s_last_mounted, cp,
42983+ sizeof(sbi->s_es->s_last_mounted));
42984 ext4_mark_super_dirty(sb);
42985 }
42986 }
42987diff -urNp linux-3.1.4/fs/ext4/ioctl.c linux-3.1.4/fs/ext4/ioctl.c
42988--- linux-3.1.4/fs/ext4/ioctl.c 2011-11-11 15:19:27.000000000 -0500
42989+++ linux-3.1.4/fs/ext4/ioctl.c 2011-11-16 18:39:08.000000000 -0500
42990@@ -348,7 +348,7 @@ mext_out:
42991 if (!blk_queue_discard(q))
42992 return -EOPNOTSUPP;
42993
42994- if (copy_from_user(&range, (struct fstrim_range *)arg,
42995+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
42996 sizeof(range)))
42997 return -EFAULT;
42998
42999@@ -358,7 +358,7 @@ mext_out:
43000 if (ret < 0)
43001 return ret;
43002
43003- if (copy_to_user((struct fstrim_range *)arg, &range,
43004+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
43005 sizeof(range)))
43006 return -EFAULT;
43007
43008diff -urNp linux-3.1.4/fs/ext4/mballoc.c linux-3.1.4/fs/ext4/mballoc.c
43009--- linux-3.1.4/fs/ext4/mballoc.c 2011-11-11 15:19:27.000000000 -0500
43010+++ linux-3.1.4/fs/ext4/mballoc.c 2011-11-16 18:40:29.000000000 -0500
43011@@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ex
43012 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
43013
43014 if (EXT4_SB(sb)->s_mb_stats)
43015- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
43016+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
43017
43018 break;
43019 }
43020@@ -2089,7 +2089,7 @@ repeat:
43021 ac->ac_status = AC_STATUS_CONTINUE;
43022 ac->ac_flags |= EXT4_MB_HINT_FIRST;
43023 cr = 3;
43024- atomic_inc(&sbi->s_mb_lost_chunks);
43025+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
43026 goto repeat;
43027 }
43028 }
43029@@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struc
43030 ext4_grpblk_t counters[16];
43031 } sg;
43032
43033+ pax_track_stack();
43034+
43035 group--;
43036 if (group == 0)
43037 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
43038@@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *
43039 if (sbi->s_mb_stats) {
43040 ext4_msg(sb, KERN_INFO,
43041 "mballoc: %u blocks %u reqs (%u success)",
43042- atomic_read(&sbi->s_bal_allocated),
43043- atomic_read(&sbi->s_bal_reqs),
43044- atomic_read(&sbi->s_bal_success));
43045+ atomic_read_unchecked(&sbi->s_bal_allocated),
43046+ atomic_read_unchecked(&sbi->s_bal_reqs),
43047+ atomic_read_unchecked(&sbi->s_bal_success));
43048 ext4_msg(sb, KERN_INFO,
43049 "mballoc: %u extents scanned, %u goal hits, "
43050 "%u 2^N hits, %u breaks, %u lost",
43051- atomic_read(&sbi->s_bal_ex_scanned),
43052- atomic_read(&sbi->s_bal_goals),
43053- atomic_read(&sbi->s_bal_2orders),
43054- atomic_read(&sbi->s_bal_breaks),
43055- atomic_read(&sbi->s_mb_lost_chunks));
43056+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
43057+ atomic_read_unchecked(&sbi->s_bal_goals),
43058+ atomic_read_unchecked(&sbi->s_bal_2orders),
43059+ atomic_read_unchecked(&sbi->s_bal_breaks),
43060+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
43061 ext4_msg(sb, KERN_INFO,
43062 "mballoc: %lu generated and it took %Lu",
43063 sbi->s_mb_buddies_generated,
43064 sbi->s_mb_generation_time);
43065 ext4_msg(sb, KERN_INFO,
43066 "mballoc: %u preallocated, %u discarded",
43067- atomic_read(&sbi->s_mb_preallocated),
43068- atomic_read(&sbi->s_mb_discarded));
43069+ atomic_read_unchecked(&sbi->s_mb_preallocated),
43070+ atomic_read_unchecked(&sbi->s_mb_discarded));
43071 }
43072
43073 free_percpu(sbi->s_locality_groups);
43074@@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct
43075 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
43076
43077 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
43078- atomic_inc(&sbi->s_bal_reqs);
43079- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
43080+ atomic_inc_unchecked(&sbi->s_bal_reqs);
43081+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
43082 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
43083- atomic_inc(&sbi->s_bal_success);
43084- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
43085+ atomic_inc_unchecked(&sbi->s_bal_success);
43086+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
43087 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
43088 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
43089- atomic_inc(&sbi->s_bal_goals);
43090+ atomic_inc_unchecked(&sbi->s_bal_goals);
43091 if (ac->ac_found > sbi->s_mb_max_to_scan)
43092- atomic_inc(&sbi->s_bal_breaks);
43093+ atomic_inc_unchecked(&sbi->s_bal_breaks);
43094 }
43095
43096 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
43097@@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
43098 trace_ext4_mb_new_inode_pa(ac, pa);
43099
43100 ext4_mb_use_inode_pa(ac, pa);
43101- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43102+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43103
43104 ei = EXT4_I(ac->ac_inode);
43105 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
43106@@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
43107 trace_ext4_mb_new_group_pa(ac, pa);
43108
43109 ext4_mb_use_group_pa(ac, pa);
43110- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43111+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43112
43113 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
43114 lg = ac->ac_lg;
43115@@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
43116 * from the bitmap and continue.
43117 */
43118 }
43119- atomic_add(free, &sbi->s_mb_discarded);
43120+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
43121
43122 return err;
43123 }
43124@@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_bud
43125 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
43126 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
43127 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
43128- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
43129+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
43130 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
43131
43132 return 0;
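The ext4.h and mballoc.c hunks above (like the fscache ones further down) switch pure statistics counters from atomic_t to atomic_unchecked_t. With PAX_REFCOUNT enabled, ordinary atomic_t arithmetic is instrumented to catch overflows, and the pax_report_refcount_overflow() handler added to fs/exec.c earlier in this patch is what fires when that check trips; counters that only accumulate statistics may wrap harmlessly, so the _unchecked variants opt them out. The sketch below is a user-space analogy built on C11 atomics with invented names, and it models the checked side as saturating rather than reproducing PaX's actual overflow handling.

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Invented illustration of the checked/unchecked split. */
typedef struct { atomic_int v; } atomic_checked_t;	/* overflow is refused   */
typedef struct { atomic_int v; } atomic_unchecked_t;	/* overflow is tolerated */

static void checked_inc(atomic_checked_t *a)
{
	int old = atomic_load(&a->v);
	/* Retry loop that never advances past INT_MAX, i.e. never overflows. */
	while (old != INT_MAX &&
	       !atomic_compare_exchange_weak(&a->v, &old, old + 1))
		;
}

static void unchecked_inc(atomic_unchecked_t *a)
{
	atomic_fetch_add(&a->v, 1);	/* free to wrap: fine for statistics */
}

int main(void)
{
	atomic_checked_t refs = { INT_MAX - 1 };
	atomic_unchecked_t stats = { 0 };

	checked_inc(&refs);
	checked_inc(&refs);		/* pins at INT_MAX instead of wrapping */
	unchecked_inc(&stats);

	printf("refs=%d stats=%d\n", atomic_load(&refs.v), atomic_load(&stats.v));
	return 0;
}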
43133diff -urNp linux-3.1.4/fs/fcntl.c linux-3.1.4/fs/fcntl.c
43134--- linux-3.1.4/fs/fcntl.c 2011-11-11 15:19:27.000000000 -0500
43135+++ linux-3.1.4/fs/fcntl.c 2011-11-16 23:40:25.000000000 -0500
43136@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
43137 if (err)
43138 return err;
43139
43140+ if (gr_handle_chroot_fowner(pid, type))
43141+ return -ENOENT;
43142+ if (gr_check_protected_task_fowner(pid, type))
43143+ return -EACCES;
43144+
43145 f_modown(filp, pid, type, force);
43146 return 0;
43147 }
43148@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
43149
43150 static int f_setown_ex(struct file *filp, unsigned long arg)
43151 {
43152- struct f_owner_ex * __user owner_p = (void * __user)arg;
43153+ struct f_owner_ex __user *owner_p = (void __user *)arg;
43154 struct f_owner_ex owner;
43155 struct pid *pid;
43156 int type;
43157@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
43158
43159 static int f_getown_ex(struct file *filp, unsigned long arg)
43160 {
43161- struct f_owner_ex * __user owner_p = (void * __user)arg;
43162+ struct f_owner_ex __user *owner_p = (void __user *)arg;
43163 struct f_owner_ex owner;
43164 int ret = 0;
43165
43166@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
43167 switch (cmd) {
43168 case F_DUPFD:
43169 case F_DUPFD_CLOEXEC:
43170+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
43171 if (arg >= rlimit(RLIMIT_NOFILE))
43172 break;
43173 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
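Besides the new grsecurity hooks, the fcntl.c hunk above corrects the placement of the __user annotation in f_setown_ex() and f_getown_ex(): written as struct f_owner_ex __user *owner_p, the sparse address-space attribute qualifies the user-space data being pointed at, which is what the later copy_from_user()/copy_to_user() checking needs, whereas the old spelling after the * qualified the pointer variable itself. A declaration-level sketch, with the annotation simplified from the kernel's compiler.h definitions:

/* Under sparse (__CHECKER__), __user marks data living in the user
 * address space; with a normal compiler it expands to nothing. */
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

struct f_owner_ex;			/* opaque here; defined by the kernel */

struct f_owner_ex __user *ok_ptr;	/* pointer to user-space data: the fixed spelling */
struct f_owner_ex * __user odd_ptr;	/* annotates the pointer itself: the old spelling */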
43174diff -urNp linux-3.1.4/fs/fifo.c linux-3.1.4/fs/fifo.c
43175--- linux-3.1.4/fs/fifo.c 2011-11-11 15:19:27.000000000 -0500
43176+++ linux-3.1.4/fs/fifo.c 2011-11-16 18:39:08.000000000 -0500
43177@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
43178 */
43179 filp->f_op = &read_pipefifo_fops;
43180 pipe->r_counter++;
43181- if (pipe->readers++ == 0)
43182+ if (atomic_inc_return(&pipe->readers) == 1)
43183 wake_up_partner(inode);
43184
43185- if (!pipe->writers) {
43186+ if (!atomic_read(&pipe->writers)) {
43187 if ((filp->f_flags & O_NONBLOCK)) {
43188 /* suppress POLLHUP until we have
43189 * seen a writer */
43190@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
43191 * errno=ENXIO when there is no process reading the FIFO.
43192 */
43193 ret = -ENXIO;
43194- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
43195+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
43196 goto err;
43197
43198 filp->f_op = &write_pipefifo_fops;
43199 pipe->w_counter++;
43200- if (!pipe->writers++)
43201+ if (atomic_inc_return(&pipe->writers) == 1)
43202 wake_up_partner(inode);
43203
43204- if (!pipe->readers) {
43205+ if (!atomic_read(&pipe->readers)) {
43206 wait_for_partner(inode, &pipe->r_counter);
43207 if (signal_pending(current))
43208 goto err_wr;
43209@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
43210 */
43211 filp->f_op = &rdwr_pipefifo_fops;
43212
43213- pipe->readers++;
43214- pipe->writers++;
43215+ atomic_inc(&pipe->readers);
43216+ atomic_inc(&pipe->writers);
43217 pipe->r_counter++;
43218 pipe->w_counter++;
43219- if (pipe->readers == 1 || pipe->writers == 1)
43220+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
43221 wake_up_partner(inode);
43222 break;
43223
43224@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
43225 return 0;
43226
43227 err_rd:
43228- if (!--pipe->readers)
43229+ if (atomic_dec_and_test(&pipe->readers))
43230 wake_up_interruptible(&pipe->wait);
43231 ret = -ERESTARTSYS;
43232 goto err;
43233
43234 err_wr:
43235- if (!--pipe->writers)
43236+ if (atomic_dec_and_test(&pipe->writers))
43237 wake_up_interruptible(&pipe->wait);
43238 ret = -ERESTARTSYS;
43239 goto err;
43240
43241 err:
43242- if (!pipe->readers && !pipe->writers)
43243+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
43244 free_pipe_info(inode);
43245
43246 err_nocleanup:
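Because the fifo.c hunks above (and the wait_for_dump_helpers() change in fs/exec.c earlier) turn pipe->readers and pipe->writers into atomic counters, every post-increment or pre-decrement test has to be rewritten in terms of the value-returning helpers: readers++ == 0 becomes atomic_inc_return(&readers) == 1, and !--writers becomes atomic_dec_and_test(&writers). A small user-space check of why those rewrites are equivalent, using the GCC/Clang __atomic builtins as stand-ins for the kernel helpers:

#include <assert.h>
#include <stdio.h>

/* Stand-ins for the kernel helpers: both report on the *new* value,
 * which is what the rewritten tests rely on. */
static int atomic_inc_return(int *v)
{
	return __atomic_add_fetch(v, 1, __ATOMIC_SEQ_CST);
}

static int atomic_dec_and_test(int *v)
{
	return __atomic_sub_fetch(v, 1, __ATOMIC_SEQ_CST) == 0;
}

int main(void)
{
	int readers = 0;

	/* "readers++ == 0" was true exactly when the new count is 1 ... */
	assert(atomic_inc_return(&readers) == 1);	/* first opener: wake partner */
	assert(atomic_inc_return(&readers) == 2);	/* later openers: no wakeup   */

	/* ... and "!--readers" is true exactly when the count drops to 0. */
	assert(!atomic_dec_and_test(&readers));		/* 2 -> 1: pipe still open */
	assert(atomic_dec_and_test(&readers));		/* 1 -> 0: last closer     */

	puts("rewritten tests match the original ++/-- semantics");
	return 0;
}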
43247diff -urNp linux-3.1.4/fs/file.c linux-3.1.4/fs/file.c
43248--- linux-3.1.4/fs/file.c 2011-11-11 15:19:27.000000000 -0500
43249+++ linux-3.1.4/fs/file.c 2011-11-16 18:40:29.000000000 -0500
43250@@ -15,6 +15,7 @@
43251 #include <linux/slab.h>
43252 #include <linux/vmalloc.h>
43253 #include <linux/file.h>
43254+#include <linux/security.h>
43255 #include <linux/fdtable.h>
43256 #include <linux/bitops.h>
43257 #include <linux/interrupt.h>
43258@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
43259 * N.B. For clone tasks sharing a files structure, this test
43260 * will limit the total number of files that can be opened.
43261 */
43262+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
43263 if (nr >= rlimit(RLIMIT_NOFILE))
43264 return -EMFILE;
43265
43266diff -urNp linux-3.1.4/fs/filesystems.c linux-3.1.4/fs/filesystems.c
43267--- linux-3.1.4/fs/filesystems.c 2011-11-11 15:19:27.000000000 -0500
43268+++ linux-3.1.4/fs/filesystems.c 2011-11-16 18:40:29.000000000 -0500
43269@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
43270 int len = dot ? dot - name : strlen(name);
43271
43272 fs = __get_fs_type(name, len);
43273+
43274+#ifdef CONFIG_GRKERNSEC_MODHARDEN
43275+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
43276+#else
43277 if (!fs && (request_module("%.*s", len, name) == 0))
43278+#endif
43279 fs = __get_fs_type(name, len);
43280
43281 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
43282diff -urNp linux-3.1.4/fs/fscache/cookie.c linux-3.1.4/fs/fscache/cookie.c
43283--- linux-3.1.4/fs/fscache/cookie.c 2011-11-11 15:19:27.000000000 -0500
43284+++ linux-3.1.4/fs/fscache/cookie.c 2011-11-16 18:39:08.000000000 -0500
43285@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
43286 parent ? (char *) parent->def->name : "<no-parent>",
43287 def->name, netfs_data);
43288
43289- fscache_stat(&fscache_n_acquires);
43290+ fscache_stat_unchecked(&fscache_n_acquires);
43291
43292 /* if there's no parent cookie, then we don't create one here either */
43293 if (!parent) {
43294- fscache_stat(&fscache_n_acquires_null);
43295+ fscache_stat_unchecked(&fscache_n_acquires_null);
43296 _leave(" [no parent]");
43297 return NULL;
43298 }
43299@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
43300 /* allocate and initialise a cookie */
43301 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
43302 if (!cookie) {
43303- fscache_stat(&fscache_n_acquires_oom);
43304+ fscache_stat_unchecked(&fscache_n_acquires_oom);
43305 _leave(" [ENOMEM]");
43306 return NULL;
43307 }
43308@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
43309
43310 switch (cookie->def->type) {
43311 case FSCACHE_COOKIE_TYPE_INDEX:
43312- fscache_stat(&fscache_n_cookie_index);
43313+ fscache_stat_unchecked(&fscache_n_cookie_index);
43314 break;
43315 case FSCACHE_COOKIE_TYPE_DATAFILE:
43316- fscache_stat(&fscache_n_cookie_data);
43317+ fscache_stat_unchecked(&fscache_n_cookie_data);
43318 break;
43319 default:
43320- fscache_stat(&fscache_n_cookie_special);
43321+ fscache_stat_unchecked(&fscache_n_cookie_special);
43322 break;
43323 }
43324
43325@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
43326 if (fscache_acquire_non_index_cookie(cookie) < 0) {
43327 atomic_dec(&parent->n_children);
43328 __fscache_cookie_put(cookie);
43329- fscache_stat(&fscache_n_acquires_nobufs);
43330+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
43331 _leave(" = NULL");
43332 return NULL;
43333 }
43334 }
43335
43336- fscache_stat(&fscache_n_acquires_ok);
43337+ fscache_stat_unchecked(&fscache_n_acquires_ok);
43338 _leave(" = %p", cookie);
43339 return cookie;
43340 }
43341@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
43342 cache = fscache_select_cache_for_object(cookie->parent);
43343 if (!cache) {
43344 up_read(&fscache_addremove_sem);
43345- fscache_stat(&fscache_n_acquires_no_cache);
43346+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
43347 _leave(" = -ENOMEDIUM [no cache]");
43348 return -ENOMEDIUM;
43349 }
43350@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
43351 object = cache->ops->alloc_object(cache, cookie);
43352 fscache_stat_d(&fscache_n_cop_alloc_object);
43353 if (IS_ERR(object)) {
43354- fscache_stat(&fscache_n_object_no_alloc);
43355+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
43356 ret = PTR_ERR(object);
43357 goto error;
43358 }
43359
43360- fscache_stat(&fscache_n_object_alloc);
43361+ fscache_stat_unchecked(&fscache_n_object_alloc);
43362
43363 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
43364
43365@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
43366 struct fscache_object *object;
43367 struct hlist_node *_p;
43368
43369- fscache_stat(&fscache_n_updates);
43370+ fscache_stat_unchecked(&fscache_n_updates);
43371
43372 if (!cookie) {
43373- fscache_stat(&fscache_n_updates_null);
43374+ fscache_stat_unchecked(&fscache_n_updates_null);
43375 _leave(" [no cookie]");
43376 return;
43377 }
43378@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
43379 struct fscache_object *object;
43380 unsigned long event;
43381
43382- fscache_stat(&fscache_n_relinquishes);
43383+ fscache_stat_unchecked(&fscache_n_relinquishes);
43384 if (retire)
43385- fscache_stat(&fscache_n_relinquishes_retire);
43386+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
43387
43388 if (!cookie) {
43389- fscache_stat(&fscache_n_relinquishes_null);
43390+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
43391 _leave(" [no cookie]");
43392 return;
43393 }
43394@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
43395
43396 /* wait for the cookie to finish being instantiated (or to fail) */
43397 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
43398- fscache_stat(&fscache_n_relinquishes_waitcrt);
43399+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
43400 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
43401 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
43402 }
43403diff -urNp linux-3.1.4/fs/fscache/internal.h linux-3.1.4/fs/fscache/internal.h
43404--- linux-3.1.4/fs/fscache/internal.h 2011-11-11 15:19:27.000000000 -0500
43405+++ linux-3.1.4/fs/fscache/internal.h 2011-11-16 18:39:08.000000000 -0500
43406@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
43407 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
43408 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
43409
43410-extern atomic_t fscache_n_op_pend;
43411-extern atomic_t fscache_n_op_run;
43412-extern atomic_t fscache_n_op_enqueue;
43413-extern atomic_t fscache_n_op_deferred_release;
43414-extern atomic_t fscache_n_op_release;
43415-extern atomic_t fscache_n_op_gc;
43416-extern atomic_t fscache_n_op_cancelled;
43417-extern atomic_t fscache_n_op_rejected;
43418-
43419-extern atomic_t fscache_n_attr_changed;
43420-extern atomic_t fscache_n_attr_changed_ok;
43421-extern atomic_t fscache_n_attr_changed_nobufs;
43422-extern atomic_t fscache_n_attr_changed_nomem;
43423-extern atomic_t fscache_n_attr_changed_calls;
43424-
43425-extern atomic_t fscache_n_allocs;
43426-extern atomic_t fscache_n_allocs_ok;
43427-extern atomic_t fscache_n_allocs_wait;
43428-extern atomic_t fscache_n_allocs_nobufs;
43429-extern atomic_t fscache_n_allocs_intr;
43430-extern atomic_t fscache_n_allocs_object_dead;
43431-extern atomic_t fscache_n_alloc_ops;
43432-extern atomic_t fscache_n_alloc_op_waits;
43433-
43434-extern atomic_t fscache_n_retrievals;
43435-extern atomic_t fscache_n_retrievals_ok;
43436-extern atomic_t fscache_n_retrievals_wait;
43437-extern atomic_t fscache_n_retrievals_nodata;
43438-extern atomic_t fscache_n_retrievals_nobufs;
43439-extern atomic_t fscache_n_retrievals_intr;
43440-extern atomic_t fscache_n_retrievals_nomem;
43441-extern atomic_t fscache_n_retrievals_object_dead;
43442-extern atomic_t fscache_n_retrieval_ops;
43443-extern atomic_t fscache_n_retrieval_op_waits;
43444-
43445-extern atomic_t fscache_n_stores;
43446-extern atomic_t fscache_n_stores_ok;
43447-extern atomic_t fscache_n_stores_again;
43448-extern atomic_t fscache_n_stores_nobufs;
43449-extern atomic_t fscache_n_stores_oom;
43450-extern atomic_t fscache_n_store_ops;
43451-extern atomic_t fscache_n_store_calls;
43452-extern atomic_t fscache_n_store_pages;
43453-extern atomic_t fscache_n_store_radix_deletes;
43454-extern atomic_t fscache_n_store_pages_over_limit;
43455-
43456-extern atomic_t fscache_n_store_vmscan_not_storing;
43457-extern atomic_t fscache_n_store_vmscan_gone;
43458-extern atomic_t fscache_n_store_vmscan_busy;
43459-extern atomic_t fscache_n_store_vmscan_cancelled;
43460-
43461-extern atomic_t fscache_n_marks;
43462-extern atomic_t fscache_n_uncaches;
43463-
43464-extern atomic_t fscache_n_acquires;
43465-extern atomic_t fscache_n_acquires_null;
43466-extern atomic_t fscache_n_acquires_no_cache;
43467-extern atomic_t fscache_n_acquires_ok;
43468-extern atomic_t fscache_n_acquires_nobufs;
43469-extern atomic_t fscache_n_acquires_oom;
43470-
43471-extern atomic_t fscache_n_updates;
43472-extern atomic_t fscache_n_updates_null;
43473-extern atomic_t fscache_n_updates_run;
43474-
43475-extern atomic_t fscache_n_relinquishes;
43476-extern atomic_t fscache_n_relinquishes_null;
43477-extern atomic_t fscache_n_relinquishes_waitcrt;
43478-extern atomic_t fscache_n_relinquishes_retire;
43479-
43480-extern atomic_t fscache_n_cookie_index;
43481-extern atomic_t fscache_n_cookie_data;
43482-extern atomic_t fscache_n_cookie_special;
43483-
43484-extern atomic_t fscache_n_object_alloc;
43485-extern atomic_t fscache_n_object_no_alloc;
43486-extern atomic_t fscache_n_object_lookups;
43487-extern atomic_t fscache_n_object_lookups_negative;
43488-extern atomic_t fscache_n_object_lookups_positive;
43489-extern atomic_t fscache_n_object_lookups_timed_out;
43490-extern atomic_t fscache_n_object_created;
43491-extern atomic_t fscache_n_object_avail;
43492-extern atomic_t fscache_n_object_dead;
43493-
43494-extern atomic_t fscache_n_checkaux_none;
43495-extern atomic_t fscache_n_checkaux_okay;
43496-extern atomic_t fscache_n_checkaux_update;
43497-extern atomic_t fscache_n_checkaux_obsolete;
43498+extern atomic_unchecked_t fscache_n_op_pend;
43499+extern atomic_unchecked_t fscache_n_op_run;
43500+extern atomic_unchecked_t fscache_n_op_enqueue;
43501+extern atomic_unchecked_t fscache_n_op_deferred_release;
43502+extern atomic_unchecked_t fscache_n_op_release;
43503+extern atomic_unchecked_t fscache_n_op_gc;
43504+extern atomic_unchecked_t fscache_n_op_cancelled;
43505+extern atomic_unchecked_t fscache_n_op_rejected;
43506+
43507+extern atomic_unchecked_t fscache_n_attr_changed;
43508+extern atomic_unchecked_t fscache_n_attr_changed_ok;
43509+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
43510+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
43511+extern atomic_unchecked_t fscache_n_attr_changed_calls;
43512+
43513+extern atomic_unchecked_t fscache_n_allocs;
43514+extern atomic_unchecked_t fscache_n_allocs_ok;
43515+extern atomic_unchecked_t fscache_n_allocs_wait;
43516+extern atomic_unchecked_t fscache_n_allocs_nobufs;
43517+extern atomic_unchecked_t fscache_n_allocs_intr;
43518+extern atomic_unchecked_t fscache_n_allocs_object_dead;
43519+extern atomic_unchecked_t fscache_n_alloc_ops;
43520+extern atomic_unchecked_t fscache_n_alloc_op_waits;
43521+
43522+extern atomic_unchecked_t fscache_n_retrievals;
43523+extern atomic_unchecked_t fscache_n_retrievals_ok;
43524+extern atomic_unchecked_t fscache_n_retrievals_wait;
43525+extern atomic_unchecked_t fscache_n_retrievals_nodata;
43526+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
43527+extern atomic_unchecked_t fscache_n_retrievals_intr;
43528+extern atomic_unchecked_t fscache_n_retrievals_nomem;
43529+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
43530+extern atomic_unchecked_t fscache_n_retrieval_ops;
43531+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
43532+
43533+extern atomic_unchecked_t fscache_n_stores;
43534+extern atomic_unchecked_t fscache_n_stores_ok;
43535+extern atomic_unchecked_t fscache_n_stores_again;
43536+extern atomic_unchecked_t fscache_n_stores_nobufs;
43537+extern atomic_unchecked_t fscache_n_stores_oom;
43538+extern atomic_unchecked_t fscache_n_store_ops;
43539+extern atomic_unchecked_t fscache_n_store_calls;
43540+extern atomic_unchecked_t fscache_n_store_pages;
43541+extern atomic_unchecked_t fscache_n_store_radix_deletes;
43542+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
43543+
43544+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43545+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
43546+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
43547+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43548+
43549+extern atomic_unchecked_t fscache_n_marks;
43550+extern atomic_unchecked_t fscache_n_uncaches;
43551+
43552+extern atomic_unchecked_t fscache_n_acquires;
43553+extern atomic_unchecked_t fscache_n_acquires_null;
43554+extern atomic_unchecked_t fscache_n_acquires_no_cache;
43555+extern atomic_unchecked_t fscache_n_acquires_ok;
43556+extern atomic_unchecked_t fscache_n_acquires_nobufs;
43557+extern atomic_unchecked_t fscache_n_acquires_oom;
43558+
43559+extern atomic_unchecked_t fscache_n_updates;
43560+extern atomic_unchecked_t fscache_n_updates_null;
43561+extern atomic_unchecked_t fscache_n_updates_run;
43562+
43563+extern atomic_unchecked_t fscache_n_relinquishes;
43564+extern atomic_unchecked_t fscache_n_relinquishes_null;
43565+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43566+extern atomic_unchecked_t fscache_n_relinquishes_retire;
43567+
43568+extern atomic_unchecked_t fscache_n_cookie_index;
43569+extern atomic_unchecked_t fscache_n_cookie_data;
43570+extern atomic_unchecked_t fscache_n_cookie_special;
43571+
43572+extern atomic_unchecked_t fscache_n_object_alloc;
43573+extern atomic_unchecked_t fscache_n_object_no_alloc;
43574+extern atomic_unchecked_t fscache_n_object_lookups;
43575+extern atomic_unchecked_t fscache_n_object_lookups_negative;
43576+extern atomic_unchecked_t fscache_n_object_lookups_positive;
43577+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
43578+extern atomic_unchecked_t fscache_n_object_created;
43579+extern atomic_unchecked_t fscache_n_object_avail;
43580+extern atomic_unchecked_t fscache_n_object_dead;
43581+
43582+extern atomic_unchecked_t fscache_n_checkaux_none;
43583+extern atomic_unchecked_t fscache_n_checkaux_okay;
43584+extern atomic_unchecked_t fscache_n_checkaux_update;
43585+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
43586
43587 extern atomic_t fscache_n_cop_alloc_object;
43588 extern atomic_t fscache_n_cop_lookup_object;
43589@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
43590 atomic_inc(stat);
43591 }
43592
43593+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
43594+{
43595+ atomic_inc_unchecked(stat);
43596+}
43597+
43598 static inline void fscache_stat_d(atomic_t *stat)
43599 {
43600 atomic_dec(stat);
43601@@ -267,6 +272,7 @@ extern const struct file_operations fsca
43602
43603 #define __fscache_stat(stat) (NULL)
43604 #define fscache_stat(stat) do {} while (0)
43605+#define fscache_stat_unchecked(stat) do {} while (0)
43606 #define fscache_stat_d(stat) do {} while (0)
43607 #endif
43608
43609diff -urNp linux-3.1.4/fs/fscache/object.c linux-3.1.4/fs/fscache/object.c
43610--- linux-3.1.4/fs/fscache/object.c 2011-11-11 15:19:27.000000000 -0500
43611+++ linux-3.1.4/fs/fscache/object.c 2011-11-16 18:39:08.000000000 -0500
43612@@ -128,7 +128,7 @@ static void fscache_object_state_machine
43613 /* update the object metadata on disk */
43614 case FSCACHE_OBJECT_UPDATING:
43615 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
43616- fscache_stat(&fscache_n_updates_run);
43617+ fscache_stat_unchecked(&fscache_n_updates_run);
43618 fscache_stat(&fscache_n_cop_update_object);
43619 object->cache->ops->update_object(object);
43620 fscache_stat_d(&fscache_n_cop_update_object);
43621@@ -217,7 +217,7 @@ static void fscache_object_state_machine
43622 spin_lock(&object->lock);
43623 object->state = FSCACHE_OBJECT_DEAD;
43624 spin_unlock(&object->lock);
43625- fscache_stat(&fscache_n_object_dead);
43626+ fscache_stat_unchecked(&fscache_n_object_dead);
43627 goto terminal_transit;
43628
43629 /* handle the parent cache of this object being withdrawn from
43630@@ -232,7 +232,7 @@ static void fscache_object_state_machine
43631 spin_lock(&object->lock);
43632 object->state = FSCACHE_OBJECT_DEAD;
43633 spin_unlock(&object->lock);
43634- fscache_stat(&fscache_n_object_dead);
43635+ fscache_stat_unchecked(&fscache_n_object_dead);
43636 goto terminal_transit;
43637
43638 /* complain about the object being woken up once it is
43639@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
43640 parent->cookie->def->name, cookie->def->name,
43641 object->cache->tag->name);
43642
43643- fscache_stat(&fscache_n_object_lookups);
43644+ fscache_stat_unchecked(&fscache_n_object_lookups);
43645 fscache_stat(&fscache_n_cop_lookup_object);
43646 ret = object->cache->ops->lookup_object(object);
43647 fscache_stat_d(&fscache_n_cop_lookup_object);
43648@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
43649 if (ret == -ETIMEDOUT) {
43650 /* probably stuck behind another object, so move this one to
43651 * the back of the queue */
43652- fscache_stat(&fscache_n_object_lookups_timed_out);
43653+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43654 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43655 }
43656
43657@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
43658
43659 spin_lock(&object->lock);
43660 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43661- fscache_stat(&fscache_n_object_lookups_negative);
43662+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43663
43664 /* transit here to allow write requests to begin stacking up
43665 * and read requests to begin returning ENODATA */
43666@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
43667 * result, in which case there may be data available */
43668 spin_lock(&object->lock);
43669 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43670- fscache_stat(&fscache_n_object_lookups_positive);
43671+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43672
43673 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43674
43675@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
43676 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43677 } else {
43678 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43679- fscache_stat(&fscache_n_object_created);
43680+ fscache_stat_unchecked(&fscache_n_object_created);
43681
43682 object->state = FSCACHE_OBJECT_AVAILABLE;
43683 spin_unlock(&object->lock);
43684@@ -602,7 +602,7 @@ static void fscache_object_available(str
43685 fscache_enqueue_dependents(object);
43686
43687 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43688- fscache_stat(&fscache_n_object_avail);
43689+ fscache_stat_unchecked(&fscache_n_object_avail);
43690
43691 _leave("");
43692 }
43693@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
43694 enum fscache_checkaux result;
43695
43696 if (!object->cookie->def->check_aux) {
43697- fscache_stat(&fscache_n_checkaux_none);
43698+ fscache_stat_unchecked(&fscache_n_checkaux_none);
43699 return FSCACHE_CHECKAUX_OKAY;
43700 }
43701
43702@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
43703 switch (result) {
43704 /* entry okay as is */
43705 case FSCACHE_CHECKAUX_OKAY:
43706- fscache_stat(&fscache_n_checkaux_okay);
43707+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
43708 break;
43709
43710 /* entry requires update */
43711 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43712- fscache_stat(&fscache_n_checkaux_update);
43713+ fscache_stat_unchecked(&fscache_n_checkaux_update);
43714 break;
43715
43716 /* entry requires deletion */
43717 case FSCACHE_CHECKAUX_OBSOLETE:
43718- fscache_stat(&fscache_n_checkaux_obsolete);
43719+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43720 break;
43721
43722 default:
43723diff -urNp linux-3.1.4/fs/fscache/operation.c linux-3.1.4/fs/fscache/operation.c
43724--- linux-3.1.4/fs/fscache/operation.c 2011-11-11 15:19:27.000000000 -0500
43725+++ linux-3.1.4/fs/fscache/operation.c 2011-11-16 18:39:08.000000000 -0500
43726@@ -17,7 +17,7 @@
43727 #include <linux/slab.h>
43728 #include "internal.h"
43729
43730-atomic_t fscache_op_debug_id;
43731+atomic_unchecked_t fscache_op_debug_id;
43732 EXPORT_SYMBOL(fscache_op_debug_id);
43733
43734 /**
43735@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
43736 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43737 ASSERTCMP(atomic_read(&op->usage), >, 0);
43738
43739- fscache_stat(&fscache_n_op_enqueue);
43740+ fscache_stat_unchecked(&fscache_n_op_enqueue);
43741 switch (op->flags & FSCACHE_OP_TYPE) {
43742 case FSCACHE_OP_ASYNC:
43743 _debug("queue async");
43744@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
43745 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43746 if (op->processor)
43747 fscache_enqueue_operation(op);
43748- fscache_stat(&fscache_n_op_run);
43749+ fscache_stat_unchecked(&fscache_n_op_run);
43750 }
43751
43752 /*
43753@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
43754 if (object->n_ops > 1) {
43755 atomic_inc(&op->usage);
43756 list_add_tail(&op->pend_link, &object->pending_ops);
43757- fscache_stat(&fscache_n_op_pend);
43758+ fscache_stat_unchecked(&fscache_n_op_pend);
43759 } else if (!list_empty(&object->pending_ops)) {
43760 atomic_inc(&op->usage);
43761 list_add_tail(&op->pend_link, &object->pending_ops);
43762- fscache_stat(&fscache_n_op_pend);
43763+ fscache_stat_unchecked(&fscache_n_op_pend);
43764 fscache_start_operations(object);
43765 } else {
43766 ASSERTCMP(object->n_in_progress, ==, 0);
43767@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
43768 object->n_exclusive++; /* reads and writes must wait */
43769 atomic_inc(&op->usage);
43770 list_add_tail(&op->pend_link, &object->pending_ops);
43771- fscache_stat(&fscache_n_op_pend);
43772+ fscache_stat_unchecked(&fscache_n_op_pend);
43773 ret = 0;
43774 } else {
43775 /* not allowed to submit ops in any other state */
43776@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
43777 if (object->n_exclusive > 0) {
43778 atomic_inc(&op->usage);
43779 list_add_tail(&op->pend_link, &object->pending_ops);
43780- fscache_stat(&fscache_n_op_pend);
43781+ fscache_stat_unchecked(&fscache_n_op_pend);
43782 } else if (!list_empty(&object->pending_ops)) {
43783 atomic_inc(&op->usage);
43784 list_add_tail(&op->pend_link, &object->pending_ops);
43785- fscache_stat(&fscache_n_op_pend);
43786+ fscache_stat_unchecked(&fscache_n_op_pend);
43787 fscache_start_operations(object);
43788 } else {
43789 ASSERTCMP(object->n_exclusive, ==, 0);
43790@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
43791 object->n_ops++;
43792 atomic_inc(&op->usage);
43793 list_add_tail(&op->pend_link, &object->pending_ops);
43794- fscache_stat(&fscache_n_op_pend);
43795+ fscache_stat_unchecked(&fscache_n_op_pend);
43796 ret = 0;
43797 } else if (object->state == FSCACHE_OBJECT_DYING ||
43798 object->state == FSCACHE_OBJECT_LC_DYING ||
43799 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43800- fscache_stat(&fscache_n_op_rejected);
43801+ fscache_stat_unchecked(&fscache_n_op_rejected);
43802 ret = -ENOBUFS;
43803 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43804 fscache_report_unexpected_submission(object, op, ostate);
43805@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
43806
43807 ret = -EBUSY;
43808 if (!list_empty(&op->pend_link)) {
43809- fscache_stat(&fscache_n_op_cancelled);
43810+ fscache_stat_unchecked(&fscache_n_op_cancelled);
43811 list_del_init(&op->pend_link);
43812 object->n_ops--;
43813 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43814@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
43815 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43816 BUG();
43817
43818- fscache_stat(&fscache_n_op_release);
43819+ fscache_stat_unchecked(&fscache_n_op_release);
43820
43821 if (op->release) {
43822 op->release(op);
43823@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
43824 * lock, and defer it otherwise */
43825 if (!spin_trylock(&object->lock)) {
43826 _debug("defer put");
43827- fscache_stat(&fscache_n_op_deferred_release);
43828+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
43829
43830 cache = object->cache;
43831 spin_lock(&cache->op_gc_list_lock);
43832@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
43833
43834 _debug("GC DEFERRED REL OBJ%x OP%x",
43835 object->debug_id, op->debug_id);
43836- fscache_stat(&fscache_n_op_gc);
43837+ fscache_stat_unchecked(&fscache_n_op_gc);
43838
43839 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43840
43841diff -urNp linux-3.1.4/fs/fscache/page.c linux-3.1.4/fs/fscache/page.c
43842--- linux-3.1.4/fs/fscache/page.c 2011-11-11 15:19:27.000000000 -0500
43843+++ linux-3.1.4/fs/fscache/page.c 2011-11-16 18:39:08.000000000 -0500
43844@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
43845 val = radix_tree_lookup(&cookie->stores, page->index);
43846 if (!val) {
43847 rcu_read_unlock();
43848- fscache_stat(&fscache_n_store_vmscan_not_storing);
43849+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43850 __fscache_uncache_page(cookie, page);
43851 return true;
43852 }
43853@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
43854 spin_unlock(&cookie->stores_lock);
43855
43856 if (xpage) {
43857- fscache_stat(&fscache_n_store_vmscan_cancelled);
43858- fscache_stat(&fscache_n_store_radix_deletes);
43859+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43860+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43861 ASSERTCMP(xpage, ==, page);
43862 } else {
43863- fscache_stat(&fscache_n_store_vmscan_gone);
43864+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43865 }
43866
43867 wake_up_bit(&cookie->flags, 0);
43868@@ -107,7 +107,7 @@ page_busy:
43869 /* we might want to wait here, but that could deadlock the allocator as
43870 * the work threads writing to the cache may all end up sleeping
43871 * on memory allocation */
43872- fscache_stat(&fscache_n_store_vmscan_busy);
43873+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43874 return false;
43875 }
43876 EXPORT_SYMBOL(__fscache_maybe_release_page);
43877@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
43878 FSCACHE_COOKIE_STORING_TAG);
43879 if (!radix_tree_tag_get(&cookie->stores, page->index,
43880 FSCACHE_COOKIE_PENDING_TAG)) {
43881- fscache_stat(&fscache_n_store_radix_deletes);
43882+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43883 xpage = radix_tree_delete(&cookie->stores, page->index);
43884 }
43885 spin_unlock(&cookie->stores_lock);
43886@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
43887
43888 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43889
43890- fscache_stat(&fscache_n_attr_changed_calls);
43891+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43892
43893 if (fscache_object_is_active(object)) {
43894 fscache_stat(&fscache_n_cop_attr_changed);
43895@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
43896
43897 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43898
43899- fscache_stat(&fscache_n_attr_changed);
43900+ fscache_stat_unchecked(&fscache_n_attr_changed);
43901
43902 op = kzalloc(sizeof(*op), GFP_KERNEL);
43903 if (!op) {
43904- fscache_stat(&fscache_n_attr_changed_nomem);
43905+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43906 _leave(" = -ENOMEM");
43907 return -ENOMEM;
43908 }
43909@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
43910 if (fscache_submit_exclusive_op(object, op) < 0)
43911 goto nobufs;
43912 spin_unlock(&cookie->lock);
43913- fscache_stat(&fscache_n_attr_changed_ok);
43914+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43915 fscache_put_operation(op);
43916 _leave(" = 0");
43917 return 0;
43918@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
43919 nobufs:
43920 spin_unlock(&cookie->lock);
43921 kfree(op);
43922- fscache_stat(&fscache_n_attr_changed_nobufs);
43923+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43924 _leave(" = %d", -ENOBUFS);
43925 return -ENOBUFS;
43926 }
43927@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
43928 /* allocate a retrieval operation and attempt to submit it */
43929 op = kzalloc(sizeof(*op), GFP_NOIO);
43930 if (!op) {
43931- fscache_stat(&fscache_n_retrievals_nomem);
43932+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43933 return NULL;
43934 }
43935
43936@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
43937 return 0;
43938 }
43939
43940- fscache_stat(&fscache_n_retrievals_wait);
43941+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43942
43943 jif = jiffies;
43944 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43945 fscache_wait_bit_interruptible,
43946 TASK_INTERRUPTIBLE) != 0) {
43947- fscache_stat(&fscache_n_retrievals_intr);
43948+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43949 _leave(" = -ERESTARTSYS");
43950 return -ERESTARTSYS;
43951 }
43952@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
43953 */
43954 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43955 struct fscache_retrieval *op,
43956- atomic_t *stat_op_waits,
43957- atomic_t *stat_object_dead)
43958+ atomic_unchecked_t *stat_op_waits,
43959+ atomic_unchecked_t *stat_object_dead)
43960 {
43961 int ret;
43962
43963@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
43964 goto check_if_dead;
43965
43966 _debug(">>> WT");
43967- fscache_stat(stat_op_waits);
43968+ fscache_stat_unchecked(stat_op_waits);
43969 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43970 fscache_wait_bit_interruptible,
43971 TASK_INTERRUPTIBLE) < 0) {
43972@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
43973
43974 check_if_dead:
43975 if (unlikely(fscache_object_is_dead(object))) {
43976- fscache_stat(stat_object_dead);
43977+ fscache_stat_unchecked(stat_object_dead);
43978 return -ENOBUFS;
43979 }
43980 return 0;
43981@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
43982
43983 _enter("%p,%p,,,", cookie, page);
43984
43985- fscache_stat(&fscache_n_retrievals);
43986+ fscache_stat_unchecked(&fscache_n_retrievals);
43987
43988 if (hlist_empty(&cookie->backing_objects))
43989 goto nobufs;
43990@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
43991 goto nobufs_unlock;
43992 spin_unlock(&cookie->lock);
43993
43994- fscache_stat(&fscache_n_retrieval_ops);
43995+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43996
43997 /* pin the netfs read context in case we need to do the actual netfs
43998 * read because we've encountered a cache read failure */
43999@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
44000
44001 error:
44002 if (ret == -ENOMEM)
44003- fscache_stat(&fscache_n_retrievals_nomem);
44004+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44005 else if (ret == -ERESTARTSYS)
44006- fscache_stat(&fscache_n_retrievals_intr);
44007+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
44008 else if (ret == -ENODATA)
44009- fscache_stat(&fscache_n_retrievals_nodata);
44010+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
44011 else if (ret < 0)
44012- fscache_stat(&fscache_n_retrievals_nobufs);
44013+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44014 else
44015- fscache_stat(&fscache_n_retrievals_ok);
44016+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
44017
44018 fscache_put_retrieval(op);
44019 _leave(" = %d", ret);
44020@@ -429,7 +429,7 @@ nobufs_unlock:
44021 spin_unlock(&cookie->lock);
44022 kfree(op);
44023 nobufs:
44024- fscache_stat(&fscache_n_retrievals_nobufs);
44025+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44026 _leave(" = -ENOBUFS");
44027 return -ENOBUFS;
44028 }
44029@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
44030
44031 _enter("%p,,%d,,,", cookie, *nr_pages);
44032
44033- fscache_stat(&fscache_n_retrievals);
44034+ fscache_stat_unchecked(&fscache_n_retrievals);
44035
44036 if (hlist_empty(&cookie->backing_objects))
44037 goto nobufs;
44038@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
44039 goto nobufs_unlock;
44040 spin_unlock(&cookie->lock);
44041
44042- fscache_stat(&fscache_n_retrieval_ops);
44043+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
44044
44045 /* pin the netfs read context in case we need to do the actual netfs
44046 * read because we've encountered a cache read failure */
44047@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
44048
44049 error:
44050 if (ret == -ENOMEM)
44051- fscache_stat(&fscache_n_retrievals_nomem);
44052+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44053 else if (ret == -ERESTARTSYS)
44054- fscache_stat(&fscache_n_retrievals_intr);
44055+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
44056 else if (ret == -ENODATA)
44057- fscache_stat(&fscache_n_retrievals_nodata);
44058+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
44059 else if (ret < 0)
44060- fscache_stat(&fscache_n_retrievals_nobufs);
44061+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44062 else
44063- fscache_stat(&fscache_n_retrievals_ok);
44064+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
44065
44066 fscache_put_retrieval(op);
44067 _leave(" = %d", ret);
44068@@ -545,7 +545,7 @@ nobufs_unlock:
44069 spin_unlock(&cookie->lock);
44070 kfree(op);
44071 nobufs:
44072- fscache_stat(&fscache_n_retrievals_nobufs);
44073+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44074 _leave(" = -ENOBUFS");
44075 return -ENOBUFS;
44076 }
44077@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
44078
44079 _enter("%p,%p,,,", cookie, page);
44080
44081- fscache_stat(&fscache_n_allocs);
44082+ fscache_stat_unchecked(&fscache_n_allocs);
44083
44084 if (hlist_empty(&cookie->backing_objects))
44085 goto nobufs;
44086@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
44087 goto nobufs_unlock;
44088 spin_unlock(&cookie->lock);
44089
44090- fscache_stat(&fscache_n_alloc_ops);
44091+ fscache_stat_unchecked(&fscache_n_alloc_ops);
44092
44093 ret = fscache_wait_for_retrieval_activation(
44094 object, op,
44095@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
44096
44097 error:
44098 if (ret == -ERESTARTSYS)
44099- fscache_stat(&fscache_n_allocs_intr);
44100+ fscache_stat_unchecked(&fscache_n_allocs_intr);
44101 else if (ret < 0)
44102- fscache_stat(&fscache_n_allocs_nobufs);
44103+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
44104 else
44105- fscache_stat(&fscache_n_allocs_ok);
44106+ fscache_stat_unchecked(&fscache_n_allocs_ok);
44107
44108 fscache_put_retrieval(op);
44109 _leave(" = %d", ret);
44110@@ -625,7 +625,7 @@ nobufs_unlock:
44111 spin_unlock(&cookie->lock);
44112 kfree(op);
44113 nobufs:
44114- fscache_stat(&fscache_n_allocs_nobufs);
44115+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
44116 _leave(" = -ENOBUFS");
44117 return -ENOBUFS;
44118 }
44119@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
44120
44121 spin_lock(&cookie->stores_lock);
44122
44123- fscache_stat(&fscache_n_store_calls);
44124+ fscache_stat_unchecked(&fscache_n_store_calls);
44125
44126 /* find a page to store */
44127 page = NULL;
44128@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
44129 page = results[0];
44130 _debug("gang %d [%lx]", n, page->index);
44131 if (page->index > op->store_limit) {
44132- fscache_stat(&fscache_n_store_pages_over_limit);
44133+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
44134 goto superseded;
44135 }
44136
44137@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
44138 spin_unlock(&cookie->stores_lock);
44139 spin_unlock(&object->lock);
44140
44141- fscache_stat(&fscache_n_store_pages);
44142+ fscache_stat_unchecked(&fscache_n_store_pages);
44143 fscache_stat(&fscache_n_cop_write_page);
44144 ret = object->cache->ops->write_page(op, page);
44145 fscache_stat_d(&fscache_n_cop_write_page);
44146@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
44147 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44148 ASSERT(PageFsCache(page));
44149
44150- fscache_stat(&fscache_n_stores);
44151+ fscache_stat_unchecked(&fscache_n_stores);
44152
44153 op = kzalloc(sizeof(*op), GFP_NOIO);
44154 if (!op)
44155@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
44156 spin_unlock(&cookie->stores_lock);
44157 spin_unlock(&object->lock);
44158
44159- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
44160+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
44161 op->store_limit = object->store_limit;
44162
44163 if (fscache_submit_op(object, &op->op) < 0)
44164@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
44165
44166 spin_unlock(&cookie->lock);
44167 radix_tree_preload_end();
44168- fscache_stat(&fscache_n_store_ops);
44169- fscache_stat(&fscache_n_stores_ok);
44170+ fscache_stat_unchecked(&fscache_n_store_ops);
44171+ fscache_stat_unchecked(&fscache_n_stores_ok);
44172
44173 /* the work queue now carries its own ref on the object */
44174 fscache_put_operation(&op->op);
44175@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
44176 return 0;
44177
44178 already_queued:
44179- fscache_stat(&fscache_n_stores_again);
44180+ fscache_stat_unchecked(&fscache_n_stores_again);
44181 already_pending:
44182 spin_unlock(&cookie->stores_lock);
44183 spin_unlock(&object->lock);
44184 spin_unlock(&cookie->lock);
44185 radix_tree_preload_end();
44186 kfree(op);
44187- fscache_stat(&fscache_n_stores_ok);
44188+ fscache_stat_unchecked(&fscache_n_stores_ok);
44189 _leave(" = 0");
44190 return 0;
44191
44192@@ -851,14 +851,14 @@ nobufs:
44193 spin_unlock(&cookie->lock);
44194 radix_tree_preload_end();
44195 kfree(op);
44196- fscache_stat(&fscache_n_stores_nobufs);
44197+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
44198 _leave(" = -ENOBUFS");
44199 return -ENOBUFS;
44200
44201 nomem_free:
44202 kfree(op);
44203 nomem:
44204- fscache_stat(&fscache_n_stores_oom);
44205+ fscache_stat_unchecked(&fscache_n_stores_oom);
44206 _leave(" = -ENOMEM");
44207 return -ENOMEM;
44208 }
44209@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
44210 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44211 ASSERTCMP(page, !=, NULL);
44212
44213- fscache_stat(&fscache_n_uncaches);
44214+ fscache_stat_unchecked(&fscache_n_uncaches);
44215
44216 /* cache withdrawal may beat us to it */
44217 if (!PageFsCache(page))
44218@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
44219 unsigned long loop;
44220
44221 #ifdef CONFIG_FSCACHE_STATS
44222- atomic_add(pagevec->nr, &fscache_n_marks);
44223+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
44224 #endif
44225
44226 for (loop = 0; loop < pagevec->nr; loop++) {
44227diff -urNp linux-3.1.4/fs/fscache/stats.c linux-3.1.4/fs/fscache/stats.c
44228--- linux-3.1.4/fs/fscache/stats.c 2011-11-11 15:19:27.000000000 -0500
44229+++ linux-3.1.4/fs/fscache/stats.c 2011-11-16 18:39:08.000000000 -0500
44230@@ -18,95 +18,95 @@
44231 /*
44232 * operation counters
44233 */
44234-atomic_t fscache_n_op_pend;
44235-atomic_t fscache_n_op_run;
44236-atomic_t fscache_n_op_enqueue;
44237-atomic_t fscache_n_op_requeue;
44238-atomic_t fscache_n_op_deferred_release;
44239-atomic_t fscache_n_op_release;
44240-atomic_t fscache_n_op_gc;
44241-atomic_t fscache_n_op_cancelled;
44242-atomic_t fscache_n_op_rejected;
44243-
44244-atomic_t fscache_n_attr_changed;
44245-atomic_t fscache_n_attr_changed_ok;
44246-atomic_t fscache_n_attr_changed_nobufs;
44247-atomic_t fscache_n_attr_changed_nomem;
44248-atomic_t fscache_n_attr_changed_calls;
44249-
44250-atomic_t fscache_n_allocs;
44251-atomic_t fscache_n_allocs_ok;
44252-atomic_t fscache_n_allocs_wait;
44253-atomic_t fscache_n_allocs_nobufs;
44254-atomic_t fscache_n_allocs_intr;
44255-atomic_t fscache_n_allocs_object_dead;
44256-atomic_t fscache_n_alloc_ops;
44257-atomic_t fscache_n_alloc_op_waits;
44258-
44259-atomic_t fscache_n_retrievals;
44260-atomic_t fscache_n_retrievals_ok;
44261-atomic_t fscache_n_retrievals_wait;
44262-atomic_t fscache_n_retrievals_nodata;
44263-atomic_t fscache_n_retrievals_nobufs;
44264-atomic_t fscache_n_retrievals_intr;
44265-atomic_t fscache_n_retrievals_nomem;
44266-atomic_t fscache_n_retrievals_object_dead;
44267-atomic_t fscache_n_retrieval_ops;
44268-atomic_t fscache_n_retrieval_op_waits;
44269-
44270-atomic_t fscache_n_stores;
44271-atomic_t fscache_n_stores_ok;
44272-atomic_t fscache_n_stores_again;
44273-atomic_t fscache_n_stores_nobufs;
44274-atomic_t fscache_n_stores_oom;
44275-atomic_t fscache_n_store_ops;
44276-atomic_t fscache_n_store_calls;
44277-atomic_t fscache_n_store_pages;
44278-atomic_t fscache_n_store_radix_deletes;
44279-atomic_t fscache_n_store_pages_over_limit;
44280-
44281-atomic_t fscache_n_store_vmscan_not_storing;
44282-atomic_t fscache_n_store_vmscan_gone;
44283-atomic_t fscache_n_store_vmscan_busy;
44284-atomic_t fscache_n_store_vmscan_cancelled;
44285-
44286-atomic_t fscache_n_marks;
44287-atomic_t fscache_n_uncaches;
44288-
44289-atomic_t fscache_n_acquires;
44290-atomic_t fscache_n_acquires_null;
44291-atomic_t fscache_n_acquires_no_cache;
44292-atomic_t fscache_n_acquires_ok;
44293-atomic_t fscache_n_acquires_nobufs;
44294-atomic_t fscache_n_acquires_oom;
44295-
44296-atomic_t fscache_n_updates;
44297-atomic_t fscache_n_updates_null;
44298-atomic_t fscache_n_updates_run;
44299-
44300-atomic_t fscache_n_relinquishes;
44301-atomic_t fscache_n_relinquishes_null;
44302-atomic_t fscache_n_relinquishes_waitcrt;
44303-atomic_t fscache_n_relinquishes_retire;
44304-
44305-atomic_t fscache_n_cookie_index;
44306-atomic_t fscache_n_cookie_data;
44307-atomic_t fscache_n_cookie_special;
44308-
44309-atomic_t fscache_n_object_alloc;
44310-atomic_t fscache_n_object_no_alloc;
44311-atomic_t fscache_n_object_lookups;
44312-atomic_t fscache_n_object_lookups_negative;
44313-atomic_t fscache_n_object_lookups_positive;
44314-atomic_t fscache_n_object_lookups_timed_out;
44315-atomic_t fscache_n_object_created;
44316-atomic_t fscache_n_object_avail;
44317-atomic_t fscache_n_object_dead;
44318-
44319-atomic_t fscache_n_checkaux_none;
44320-atomic_t fscache_n_checkaux_okay;
44321-atomic_t fscache_n_checkaux_update;
44322-atomic_t fscache_n_checkaux_obsolete;
44323+atomic_unchecked_t fscache_n_op_pend;
44324+atomic_unchecked_t fscache_n_op_run;
44325+atomic_unchecked_t fscache_n_op_enqueue;
44326+atomic_unchecked_t fscache_n_op_requeue;
44327+atomic_unchecked_t fscache_n_op_deferred_release;
44328+atomic_unchecked_t fscache_n_op_release;
44329+atomic_unchecked_t fscache_n_op_gc;
44330+atomic_unchecked_t fscache_n_op_cancelled;
44331+atomic_unchecked_t fscache_n_op_rejected;
44332+
44333+atomic_unchecked_t fscache_n_attr_changed;
44334+atomic_unchecked_t fscache_n_attr_changed_ok;
44335+atomic_unchecked_t fscache_n_attr_changed_nobufs;
44336+atomic_unchecked_t fscache_n_attr_changed_nomem;
44337+atomic_unchecked_t fscache_n_attr_changed_calls;
44338+
44339+atomic_unchecked_t fscache_n_allocs;
44340+atomic_unchecked_t fscache_n_allocs_ok;
44341+atomic_unchecked_t fscache_n_allocs_wait;
44342+atomic_unchecked_t fscache_n_allocs_nobufs;
44343+atomic_unchecked_t fscache_n_allocs_intr;
44344+atomic_unchecked_t fscache_n_allocs_object_dead;
44345+atomic_unchecked_t fscache_n_alloc_ops;
44346+atomic_unchecked_t fscache_n_alloc_op_waits;
44347+
44348+atomic_unchecked_t fscache_n_retrievals;
44349+atomic_unchecked_t fscache_n_retrievals_ok;
44350+atomic_unchecked_t fscache_n_retrievals_wait;
44351+atomic_unchecked_t fscache_n_retrievals_nodata;
44352+atomic_unchecked_t fscache_n_retrievals_nobufs;
44353+atomic_unchecked_t fscache_n_retrievals_intr;
44354+atomic_unchecked_t fscache_n_retrievals_nomem;
44355+atomic_unchecked_t fscache_n_retrievals_object_dead;
44356+atomic_unchecked_t fscache_n_retrieval_ops;
44357+atomic_unchecked_t fscache_n_retrieval_op_waits;
44358+
44359+atomic_unchecked_t fscache_n_stores;
44360+atomic_unchecked_t fscache_n_stores_ok;
44361+atomic_unchecked_t fscache_n_stores_again;
44362+atomic_unchecked_t fscache_n_stores_nobufs;
44363+atomic_unchecked_t fscache_n_stores_oom;
44364+atomic_unchecked_t fscache_n_store_ops;
44365+atomic_unchecked_t fscache_n_store_calls;
44366+atomic_unchecked_t fscache_n_store_pages;
44367+atomic_unchecked_t fscache_n_store_radix_deletes;
44368+atomic_unchecked_t fscache_n_store_pages_over_limit;
44369+
44370+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44371+atomic_unchecked_t fscache_n_store_vmscan_gone;
44372+atomic_unchecked_t fscache_n_store_vmscan_busy;
44373+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44374+
44375+atomic_unchecked_t fscache_n_marks;
44376+atomic_unchecked_t fscache_n_uncaches;
44377+
44378+atomic_unchecked_t fscache_n_acquires;
44379+atomic_unchecked_t fscache_n_acquires_null;
44380+atomic_unchecked_t fscache_n_acquires_no_cache;
44381+atomic_unchecked_t fscache_n_acquires_ok;
44382+atomic_unchecked_t fscache_n_acquires_nobufs;
44383+atomic_unchecked_t fscache_n_acquires_oom;
44384+
44385+atomic_unchecked_t fscache_n_updates;
44386+atomic_unchecked_t fscache_n_updates_null;
44387+atomic_unchecked_t fscache_n_updates_run;
44388+
44389+atomic_unchecked_t fscache_n_relinquishes;
44390+atomic_unchecked_t fscache_n_relinquishes_null;
44391+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44392+atomic_unchecked_t fscache_n_relinquishes_retire;
44393+
44394+atomic_unchecked_t fscache_n_cookie_index;
44395+atomic_unchecked_t fscache_n_cookie_data;
44396+atomic_unchecked_t fscache_n_cookie_special;
44397+
44398+atomic_unchecked_t fscache_n_object_alloc;
44399+atomic_unchecked_t fscache_n_object_no_alloc;
44400+atomic_unchecked_t fscache_n_object_lookups;
44401+atomic_unchecked_t fscache_n_object_lookups_negative;
44402+atomic_unchecked_t fscache_n_object_lookups_positive;
44403+atomic_unchecked_t fscache_n_object_lookups_timed_out;
44404+atomic_unchecked_t fscache_n_object_created;
44405+atomic_unchecked_t fscache_n_object_avail;
44406+atomic_unchecked_t fscache_n_object_dead;
44407+
44408+atomic_unchecked_t fscache_n_checkaux_none;
44409+atomic_unchecked_t fscache_n_checkaux_okay;
44410+atomic_unchecked_t fscache_n_checkaux_update;
44411+atomic_unchecked_t fscache_n_checkaux_obsolete;
44412
44413 atomic_t fscache_n_cop_alloc_object;
44414 atomic_t fscache_n_cop_lookup_object;
44415@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
44416 seq_puts(m, "FS-Cache statistics\n");
44417
44418 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
44419- atomic_read(&fscache_n_cookie_index),
44420- atomic_read(&fscache_n_cookie_data),
44421- atomic_read(&fscache_n_cookie_special));
44422+ atomic_read_unchecked(&fscache_n_cookie_index),
44423+ atomic_read_unchecked(&fscache_n_cookie_data),
44424+ atomic_read_unchecked(&fscache_n_cookie_special));
44425
44426 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
44427- atomic_read(&fscache_n_object_alloc),
44428- atomic_read(&fscache_n_object_no_alloc),
44429- atomic_read(&fscache_n_object_avail),
44430- atomic_read(&fscache_n_object_dead));
44431+ atomic_read_unchecked(&fscache_n_object_alloc),
44432+ atomic_read_unchecked(&fscache_n_object_no_alloc),
44433+ atomic_read_unchecked(&fscache_n_object_avail),
44434+ atomic_read_unchecked(&fscache_n_object_dead));
44435 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
44436- atomic_read(&fscache_n_checkaux_none),
44437- atomic_read(&fscache_n_checkaux_okay),
44438- atomic_read(&fscache_n_checkaux_update),
44439- atomic_read(&fscache_n_checkaux_obsolete));
44440+ atomic_read_unchecked(&fscache_n_checkaux_none),
44441+ atomic_read_unchecked(&fscache_n_checkaux_okay),
44442+ atomic_read_unchecked(&fscache_n_checkaux_update),
44443+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
44444
44445 seq_printf(m, "Pages : mrk=%u unc=%u\n",
44446- atomic_read(&fscache_n_marks),
44447- atomic_read(&fscache_n_uncaches));
44448+ atomic_read_unchecked(&fscache_n_marks),
44449+ atomic_read_unchecked(&fscache_n_uncaches));
44450
44451 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
44452 " oom=%u\n",
44453- atomic_read(&fscache_n_acquires),
44454- atomic_read(&fscache_n_acquires_null),
44455- atomic_read(&fscache_n_acquires_no_cache),
44456- atomic_read(&fscache_n_acquires_ok),
44457- atomic_read(&fscache_n_acquires_nobufs),
44458- atomic_read(&fscache_n_acquires_oom));
44459+ atomic_read_unchecked(&fscache_n_acquires),
44460+ atomic_read_unchecked(&fscache_n_acquires_null),
44461+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
44462+ atomic_read_unchecked(&fscache_n_acquires_ok),
44463+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
44464+ atomic_read_unchecked(&fscache_n_acquires_oom));
44465
44466 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
44467- atomic_read(&fscache_n_object_lookups),
44468- atomic_read(&fscache_n_object_lookups_negative),
44469- atomic_read(&fscache_n_object_lookups_positive),
44470- atomic_read(&fscache_n_object_created),
44471- atomic_read(&fscache_n_object_lookups_timed_out));
44472+ atomic_read_unchecked(&fscache_n_object_lookups),
44473+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
44474+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
44475+ atomic_read_unchecked(&fscache_n_object_created),
44476+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
44477
44478 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
44479- atomic_read(&fscache_n_updates),
44480- atomic_read(&fscache_n_updates_null),
44481- atomic_read(&fscache_n_updates_run));
44482+ atomic_read_unchecked(&fscache_n_updates),
44483+ atomic_read_unchecked(&fscache_n_updates_null),
44484+ atomic_read_unchecked(&fscache_n_updates_run));
44485
44486 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
44487- atomic_read(&fscache_n_relinquishes),
44488- atomic_read(&fscache_n_relinquishes_null),
44489- atomic_read(&fscache_n_relinquishes_waitcrt),
44490- atomic_read(&fscache_n_relinquishes_retire));
44491+ atomic_read_unchecked(&fscache_n_relinquishes),
44492+ atomic_read_unchecked(&fscache_n_relinquishes_null),
44493+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
44494+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
44495
44496 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
44497- atomic_read(&fscache_n_attr_changed),
44498- atomic_read(&fscache_n_attr_changed_ok),
44499- atomic_read(&fscache_n_attr_changed_nobufs),
44500- atomic_read(&fscache_n_attr_changed_nomem),
44501- atomic_read(&fscache_n_attr_changed_calls));
44502+ atomic_read_unchecked(&fscache_n_attr_changed),
44503+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
44504+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
44505+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
44506+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
44507
44508 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
44509- atomic_read(&fscache_n_allocs),
44510- atomic_read(&fscache_n_allocs_ok),
44511- atomic_read(&fscache_n_allocs_wait),
44512- atomic_read(&fscache_n_allocs_nobufs),
44513- atomic_read(&fscache_n_allocs_intr));
44514+ atomic_read_unchecked(&fscache_n_allocs),
44515+ atomic_read_unchecked(&fscache_n_allocs_ok),
44516+ atomic_read_unchecked(&fscache_n_allocs_wait),
44517+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
44518+ atomic_read_unchecked(&fscache_n_allocs_intr));
44519 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
44520- atomic_read(&fscache_n_alloc_ops),
44521- atomic_read(&fscache_n_alloc_op_waits),
44522- atomic_read(&fscache_n_allocs_object_dead));
44523+ atomic_read_unchecked(&fscache_n_alloc_ops),
44524+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
44525+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
44526
44527 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
44528 " int=%u oom=%u\n",
44529- atomic_read(&fscache_n_retrievals),
44530- atomic_read(&fscache_n_retrievals_ok),
44531- atomic_read(&fscache_n_retrievals_wait),
44532- atomic_read(&fscache_n_retrievals_nodata),
44533- atomic_read(&fscache_n_retrievals_nobufs),
44534- atomic_read(&fscache_n_retrievals_intr),
44535- atomic_read(&fscache_n_retrievals_nomem));
44536+ atomic_read_unchecked(&fscache_n_retrievals),
44537+ atomic_read_unchecked(&fscache_n_retrievals_ok),
44538+ atomic_read_unchecked(&fscache_n_retrievals_wait),
44539+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
44540+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
44541+ atomic_read_unchecked(&fscache_n_retrievals_intr),
44542+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
44543 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
44544- atomic_read(&fscache_n_retrieval_ops),
44545- atomic_read(&fscache_n_retrieval_op_waits),
44546- atomic_read(&fscache_n_retrievals_object_dead));
44547+ atomic_read_unchecked(&fscache_n_retrieval_ops),
44548+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
44549+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
44550
44551 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
44552- atomic_read(&fscache_n_stores),
44553- atomic_read(&fscache_n_stores_ok),
44554- atomic_read(&fscache_n_stores_again),
44555- atomic_read(&fscache_n_stores_nobufs),
44556- atomic_read(&fscache_n_stores_oom));
44557+ atomic_read_unchecked(&fscache_n_stores),
44558+ atomic_read_unchecked(&fscache_n_stores_ok),
44559+ atomic_read_unchecked(&fscache_n_stores_again),
44560+ atomic_read_unchecked(&fscache_n_stores_nobufs),
44561+ atomic_read_unchecked(&fscache_n_stores_oom));
44562 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
44563- atomic_read(&fscache_n_store_ops),
44564- atomic_read(&fscache_n_store_calls),
44565- atomic_read(&fscache_n_store_pages),
44566- atomic_read(&fscache_n_store_radix_deletes),
44567- atomic_read(&fscache_n_store_pages_over_limit));
44568+ atomic_read_unchecked(&fscache_n_store_ops),
44569+ atomic_read_unchecked(&fscache_n_store_calls),
44570+ atomic_read_unchecked(&fscache_n_store_pages),
44571+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
44572+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
44573
44574 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
44575- atomic_read(&fscache_n_store_vmscan_not_storing),
44576- atomic_read(&fscache_n_store_vmscan_gone),
44577- atomic_read(&fscache_n_store_vmscan_busy),
44578- atomic_read(&fscache_n_store_vmscan_cancelled));
44579+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
44580+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
44581+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
44582+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
44583
44584 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
44585- atomic_read(&fscache_n_op_pend),
44586- atomic_read(&fscache_n_op_run),
44587- atomic_read(&fscache_n_op_enqueue),
44588- atomic_read(&fscache_n_op_cancelled),
44589- atomic_read(&fscache_n_op_rejected));
44590+ atomic_read_unchecked(&fscache_n_op_pend),
44591+ atomic_read_unchecked(&fscache_n_op_run),
44592+ atomic_read_unchecked(&fscache_n_op_enqueue),
44593+ atomic_read_unchecked(&fscache_n_op_cancelled),
44594+ atomic_read_unchecked(&fscache_n_op_rejected));
44595 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
44596- atomic_read(&fscache_n_op_deferred_release),
44597- atomic_read(&fscache_n_op_release),
44598- atomic_read(&fscache_n_op_gc));
44599+ atomic_read_unchecked(&fscache_n_op_deferred_release),
44600+ atomic_read_unchecked(&fscache_n_op_release),
44601+ atomic_read_unchecked(&fscache_n_op_gc));
44602
44603 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
44604 atomic_read(&fscache_n_cop_alloc_object),
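The fscache hunks above follow a pattern used throughout this patch: counters that are only ever read back for statistics or debug IDs, and never drive object lifetime decisions, are converted from atomic_t to atomic_unchecked_t, and their accessors switch to the *_unchecked helpers so the PAX_REFCOUNT overflow protection does not trap on counters that are allowed to wrap. fscache_stat_unchecked() is presumably a thin wrapper around atomic_inc_unchecked(), mirroring the existing fscache_stat() wrapper, but its definition sits outside the hunks shown here. The fragment below is only a rough, userspace-flavoured sketch of the unchecked type and helpers, not code from the patch:

/* Illustrative sketch: the unchecked counter mirrors atomic_t, but its
 * helpers do a plain increment with no saturation or trap, which is what
 * PAX_REFCOUNT adds to the checked atomic_t operations. */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);   /* wraps silently on overflow */
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return *(volatile const int *)&v->counter;
}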
44605diff -urNp linux-3.1.4/fs/fs_struct.c linux-3.1.4/fs/fs_struct.c
44606--- linux-3.1.4/fs/fs_struct.c 2011-11-11 15:19:27.000000000 -0500
44607+++ linux-3.1.4/fs/fs_struct.c 2011-11-16 18:40:29.000000000 -0500
44608@@ -4,6 +4,7 @@
44609 #include <linux/path.h>
44610 #include <linux/slab.h>
44611 #include <linux/fs_struct.h>
44612+#include <linux/grsecurity.h>
44613 #include "internal.h"
44614
44615 static inline void path_get_longterm(struct path *path)
44616@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
44617 old_root = fs->root;
44618 fs->root = *path;
44619 path_get_longterm(path);
44620+ gr_set_chroot_entries(current, path);
44621 write_seqcount_end(&fs->seq);
44622 spin_unlock(&fs->lock);
44623 if (old_root.dentry)
44624@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
44625 && fs->root.mnt == old_root->mnt) {
44626 path_get_longterm(new_root);
44627 fs->root = *new_root;
44628+ gr_set_chroot_entries(p, new_root);
44629 count++;
44630 }
44631 if (fs->pwd.dentry == old_root->dentry
44632@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
44633 spin_lock(&fs->lock);
44634 write_seqcount_begin(&fs->seq);
44635 tsk->fs = NULL;
44636- kill = !--fs->users;
44637+ gr_clear_chroot_entries(tsk);
44638+ kill = !atomic_dec_return(&fs->users);
44639 write_seqcount_end(&fs->seq);
44640 spin_unlock(&fs->lock);
44641 task_unlock(tsk);
44642@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
44643 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44644 /* We don't need to lock fs - think why ;-) */
44645 if (fs) {
44646- fs->users = 1;
44647+ atomic_set(&fs->users, 1);
44648 fs->in_exec = 0;
44649 spin_lock_init(&fs->lock);
44650 seqcount_init(&fs->seq);
44651@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
44652 spin_lock(&old->lock);
44653 fs->root = old->root;
44654 path_get_longterm(&fs->root);
44655+ /* instead of calling gr_set_chroot_entries here,
44656+ we call it from every caller of this function
44657+ */
44658 fs->pwd = old->pwd;
44659 path_get_longterm(&fs->pwd);
44660 spin_unlock(&old->lock);
44661@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
44662
44663 task_lock(current);
44664 spin_lock(&fs->lock);
44665- kill = !--fs->users;
44666+ kill = !atomic_dec_return(&fs->users);
44667 current->fs = new_fs;
44668+ gr_set_chroot_entries(current, &new_fs->root);
44669 spin_unlock(&fs->lock);
44670 task_unlock(current);
44671
44672@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
44673
44674 /* to be mentioned only in INIT_TASK */
44675 struct fs_struct init_fs = {
44676- .users = 1,
44677+ .users = ATOMIC_INIT(1),
44678 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44679 .seq = SEQCNT_ZERO,
44680 .umask = 0022,
44681@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
44682 task_lock(current);
44683
44684 spin_lock(&init_fs.lock);
44685- init_fs.users++;
44686+ atomic_inc(&init_fs.users);
44687 spin_unlock(&init_fs.lock);
44688
44689 spin_lock(&fs->lock);
44690 current->fs = &init_fs;
44691- kill = !--fs->users;
44692+ gr_set_chroot_entries(current, &current->fs->root);
44693+ kill = !atomic_dec_return(&fs->users);
44694 spin_unlock(&fs->lock);
44695
44696 task_unlock(current);
44697diff -urNp linux-3.1.4/fs/fuse/cuse.c linux-3.1.4/fs/fuse/cuse.c
44698--- linux-3.1.4/fs/fuse/cuse.c 2011-11-11 15:19:27.000000000 -0500
44699+++ linux-3.1.4/fs/fuse/cuse.c 2011-11-16 18:39:08.000000000 -0500
44700@@ -586,10 +586,12 @@ static int __init cuse_init(void)
44701 INIT_LIST_HEAD(&cuse_conntbl[i]);
44702
44703 /* inherit and extend fuse_dev_operations */
44704- cuse_channel_fops = fuse_dev_operations;
44705- cuse_channel_fops.owner = THIS_MODULE;
44706- cuse_channel_fops.open = cuse_channel_open;
44707- cuse_channel_fops.release = cuse_channel_release;
44708+ pax_open_kernel();
44709+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
44710+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
44711+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
44712+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
44713+ pax_close_kernel();
44714
44715 cuse_class = class_create(THIS_MODULE, "cuse");
44716 if (IS_ERR(cuse_class))
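The cuse.c hunk above replaces direct member assignments on cuse_channel_fops with a memcpy plus casted pointer writes bracketed by pax_open_kernel()/pax_close_kernel(). Under PaX's KERNEXEC/constify hardening, ops structures such as file_operations live in read-only memory, so a temporary write window (on x86 typically opened by toggling CR0.WP) is needed before they can be filled in at init time. The fragment below is a hedged sketch of that pattern for an invented example_fops/example_open pair; it only builds inside a PaX-patched tree, which is what provides pax_open_kernel() and pax_close_kernel().

/* Illustrative sketch of initialising a constified ops structure. */
static struct file_operations example_fops;     /* effectively read-only once constified */

static int example_open(struct inode *inode, struct file *file)
{
        return 0;                               /* placeholder open handler */
}

static void __init example_init_fops(const struct file_operations *template)
{
        pax_open_kernel();                      /* open a write window to read-only data */
        memcpy((void *)&example_fops, template, sizeof(example_fops));
        *(void **)&example_fops.owner = THIS_MODULE;
        *(void **)&example_fops.open  = example_open;
        pax_close_kernel();                     /* restore write protection */
}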
44717diff -urNp linux-3.1.4/fs/fuse/dev.c linux-3.1.4/fs/fuse/dev.c
44718--- linux-3.1.4/fs/fuse/dev.c 2011-11-11 15:19:27.000000000 -0500
44719+++ linux-3.1.4/fs/fuse/dev.c 2011-11-16 18:39:08.000000000 -0500
44720@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(stru
44721 ret = 0;
44722 pipe_lock(pipe);
44723
44724- if (!pipe->readers) {
44725+ if (!atomic_read(&pipe->readers)) {
44726 send_sig(SIGPIPE, current, 0);
44727 if (!ret)
44728 ret = -EPIPE;
44729diff -urNp linux-3.1.4/fs/fuse/dir.c linux-3.1.4/fs/fuse/dir.c
44730--- linux-3.1.4/fs/fuse/dir.c 2011-11-11 15:19:27.000000000 -0500
44731+++ linux-3.1.4/fs/fuse/dir.c 2011-11-16 18:39:08.000000000 -0500
44732@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
44733 return link;
44734 }
44735
44736-static void free_link(char *link)
44737+static void free_link(const char *link)
44738 {
44739 if (!IS_ERR(link))
44740 free_page((unsigned long) link);
44741diff -urNp linux-3.1.4/fs/gfs2/inode.c linux-3.1.4/fs/gfs2/inode.c
44742--- linux-3.1.4/fs/gfs2/inode.c 2011-11-11 15:19:27.000000000 -0500
44743+++ linux-3.1.4/fs/gfs2/inode.c 2011-11-16 18:39:08.000000000 -0500
44744@@ -1517,7 +1517,7 @@ out:
44745
44746 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44747 {
44748- char *s = nd_get_link(nd);
44749+ const char *s = nd_get_link(nd);
44750 if (!IS_ERR(s))
44751 kfree(s);
44752 }
44753diff -urNp linux-3.1.4/fs/hfs/btree.c linux-3.1.4/fs/hfs/btree.c
44754--- linux-3.1.4/fs/hfs/btree.c 2011-11-11 15:19:27.000000000 -0500
44755+++ linux-3.1.4/fs/hfs/btree.c 2011-11-18 18:48:11.000000000 -0500
44756@@ -46,11 +46,27 @@ struct hfs_btree *hfs_btree_open(struct
44757 case HFS_EXT_CNID:
44758 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
44759 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
44760+
44761+ if (HFS_I(tree->inode)->alloc_blocks >
44762+ HFS_I(tree->inode)->first_blocks) {
44763+ printk(KERN_ERR "hfs: invalid btree extent records\n");
44764+ unlock_new_inode(tree->inode);
44765+ goto free_inode;
44766+ }
44767+
44768 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
44769 break;
44770 case HFS_CAT_CNID:
44771 hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
44772 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
44773+
44774+ if (!HFS_I(tree->inode)->first_blocks) {
44775+ printk(KERN_ERR "hfs: invalid btree extent records "
44776+ "(0 size).\n");
44777+ unlock_new_inode(tree->inode);
44778+ goto free_inode;
44779+ }
44780+
44781 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
44782 break;
44783 default:
44784@@ -59,11 +75,6 @@ struct hfs_btree *hfs_btree_open(struct
44785 }
44786 unlock_new_inode(tree->inode);
44787
44788- if (!HFS_I(tree->inode)->first_blocks) {
44789- printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
44790- goto free_inode;
44791- }
44792-
44793 mapping = tree->inode->i_mapping;
44794 page = read_mapping_page(mapping, 0, NULL);
44795 if (IS_ERR(page))
44796diff -urNp linux-3.1.4/fs/hfsplus/catalog.c linux-3.1.4/fs/hfsplus/catalog.c
44797--- linux-3.1.4/fs/hfsplus/catalog.c 2011-11-11 15:19:27.000000000 -0500
44798+++ linux-3.1.4/fs/hfsplus/catalog.c 2011-11-16 19:23:09.000000000 -0500
44799@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
44800 int err;
44801 u16 type;
44802
44803+ pax_track_stack();
44804+
44805 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
44806 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
44807 if (err)
44808@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
44809 int entry_size;
44810 int err;
44811
44812+ pax_track_stack();
44813+
44814 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
44815 str->name, cnid, inode->i_nlink);
44816 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
44817@@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid,
44818 int entry_size, type;
44819 int err;
44820
44821+ pax_track_stack();
44822+
44823 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
44824 cnid, src_dir->i_ino, src_name->name,
44825 dst_dir->i_ino, dst_name->name);
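hfsplus_find_cat(), hfsplus_create_cat() and hfsplus_rename_cat() above, like many functions later in this section (jbd, jffs2, lockd, logfs, namei's renameat, ncpfs, nfsd), gain a pax_track_stack() call at the top. These functions carry unusually large stack frames (on-stack hfs_find_data and search keys here), and under PAX_MEMORY_STACKLEAK the call records how deep the kernel stack has grown so the used region can be sanitised before returning to user space, keeping stale data from leaking into later, shallower frames. The snippet below is only a conceptual sketch of that bookkeeping with invented names; the real helper lives in the PaX core, not in these filesystems.

/* Conceptual sketch: remember the deepest stack pointer seen so the exit
 * path knows how much of the stack to clear.  Names are illustrative. */
static inline void track_stack_sketch(unsigned long *lowest_stack)
{
        unsigned long sp = (unsigned long)&sp;  /* rough current stack pointer */

        if (sp < *lowest_stack)
                *lowest_stack = sp;
}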
44826diff -urNp linux-3.1.4/fs/hfsplus/dir.c linux-3.1.4/fs/hfsplus/dir.c
44827--- linux-3.1.4/fs/hfsplus/dir.c 2011-11-11 15:19:27.000000000 -0500
44828+++ linux-3.1.4/fs/hfsplus/dir.c 2011-11-16 18:40:29.000000000 -0500
44829@@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *
44830 struct hfsplus_readdir_data *rd;
44831 u16 type;
44832
44833+ pax_track_stack();
44834+
44835 if (filp->f_pos >= inode->i_size)
44836 return 0;
44837
44838diff -urNp linux-3.1.4/fs/hfsplus/inode.c linux-3.1.4/fs/hfsplus/inode.c
44839--- linux-3.1.4/fs/hfsplus/inode.c 2011-11-11 15:19:27.000000000 -0500
44840+++ linux-3.1.4/fs/hfsplus/inode.c 2011-11-16 18:40:29.000000000 -0500
44841@@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode
44842 int res = 0;
44843 u16 type;
44844
44845+ pax_track_stack();
44846+
44847 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
44848
44849 HFSPLUS_I(inode)->linkid = 0;
44850@@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode
44851 struct hfs_find_data fd;
44852 hfsplus_cat_entry entry;
44853
44854+ pax_track_stack();
44855+
44856 if (HFSPLUS_IS_RSRC(inode))
44857 main_inode = HFSPLUS_I(inode)->rsrc_inode;
44858
44859diff -urNp linux-3.1.4/fs/hfsplus/ioctl.c linux-3.1.4/fs/hfsplus/ioctl.c
44860--- linux-3.1.4/fs/hfsplus/ioctl.c 2011-11-11 15:19:27.000000000 -0500
44861+++ linux-3.1.4/fs/hfsplus/ioctl.c 2011-11-16 18:40:29.000000000 -0500
44862@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
44863 struct hfsplus_cat_file *file;
44864 int res;
44865
44866+ pax_track_stack();
44867+
44868 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
44869 return -EOPNOTSUPP;
44870
44871@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
44872 struct hfsplus_cat_file *file;
44873 ssize_t res = 0;
44874
44875+ pax_track_stack();
44876+
44877 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
44878 return -EOPNOTSUPP;
44879
44880diff -urNp linux-3.1.4/fs/hfsplus/super.c linux-3.1.4/fs/hfsplus/super.c
44881--- linux-3.1.4/fs/hfsplus/super.c 2011-11-11 15:19:27.000000000 -0500
44882+++ linux-3.1.4/fs/hfsplus/super.c 2011-11-16 19:23:30.000000000 -0500
44883@@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct sup
44884 u64 last_fs_block, last_fs_page;
44885 int err;
44886
44887+ pax_track_stack();
44888+
44889 err = -EINVAL;
44890 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
44891 if (!sbi)
44892diff -urNp linux-3.1.4/fs/hugetlbfs/inode.c linux-3.1.4/fs/hugetlbfs/inode.c
44893--- linux-3.1.4/fs/hugetlbfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44894+++ linux-3.1.4/fs/hugetlbfs/inode.c 2011-11-16 18:40:29.000000000 -0500
44895@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs
44896 .kill_sb = kill_litter_super,
44897 };
44898
44899-static struct vfsmount *hugetlbfs_vfsmount;
44900+struct vfsmount *hugetlbfs_vfsmount;
44901
44902 static int can_do_hugetlb_shm(void)
44903 {
44904diff -urNp linux-3.1.4/fs/inode.c linux-3.1.4/fs/inode.c
44905--- linux-3.1.4/fs/inode.c 2011-11-11 15:19:27.000000000 -0500
44906+++ linux-3.1.4/fs/inode.c 2011-11-16 18:39:08.000000000 -0500
44907@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44908
44909 #ifdef CONFIG_SMP
44910 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44911- static atomic_t shared_last_ino;
44912- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44913+ static atomic_unchecked_t shared_last_ino;
44914+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44915
44916 res = next - LAST_INO_BATCH;
44917 }
44918diff -urNp linux-3.1.4/fs/jbd/checkpoint.c linux-3.1.4/fs/jbd/checkpoint.c
44919--- linux-3.1.4/fs/jbd/checkpoint.c 2011-11-11 15:19:27.000000000 -0500
44920+++ linux-3.1.4/fs/jbd/checkpoint.c 2011-11-16 18:40:29.000000000 -0500
44921@@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal
44922 tid_t this_tid;
44923 int result;
44924
44925+ pax_track_stack();
44926+
44927 jbd_debug(1, "Start checkpoint\n");
44928
44929 /*
44930diff -urNp linux-3.1.4/fs/jffs2/compr_rtime.c linux-3.1.4/fs/jffs2/compr_rtime.c
44931--- linux-3.1.4/fs/jffs2/compr_rtime.c 2011-11-11 15:19:27.000000000 -0500
44932+++ linux-3.1.4/fs/jffs2/compr_rtime.c 2011-11-16 18:40:29.000000000 -0500
44933@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
44934 int outpos = 0;
44935 int pos=0;
44936
44937+ pax_track_stack();
44938+
44939 memset(positions,0,sizeof(positions));
44940
44941 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
44942@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
44943 int outpos = 0;
44944 int pos=0;
44945
44946+ pax_track_stack();
44947+
44948 memset(positions,0,sizeof(positions));
44949
44950 while (outpos<destlen) {
44951diff -urNp linux-3.1.4/fs/jffs2/compr_rubin.c linux-3.1.4/fs/jffs2/compr_rubin.c
44952--- linux-3.1.4/fs/jffs2/compr_rubin.c 2011-11-11 15:19:27.000000000 -0500
44953+++ linux-3.1.4/fs/jffs2/compr_rubin.c 2011-11-16 18:40:29.000000000 -0500
44954@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
44955 int ret;
44956 uint32_t mysrclen, mydstlen;
44957
44958+ pax_track_stack();
44959+
44960 mysrclen = *sourcelen;
44961 mydstlen = *dstlen - 8;
44962
44963diff -urNp linux-3.1.4/fs/jffs2/erase.c linux-3.1.4/fs/jffs2/erase.c
44964--- linux-3.1.4/fs/jffs2/erase.c 2011-11-11 15:19:27.000000000 -0500
44965+++ linux-3.1.4/fs/jffs2/erase.c 2011-11-16 18:39:08.000000000 -0500
44966@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
44967 struct jffs2_unknown_node marker = {
44968 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44969 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44970- .totlen = cpu_to_je32(c->cleanmarker_size)
44971+ .totlen = cpu_to_je32(c->cleanmarker_size),
44972+ .hdr_crc = cpu_to_je32(0)
44973 };
44974
44975 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44976diff -urNp linux-3.1.4/fs/jffs2/wbuf.c linux-3.1.4/fs/jffs2/wbuf.c
44977--- linux-3.1.4/fs/jffs2/wbuf.c 2011-11-11 15:19:27.000000000 -0500
44978+++ linux-3.1.4/fs/jffs2/wbuf.c 2011-11-16 18:39:08.000000000 -0500
44979@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
44980 {
44981 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44982 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44983- .totlen = constant_cpu_to_je32(8)
44984+ .totlen = constant_cpu_to_je32(8),
44985+ .hdr_crc = constant_cpu_to_je32(0)
44986 };
44987
44988 /*
44989diff -urNp linux-3.1.4/fs/jffs2/xattr.c linux-3.1.4/fs/jffs2/xattr.c
44990--- linux-3.1.4/fs/jffs2/xattr.c 2011-11-11 15:19:27.000000000 -0500
44991+++ linux-3.1.4/fs/jffs2/xattr.c 2011-11-16 18:40:29.000000000 -0500
44992@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
44993
44994 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
44995
44996+ pax_track_stack();
44997+
44998 /* Phase.1 : Merge same xref */
44999 for (i=0; i < XREF_TMPHASH_SIZE; i++)
45000 xref_tmphash[i] = NULL;
45001diff -urNp linux-3.1.4/fs/jfs/super.c linux-3.1.4/fs/jfs/super.c
45002--- linux-3.1.4/fs/jfs/super.c 2011-11-11 15:19:27.000000000 -0500
45003+++ linux-3.1.4/fs/jfs/super.c 2011-11-16 18:39:08.000000000 -0500
45004@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
45005
45006 jfs_inode_cachep =
45007 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
45008- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
45009+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
45010 init_once);
45011 if (jfs_inode_cachep == NULL)
45012 return -ENOMEM;
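The jfs hunk above adds SLAB_USERCOPY when creating the jfs_ip inode cache. Under PAX_USERCOPY, copies between user space and slab objects are only allowed for caches explicitly whitelisted with this flag, and jfs needs the whitelist because data embedded in its inode objects (inline symlink targets, for instance) is copied straight to user space. The fragment below is a hedged usage sketch with invented names (example_obj, example_cachep) showing how such a cache would be declared; it is not part of the patch.

/* Illustrative: a cache whose objects are copied directly to user space is
 * created with SLAB_USERCOPY so the PAX_USERCOPY bounds checker accepts it. */
struct example_obj {
        char inline_data[128];                  /* copied straight to user space */
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
        example_cachep = kmem_cache_create("example_objs",
                                           sizeof(struct example_obj), 0,
                                           SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
                                           SLAB_USERCOPY,
                                           NULL);
        return example_cachep ? 0 : -ENOMEM;
}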
45013diff -urNp linux-3.1.4/fs/Kconfig.binfmt linux-3.1.4/fs/Kconfig.binfmt
45014--- linux-3.1.4/fs/Kconfig.binfmt 2011-11-11 15:19:27.000000000 -0500
45015+++ linux-3.1.4/fs/Kconfig.binfmt 2011-11-16 18:39:08.000000000 -0500
45016@@ -86,7 +86,7 @@ config HAVE_AOUT
45017
45018 config BINFMT_AOUT
45019 tristate "Kernel support for a.out and ECOFF binaries"
45020- depends on HAVE_AOUT
45021+ depends on HAVE_AOUT && BROKEN
45022 ---help---
45023 A.out (Assembler.OUTput) is a set of formats for libraries and
45024 executables used in the earliest versions of UNIX. Linux used
45025diff -urNp linux-3.1.4/fs/libfs.c linux-3.1.4/fs/libfs.c
45026--- linux-3.1.4/fs/libfs.c 2011-11-11 15:19:27.000000000 -0500
45027+++ linux-3.1.4/fs/libfs.c 2011-11-16 18:39:08.000000000 -0500
45028@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, v
45029
45030 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
45031 struct dentry *next;
45032+ char d_name[sizeof(next->d_iname)];
45033+ const unsigned char *name;
45034+
45035 next = list_entry(p, struct dentry, d_u.d_child);
45036 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
45037 if (!simple_positive(next)) {
45038@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, v
45039
45040 spin_unlock(&next->d_lock);
45041 spin_unlock(&dentry->d_lock);
45042- if (filldir(dirent, next->d_name.name,
45043+ name = next->d_name.name;
45044+ if (name == next->d_iname) {
45045+ memcpy(d_name, name, next->d_name.len);
45046+ name = d_name;
45047+ }
45048+ if (filldir(dirent, name,
45049 next->d_name.len, filp->f_pos,
45050 next->d_inode->i_ino,
45051 dt_type(next->d_inode)) < 0)
45052diff -urNp linux-3.1.4/fs/lockd/clntproc.c linux-3.1.4/fs/lockd/clntproc.c
45053--- linux-3.1.4/fs/lockd/clntproc.c 2011-11-11 15:19:27.000000000 -0500
45054+++ linux-3.1.4/fs/lockd/clntproc.c 2011-11-16 18:40:29.000000000 -0500
45055@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
45056 /*
45057 * Cookie counter for NLM requests
45058 */
45059-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
45060+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
45061
45062 void nlmclnt_next_cookie(struct nlm_cookie *c)
45063 {
45064- u32 cookie = atomic_inc_return(&nlm_cookie);
45065+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
45066
45067 memcpy(c->data, &cookie, 4);
45068 c->len=4;
45069@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
45070 struct nlm_rqst reqst, *req;
45071 int status;
45072
45073+ pax_track_stack();
45074+
45075 req = &reqst;
45076 memset(req, 0, sizeof(*req));
45077 locks_init_lock(&req->a_args.lock.fl);
45078diff -urNp linux-3.1.4/fs/locks.c linux-3.1.4/fs/locks.c
45079--- linux-3.1.4/fs/locks.c 2011-11-11 15:19:27.000000000 -0500
45080+++ linux-3.1.4/fs/locks.c 2011-11-16 18:39:08.000000000 -0500
45081@@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *fil
45082 return;
45083
45084 if (filp->f_op && filp->f_op->flock) {
45085- struct file_lock fl = {
45086+ struct file_lock flock = {
45087 .fl_pid = current->tgid,
45088 .fl_file = filp,
45089 .fl_flags = FL_FLOCK,
45090 .fl_type = F_UNLCK,
45091 .fl_end = OFFSET_MAX,
45092 };
45093- filp->f_op->flock(filp, F_SETLKW, &fl);
45094- if (fl.fl_ops && fl.fl_ops->fl_release_private)
45095- fl.fl_ops->fl_release_private(&fl);
45096+ filp->f_op->flock(filp, F_SETLKW, &flock);
45097+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
45098+ flock.fl_ops->fl_release_private(&flock);
45099 }
45100
45101 lock_flocks();
45102diff -urNp linux-3.1.4/fs/logfs/super.c linux-3.1.4/fs/logfs/super.c
45103--- linux-3.1.4/fs/logfs/super.c 2011-11-11 15:19:27.000000000 -0500
45104+++ linux-3.1.4/fs/logfs/super.c 2011-11-16 18:40:29.000000000 -0500
45105@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
45106 struct logfs_disk_super _ds1, *ds1 = &_ds1;
45107 int err, valid0, valid1;
45108
45109+ pax_track_stack();
45110+
45111 /* read first superblock */
45112 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
45113 if (err)
45114diff -urNp linux-3.1.4/fs/namei.c linux-3.1.4/fs/namei.c
45115--- linux-3.1.4/fs/namei.c 2011-11-11 15:19:27.000000000 -0500
45116+++ linux-3.1.4/fs/namei.c 2011-11-17 00:36:54.000000000 -0500
45117@@ -283,14 +283,22 @@ int generic_permission(struct inode *ino
45118
45119 if (S_ISDIR(inode->i_mode)) {
45120 /* DACs are overridable for directories */
45121- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45122- return 0;
45123 if (!(mask & MAY_WRITE))
45124 if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45125 return 0;
45126+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45127+ return 0;
45128 return -EACCES;
45129 }
45130 /*
45131+ * Searching includes executable on directories, else just read.
45132+ */
45133+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45134+ if (mask == MAY_READ)
45135+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45136+ return 0;
45137+
45138+ /*
45139 * Read/write DACs are always overridable.
45140 * Executable DACs are overridable when there is
45141 * at least one exec bit set.
45142@@ -299,14 +307,6 @@ int generic_permission(struct inode *ino
45143 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45144 return 0;
45145
45146- /*
45147- * Searching includes executable on directories, else just read.
45148- */
45149- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45150- if (mask == MAY_READ)
45151- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45152- return 0;
45153-
45154 return -EACCES;
45155 }
45156
45157@@ -653,11 +653,19 @@ follow_link(struct path *link, struct na
45158 return error;
45159 }
45160
45161+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
45162+ dentry->d_inode, dentry, nd->path.mnt)) {
45163+ error = -EACCES;
45164+ *p = ERR_PTR(error); /* no ->put_link(), please */
45165+ path_put(&nd->path);
45166+ return error;
45167+ }
45168+
45169 nd->last_type = LAST_BIND;
45170 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
45171 error = PTR_ERR(*p);
45172 if (!IS_ERR(*p)) {
45173- char *s = nd_get_link(nd);
45174+ const char *s = nd_get_link(nd);
45175 error = 0;
45176 if (s)
45177 error = __vfs_follow_link(nd, s);
45178@@ -1622,6 +1630,12 @@ static int path_lookupat(int dfd, const
45179 if (!err)
45180 err = complete_walk(nd);
45181
45182+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45183+ if (!err)
45184+ path_put(&nd->path);
45185+ err = -ENOENT;
45186+ }
45187+
45188 if (!err && nd->flags & LOOKUP_DIRECTORY) {
45189 if (!nd->inode->i_op->lookup) {
45190 path_put(&nd->path);
45191@@ -1649,6 +1663,9 @@ static int do_path_lookup(int dfd, const
45192 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
45193
45194 if (likely(!retval)) {
45195+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
45196+ return -ENOENT;
45197+
45198 if (unlikely(!audit_dummy_context())) {
45199 if (nd->path.dentry && nd->inode)
45200 audit_inode(name, nd->path.dentry);
45201@@ -2049,7 +2066,27 @@ static int may_open(struct path *path, i
45202 /*
45203 * Ensure there are no outstanding leases on the file.
45204 */
45205- return break_lease(inode, flag);
45206+ error = break_lease(inode, flag);
45207+
45208+ if (error)
45209+ return error;
45210+
45211+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
45212+ error = -EPERM;
45213+ goto exit;
45214+ }
45215+
45216+ if (gr_handle_rawio(inode)) {
45217+ error = -EPERM;
45218+ goto exit;
45219+ }
45220+
45221+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
45222+ error = -EACCES;
45223+ goto exit;
45224+ }
45225+exit:
45226+ return error;
45227 }
45228
45229 static int handle_truncate(struct file *filp)
45230@@ -2110,6 +2147,10 @@ static struct file *do_last(struct namei
45231 error = complete_walk(nd);
45232 if (error)
45233 return ERR_PTR(error);
45234+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45235+ error = -ENOENT;
45236+ goto exit;
45237+ }
45238 audit_inode(pathname, nd->path.dentry);
45239 if (open_flag & O_CREAT) {
45240 error = -EISDIR;
45241@@ -2120,6 +2161,10 @@ static struct file *do_last(struct namei
45242 error = complete_walk(nd);
45243 if (error)
45244 return ERR_PTR(error);
45245+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
45246+ error = -ENOENT;
45247+ goto exit;
45248+ }
45249 audit_inode(pathname, dir);
45250 goto ok;
45251 }
45252@@ -2142,6 +2187,11 @@ static struct file *do_last(struct namei
45253 if (error)
45254 return ERR_PTR(-ECHILD);
45255
45256+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45257+ error = -ENOENT;
45258+ goto exit;
45259+ }
45260+
45261 error = -ENOTDIR;
45262 if (nd->flags & LOOKUP_DIRECTORY) {
45263 if (!nd->inode->i_op->lookup)
45264@@ -2181,6 +2231,12 @@ static struct file *do_last(struct namei
45265 /* Negative dentry, just create the file */
45266 if (!dentry->d_inode) {
45267 int mode = op->mode;
45268+
45269+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
45270+ error = -EACCES;
45271+ goto exit_mutex_unlock;
45272+ }
45273+
45274 if (!IS_POSIXACL(dir->d_inode))
45275 mode &= ~current_umask();
45276 /*
45277@@ -2204,6 +2260,8 @@ static struct file *do_last(struct namei
45278 error = vfs_create(dir->d_inode, dentry, mode, nd);
45279 if (error)
45280 goto exit_mutex_unlock;
45281+ else
45282+ gr_handle_create(path->dentry, path->mnt);
45283 mutex_unlock(&dir->d_inode->i_mutex);
45284 dput(nd->path.dentry);
45285 nd->path.dentry = dentry;
45286@@ -2213,6 +2271,19 @@ static struct file *do_last(struct namei
45287 /*
45288 * It already exists.
45289 */
45290+
45291+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
45292+ error = -ENOENT;
45293+ goto exit_mutex_unlock;
45294+ }
45295+
45296+ /* only check if O_CREAT is specified, all other checks need to go
45297+ into may_open */
45298+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
45299+ error = -EACCES;
45300+ goto exit_mutex_unlock;
45301+ }
45302+
45303 mutex_unlock(&dir->d_inode->i_mutex);
45304 audit_inode(pathname, path->dentry);
45305
45306@@ -2425,6 +2496,11 @@ struct dentry *kern_path_create(int dfd,
45307 *path = nd.path;
45308 return dentry;
45309 eexist:
45310+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
45311+ dput(dentry);
45312+ dentry = ERR_PTR(-ENOENT);
45313+ goto fail;
45314+ }
45315 dput(dentry);
45316 dentry = ERR_PTR(-EEXIST);
45317 fail:
45318@@ -2447,6 +2523,20 @@ struct dentry *user_path_create(int dfd,
45319 }
45320 EXPORT_SYMBOL(user_path_create);
45321
45322+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
45323+{
45324+ char *tmp = getname(pathname);
45325+ struct dentry *res;
45326+ if (IS_ERR(tmp))
45327+ return ERR_CAST(tmp);
45328+ res = kern_path_create(dfd, tmp, path, is_dir);
45329+ if (IS_ERR(res))
45330+ putname(tmp);
45331+ else
45332+ *to = tmp;
45333+ return res;
45334+}
45335+
45336 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
45337 {
45338 int error = may_create(dir, dentry);
45339@@ -2514,6 +2604,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
45340 error = mnt_want_write(path.mnt);
45341 if (error)
45342 goto out_dput;
45343+
45344+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
45345+ error = -EPERM;
45346+ goto out_drop_write;
45347+ }
45348+
45349+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
45350+ error = -EACCES;
45351+ goto out_drop_write;
45352+ }
45353+
45354 error = security_path_mknod(&path, dentry, mode, dev);
45355 if (error)
45356 goto out_drop_write;
45357@@ -2531,6 +2632,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
45358 }
45359 out_drop_write:
45360 mnt_drop_write(path.mnt);
45361+
45362+ if (!error)
45363+ gr_handle_create(dentry, path.mnt);
45364 out_dput:
45365 dput(dentry);
45366 mutex_unlock(&path.dentry->d_inode->i_mutex);
45367@@ -2580,12 +2684,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
45368 error = mnt_want_write(path.mnt);
45369 if (error)
45370 goto out_dput;
45371+
45372+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
45373+ error = -EACCES;
45374+ goto out_drop_write;
45375+ }
45376+
45377 error = security_path_mkdir(&path, dentry, mode);
45378 if (error)
45379 goto out_drop_write;
45380 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
45381 out_drop_write:
45382 mnt_drop_write(path.mnt);
45383+
45384+ if (!error)
45385+ gr_handle_create(dentry, path.mnt);
45386 out_dput:
45387 dput(dentry);
45388 mutex_unlock(&path.dentry->d_inode->i_mutex);
45389@@ -2665,6 +2778,8 @@ static long do_rmdir(int dfd, const char
45390 char * name;
45391 struct dentry *dentry;
45392 struct nameidata nd;
45393+ ino_t saved_ino = 0;
45394+ dev_t saved_dev = 0;
45395
45396 error = user_path_parent(dfd, pathname, &nd, &name);
45397 if (error)
45398@@ -2693,6 +2808,15 @@ static long do_rmdir(int dfd, const char
45399 error = -ENOENT;
45400 goto exit3;
45401 }
45402+
45403+ saved_ino = dentry->d_inode->i_ino;
45404+ saved_dev = gr_get_dev_from_dentry(dentry);
45405+
45406+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
45407+ error = -EACCES;
45408+ goto exit3;
45409+ }
45410+
45411 error = mnt_want_write(nd.path.mnt);
45412 if (error)
45413 goto exit3;
45414@@ -2700,6 +2824,8 @@ static long do_rmdir(int dfd, const char
45415 if (error)
45416 goto exit4;
45417 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
45418+ if (!error && (saved_dev || saved_ino))
45419+ gr_handle_delete(saved_ino, saved_dev);
45420 exit4:
45421 mnt_drop_write(nd.path.mnt);
45422 exit3:
45423@@ -2762,6 +2888,8 @@ static long do_unlinkat(int dfd, const c
45424 struct dentry *dentry;
45425 struct nameidata nd;
45426 struct inode *inode = NULL;
45427+ ino_t saved_ino = 0;
45428+ dev_t saved_dev = 0;
45429
45430 error = user_path_parent(dfd, pathname, &nd, &name);
45431 if (error)
45432@@ -2784,6 +2912,16 @@ static long do_unlinkat(int dfd, const c
45433 if (!inode)
45434 goto slashes;
45435 ihold(inode);
45436+
45437+ if (inode->i_nlink <= 1) {
45438+ saved_ino = inode->i_ino;
45439+ saved_dev = gr_get_dev_from_dentry(dentry);
45440+ }
45441+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
45442+ error = -EACCES;
45443+ goto exit2;
45444+ }
45445+
45446 error = mnt_want_write(nd.path.mnt);
45447 if (error)
45448 goto exit2;
45449@@ -2791,6 +2929,8 @@ static long do_unlinkat(int dfd, const c
45450 if (error)
45451 goto exit3;
45452 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
45453+ if (!error && (saved_ino || saved_dev))
45454+ gr_handle_delete(saved_ino, saved_dev);
45455 exit3:
45456 mnt_drop_write(nd.path.mnt);
45457 exit2:
45458@@ -2866,10 +3006,18 @@ SYSCALL_DEFINE3(symlinkat, const char __
45459 error = mnt_want_write(path.mnt);
45460 if (error)
45461 goto out_dput;
45462+
45463+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
45464+ error = -EACCES;
45465+ goto out_drop_write;
45466+ }
45467+
45468 error = security_path_symlink(&path, dentry, from);
45469 if (error)
45470 goto out_drop_write;
45471 error = vfs_symlink(path.dentry->d_inode, dentry, from);
45472+ if (!error)
45473+ gr_handle_create(dentry, path.mnt);
45474 out_drop_write:
45475 mnt_drop_write(path.mnt);
45476 out_dput:
45477@@ -2941,6 +3089,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
45478 {
45479 struct dentry *new_dentry;
45480 struct path old_path, new_path;
45481+ char *to;
45482 int how = 0;
45483 int error;
45484
45485@@ -2964,7 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
45486 if (error)
45487 return error;
45488
45489- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
45490+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
45491 error = PTR_ERR(new_dentry);
45492 if (IS_ERR(new_dentry))
45493 goto out;
45494@@ -2975,13 +3124,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
45495 error = mnt_want_write(new_path.mnt);
45496 if (error)
45497 goto out_dput;
45498+
45499+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
45500+ old_path.dentry->d_inode,
45501+ old_path.dentry->d_inode->i_mode, to)) {
45502+ error = -EACCES;
45503+ goto out_drop_write;
45504+ }
45505+
45506+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
45507+ old_path.dentry, old_path.mnt, to)) {
45508+ error = -EACCES;
45509+ goto out_drop_write;
45510+ }
45511+
45512 error = security_path_link(old_path.dentry, &new_path, new_dentry);
45513 if (error)
45514 goto out_drop_write;
45515 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
45516+ if (!error)
45517+ gr_handle_create(new_dentry, new_path.mnt);
45518 out_drop_write:
45519 mnt_drop_write(new_path.mnt);
45520 out_dput:
45521+ putname(to);
45522 dput(new_dentry);
45523 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
45524 path_put(&new_path);
45525@@ -3153,6 +3319,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
45526 char *to;
45527 int error;
45528
45529+ pax_track_stack();
45530+
45531 error = user_path_parent(olddfd, oldname, &oldnd, &from);
45532 if (error)
45533 goto exit;
45534@@ -3209,6 +3377,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
45535 if (new_dentry == trap)
45536 goto exit5;
45537
45538+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
45539+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
45540+ to);
45541+ if (error)
45542+ goto exit5;
45543+
45544 error = mnt_want_write(oldnd.path.mnt);
45545 if (error)
45546 goto exit5;
45547@@ -3218,6 +3392,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
45548 goto exit6;
45549 error = vfs_rename(old_dir->d_inode, old_dentry,
45550 new_dir->d_inode, new_dentry);
45551+ if (!error)
45552+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
45553+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
45554 exit6:
45555 mnt_drop_write(oldnd.path.mnt);
45556 exit5:
45557@@ -3243,6 +3420,8 @@ SYSCALL_DEFINE2(rename, const char __use
45558
45559 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
45560 {
45561+ char tmpbuf[64];
45562+ const char *newlink;
45563 int len;
45564
45565 len = PTR_ERR(link);
45566@@ -3252,7 +3431,14 @@ int vfs_readlink(struct dentry *dentry,
45567 len = strlen(link);
45568 if (len > (unsigned) buflen)
45569 len = buflen;
45570- if (copy_to_user(buffer, link, len))
45571+
45572+ if (len < sizeof(tmpbuf)) {
45573+ memcpy(tmpbuf, link, len);
45574+ newlink = tmpbuf;
45575+ } else
45576+ newlink = link;
45577+
45578+ if (copy_to_user(buffer, newlink, len))
45579 len = -EFAULT;
45580 out:
45581 return len;
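The fs/namei.c changes above wire grsecurity's checks into the VFS paths: gr_handle_follow_link(), gr_acl_handle_hidden_file(), gr_acl_handle_open/creat/mknod/mkdir/rmdir/unlink/symlink/link/rename() and friends veto the operation up front (returning -EACCES, -ENOENT or -EPERM), while gr_handle_create(), gr_handle_delete() and gr_handle_rename() notify the subsystem afterwards so it can track the affected inode/device pairs. Judging by the call sites, the gr_acl_handle_*() family returns non-zero to allow and zero to deny, whereas the gr_handle_*() predicates return non-zero to deny. The generic_permission() hunk also reorders the capability checks so CAP_DAC_READ_SEARCH is tried before the stronger CAP_DAC_OVERRIDE, and vfs_readlink() bounces short link strings through a 64-byte stack buffer, presumably so the copy_to_user() source need not be a whitelisted slab object under PAX_USERCOPY. The stubs below are a hedged sketch of how such hooks would collapse when grsecurity is disabled; the real stubs live in grsecurity's headers, and these carry a _sketch suffix to make the names obviously invented.

/* Permissive stand-ins mirroring only the calling conventions visible above. */
static inline int gr_acl_handle_unlink_sketch(const struct dentry *dentry,
                                              const struct vfsmount *mnt)
{
        return 1;                               /* non-zero: allowed */
}

static inline int gr_handle_fifo_sketch(const struct dentry *dentry,
                                        const struct vfsmount *mnt,
                                        const struct dentry *dir,
                                        int flag, int acc_mode)
{
        return 0;                               /* zero: nothing to deny */
}

static inline void gr_handle_delete_sketch(ino_t ino, dev_t dev)
{
        /* nothing to record without the RBAC subsystem */
}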
45582diff -urNp linux-3.1.4/fs/namespace.c linux-3.1.4/fs/namespace.c
45583--- linux-3.1.4/fs/namespace.c 2011-11-11 15:19:27.000000000 -0500
45584+++ linux-3.1.4/fs/namespace.c 2011-11-16 18:40:29.000000000 -0500
45585@@ -1329,6 +1329,9 @@ static int do_umount(struct vfsmount *mn
45586 if (!(sb->s_flags & MS_RDONLY))
45587 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
45588 up_write(&sb->s_umount);
45589+
45590+ gr_log_remount(mnt->mnt_devname, retval);
45591+
45592 return retval;
45593 }
45594
45595@@ -1348,6 +1351,9 @@ static int do_umount(struct vfsmount *mn
45596 br_write_unlock(vfsmount_lock);
45597 up_write(&namespace_sem);
45598 release_mounts(&umount_list);
45599+
45600+ gr_log_unmount(mnt->mnt_devname, retval);
45601+
45602 return retval;
45603 }
45604
45605@@ -2339,6 +2345,16 @@ long do_mount(char *dev_name, char *dir_
45606 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
45607 MS_STRICTATIME);
45608
45609+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
45610+ retval = -EPERM;
45611+ goto dput_out;
45612+ }
45613+
45614+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
45615+ retval = -EPERM;
45616+ goto dput_out;
45617+ }
45618+
45619 if (flags & MS_REMOUNT)
45620 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
45621 data_page);
45622@@ -2353,6 +2369,9 @@ long do_mount(char *dev_name, char *dir_
45623 dev_name, data_page);
45624 dput_out:
45625 path_put(&path);
45626+
45627+ gr_log_mount(dev_name, dir_name, retval);
45628+
45629 return retval;
45630 }
45631
45632@@ -2576,6 +2595,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
45633 if (error)
45634 goto out2;
45635
45636+ if (gr_handle_chroot_pivot()) {
45637+ error = -EPERM;
45638+ goto out2;
45639+ }
45640+
45641 get_fs_root(current->fs, &root);
45642 error = lock_mount(&old);
45643 if (error)
45644diff -urNp linux-3.1.4/fs/ncpfs/dir.c linux-3.1.4/fs/ncpfs/dir.c
45645--- linux-3.1.4/fs/ncpfs/dir.c 2011-11-11 15:19:27.000000000 -0500
45646+++ linux-3.1.4/fs/ncpfs/dir.c 2011-11-16 18:40:29.000000000 -0500
45647@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
45648 int res, val = 0, len;
45649 __u8 __name[NCP_MAXPATHLEN + 1];
45650
45651+ pax_track_stack();
45652+
45653 if (dentry == dentry->d_sb->s_root)
45654 return 1;
45655
45656@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
45657 int error, res, len;
45658 __u8 __name[NCP_MAXPATHLEN + 1];
45659
45660+ pax_track_stack();
45661+
45662 error = -EIO;
45663 if (!ncp_conn_valid(server))
45664 goto finished;
45665@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
45666 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
45667 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
45668
45669+ pax_track_stack();
45670+
45671 ncp_age_dentry(server, dentry);
45672 len = sizeof(__name);
45673 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
45674@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
45675 int error, len;
45676 __u8 __name[NCP_MAXPATHLEN + 1];
45677
45678+ pax_track_stack();
45679+
45680 DPRINTK("ncp_mkdir: making %s/%s\n",
45681 dentry->d_parent->d_name.name, dentry->d_name.name);
45682
45683@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
45684 int old_len, new_len;
45685 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
45686
45687+ pax_track_stack();
45688+
45689 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
45690 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
45691 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
45692diff -urNp linux-3.1.4/fs/ncpfs/inode.c linux-3.1.4/fs/ncpfs/inode.c
45693--- linux-3.1.4/fs/ncpfs/inode.c 2011-11-11 15:19:27.000000000 -0500
45694+++ linux-3.1.4/fs/ncpfs/inode.c 2011-11-16 18:40:29.000000000 -0500
45695@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
45696 #endif
45697 struct ncp_entry_info finfo;
45698
45699+ pax_track_stack();
45700+
45701 memset(&data, 0, sizeof(data));
45702 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
45703 if (!server)
45704diff -urNp linux-3.1.4/fs/nfs/blocklayout/blocklayout.c linux-3.1.4/fs/nfs/blocklayout/blocklayout.c
45705--- linux-3.1.4/fs/nfs/blocklayout/blocklayout.c 2011-11-11 15:19:27.000000000 -0500
45706+++ linux-3.1.4/fs/nfs/blocklayout/blocklayout.c 2011-11-16 18:39:08.000000000 -0500
45707@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block
45708 */
45709 struct parallel_io {
45710 struct kref refcnt;
45711- struct rpc_call_ops call_ops;
45712+ rpc_call_ops_no_const call_ops;
45713 void (*pnfs_callback) (void *data);
45714 void *data;
45715 };
45716diff -urNp linux-3.1.4/fs/nfs/inode.c linux-3.1.4/fs/nfs/inode.c
45717--- linux-3.1.4/fs/nfs/inode.c 2011-11-26 19:57:29.000000000 -0500
45718+++ linux-3.1.4/fs/nfs/inode.c 2011-11-26 20:00:43.000000000 -0500
45719@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
45720 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
45721 nfsi->attrtimeo_timestamp = jiffies;
45722
45723- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
45724+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
45725 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
45726 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
45727 else
45728@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const st
45729 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
45730 }
45731
45732-static atomic_long_t nfs_attr_generation_counter;
45733+static atomic_long_unchecked_t nfs_attr_generation_counter;
45734
45735 static unsigned long nfs_read_attr_generation_counter(void)
45736 {
45737- return atomic_long_read(&nfs_attr_generation_counter);
45738+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
45739 }
45740
45741 unsigned long nfs_inc_attr_generation_counter(void)
45742 {
45743- return atomic_long_inc_return(&nfs_attr_generation_counter);
45744+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
45745 }
45746
45747 void nfs_fattr_init(struct nfs_fattr *fattr)
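[Editor's illustration, not part of the patch.] The nfs_attr_generation_counter hunk swaps atomic_long_t for the PaX atomic_long_unchecked_t type: the value only needs to be distinct, wraparound is harmless, and the unchecked variant keeps PaX's reference-counter overflow protection from treating the wrap as a bug. For comparison, the same pattern with the plain kernel type (generation_counter and next_generation are illustrative names):

#include <linux/atomic.h>

/* A monotonically advancing generation number where wraparound is
 * benign; the patch merely switches this to the *_unchecked variants. */
static atomic_long_t generation_counter = ATOMIC_LONG_INIT(0);

static unsigned long next_generation(void)
{
	return atomic_long_inc_return(&generation_counter);
}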
45748diff -urNp linux-3.1.4/fs/nfsd/nfs4state.c linux-3.1.4/fs/nfsd/nfs4state.c
45749--- linux-3.1.4/fs/nfsd/nfs4state.c 2011-11-11 15:19:27.000000000 -0500
45750+++ linux-3.1.4/fs/nfsd/nfs4state.c 2011-11-16 18:40:29.000000000 -0500
45751@@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
45752 unsigned int strhashval;
45753 int err;
45754
45755+ pax_track_stack();
45756+
45757 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
45758 (long long) lock->lk_offset,
45759 (long long) lock->lk_length);
45760diff -urNp linux-3.1.4/fs/nfsd/nfs4xdr.c linux-3.1.4/fs/nfsd/nfs4xdr.c
45761--- linux-3.1.4/fs/nfsd/nfs4xdr.c 2011-11-11 15:19:27.000000000 -0500
45762+++ linux-3.1.4/fs/nfsd/nfs4xdr.c 2011-11-16 18:40:29.000000000 -0500
45763@@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
45764 .dentry = dentry,
45765 };
45766
45767+ pax_track_stack();
45768+
45769 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
45770 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
45771 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
45772diff -urNp linux-3.1.4/fs/nfsd/vfs.c linux-3.1.4/fs/nfsd/vfs.c
45773--- linux-3.1.4/fs/nfsd/vfs.c 2011-11-11 15:19:27.000000000 -0500
45774+++ linux-3.1.4/fs/nfsd/vfs.c 2011-11-16 18:39:08.000000000 -0500
45775@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
45776 } else {
45777 oldfs = get_fs();
45778 set_fs(KERNEL_DS);
45779- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
45780+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
45781 set_fs(oldfs);
45782 }
45783
45784@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
45785
45786 /* Write the data. */
45787 oldfs = get_fs(); set_fs(KERNEL_DS);
45788- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
45789+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
45790 set_fs(oldfs);
45791 if (host_err < 0)
45792 goto out_nfserr;
45793@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
45794 */
45795
45796 oldfs = get_fs(); set_fs(KERNEL_DS);
45797- host_err = inode->i_op->readlink(dentry, buf, *lenp);
45798+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
45799 set_fs(oldfs);
45800
45801 if (host_err < 0)
45802diff -urNp linux-3.1.4/fs/notify/fanotify/fanotify_user.c linux-3.1.4/fs/notify/fanotify/fanotify_user.c
45803--- linux-3.1.4/fs/notify/fanotify/fanotify_user.c 2011-11-11 15:19:27.000000000 -0500
45804+++ linux-3.1.4/fs/notify/fanotify/fanotify_user.c 2011-11-16 18:39:08.000000000 -0500
45805@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
45806 goto out_close_fd;
45807
45808 ret = -EFAULT;
45809- if (copy_to_user(buf, &fanotify_event_metadata,
45810+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
45811+ copy_to_user(buf, &fanotify_event_metadata,
45812 fanotify_event_metadata.event_len))
45813 goto out_kill_access_response;
45814
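[Editor's illustration, not part of the patch.] The fanotify hunk above refuses to copy an event whose self-reported event_len exceeds the metadata structure actually being copied, so copy_to_user() can never read past the source object. The same check in isolation, with illustrative struct and helper names:

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct event_metadata {
	u32 event_len;		/* length reported inside the structure itself */
	/* ... remaining fields ... */
};

/* Never copy more bytes than the source object actually holds. */
static int copy_event(void __user *buf, const struct event_metadata *meta)
{
	if (meta->event_len > sizeof(*meta) ||
	    copy_to_user(buf, meta, meta->event_len))
		return -EFAULT;	/* the hunk folds both failures into one error path */
	return 0;
}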
45815diff -urNp linux-3.1.4/fs/notify/notification.c linux-3.1.4/fs/notify/notification.c
45816--- linux-3.1.4/fs/notify/notification.c 2011-11-11 15:19:27.000000000 -0500
45817+++ linux-3.1.4/fs/notify/notification.c 2011-11-16 18:39:08.000000000 -0500
45818@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
45819 * get set to 0 so it will never get 'freed'
45820 */
45821 static struct fsnotify_event *q_overflow_event;
45822-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45823+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45824
45825 /**
45826 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
45827@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
45828 */
45829 u32 fsnotify_get_cookie(void)
45830 {
45831- return atomic_inc_return(&fsnotify_sync_cookie);
45832+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
45833 }
45834 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
45835
45836diff -urNp linux-3.1.4/fs/ntfs/dir.c linux-3.1.4/fs/ntfs/dir.c
45837--- linux-3.1.4/fs/ntfs/dir.c 2011-11-11 15:19:27.000000000 -0500
45838+++ linux-3.1.4/fs/ntfs/dir.c 2011-11-16 18:39:08.000000000 -0500
45839@@ -1329,7 +1329,7 @@ find_next_index_buffer:
45840 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
45841 ~(s64)(ndir->itype.index.block_size - 1)));
45842 /* Bounds checks. */
45843- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45844+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45845 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
45846 "inode 0x%lx or driver bug.", vdir->i_ino);
45847 goto err_out;
45848diff -urNp linux-3.1.4/fs/ntfs/file.c linux-3.1.4/fs/ntfs/file.c
45849--- linux-3.1.4/fs/ntfs/file.c 2011-11-11 15:19:27.000000000 -0500
45850+++ linux-3.1.4/fs/ntfs/file.c 2011-11-16 18:39:08.000000000 -0500
45851@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_
45852 #endif /* NTFS_RW */
45853 };
45854
45855-const struct file_operations ntfs_empty_file_ops = {};
45856+const struct file_operations ntfs_empty_file_ops __read_only;
45857
45858-const struct inode_operations ntfs_empty_inode_ops = {};
45859+const struct inode_operations ntfs_empty_inode_ops __read_only;
45860diff -urNp linux-3.1.4/fs/ocfs2/localalloc.c linux-3.1.4/fs/ocfs2/localalloc.c
45861--- linux-3.1.4/fs/ocfs2/localalloc.c 2011-11-11 15:19:27.000000000 -0500
45862+++ linux-3.1.4/fs/ocfs2/localalloc.c 2011-11-16 18:39:08.000000000 -0500
45863@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
45864 goto bail;
45865 }
45866
45867- atomic_inc(&osb->alloc_stats.moves);
45868+ atomic_inc_unchecked(&osb->alloc_stats.moves);
45869
45870 bail:
45871 if (handle)
45872diff -urNp linux-3.1.4/fs/ocfs2/namei.c linux-3.1.4/fs/ocfs2/namei.c
45873--- linux-3.1.4/fs/ocfs2/namei.c 2011-11-11 15:19:27.000000000 -0500
45874+++ linux-3.1.4/fs/ocfs2/namei.c 2011-11-16 18:40:29.000000000 -0500
45875@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
45876 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
45877 struct ocfs2_dir_lookup_result target_insert = { NULL, };
45878
45879+ pax_track_stack();
45880+
45881 /* At some point it might be nice to break this function up a
45882 * bit. */
45883
45884diff -urNp linux-3.1.4/fs/ocfs2/ocfs2.h linux-3.1.4/fs/ocfs2/ocfs2.h
45885--- linux-3.1.4/fs/ocfs2/ocfs2.h 2011-11-11 15:19:27.000000000 -0500
45886+++ linux-3.1.4/fs/ocfs2/ocfs2.h 2011-11-16 18:39:08.000000000 -0500
45887@@ -235,11 +235,11 @@ enum ocfs2_vol_state
45888
45889 struct ocfs2_alloc_stats
45890 {
45891- atomic_t moves;
45892- atomic_t local_data;
45893- atomic_t bitmap_data;
45894- atomic_t bg_allocs;
45895- atomic_t bg_extends;
45896+ atomic_unchecked_t moves;
45897+ atomic_unchecked_t local_data;
45898+ atomic_unchecked_t bitmap_data;
45899+ atomic_unchecked_t bg_allocs;
45900+ atomic_unchecked_t bg_extends;
45901 };
45902
45903 enum ocfs2_local_alloc_state
45904diff -urNp linux-3.1.4/fs/ocfs2/suballoc.c linux-3.1.4/fs/ocfs2/suballoc.c
45905--- linux-3.1.4/fs/ocfs2/suballoc.c 2011-11-11 15:19:27.000000000 -0500
45906+++ linux-3.1.4/fs/ocfs2/suballoc.c 2011-11-16 18:39:08.000000000 -0500
45907@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
45908 mlog_errno(status);
45909 goto bail;
45910 }
45911- atomic_inc(&osb->alloc_stats.bg_extends);
45912+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45913
45914 /* You should never ask for this much metadata */
45915 BUG_ON(bits_wanted >
45916@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
45917 mlog_errno(status);
45918 goto bail;
45919 }
45920- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45921+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45922
45923 *suballoc_loc = res.sr_bg_blkno;
45924 *suballoc_bit_start = res.sr_bit_offset;
45925@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
45926 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45927 res->sr_bits);
45928
45929- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45930+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45931
45932 BUG_ON(res->sr_bits != 1);
45933
45934@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
45935 mlog_errno(status);
45936 goto bail;
45937 }
45938- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45939+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45940
45941 BUG_ON(res.sr_bits != 1);
45942
45943@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
45944 cluster_start,
45945 num_clusters);
45946 if (!status)
45947- atomic_inc(&osb->alloc_stats.local_data);
45948+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
45949 } else {
45950 if (min_clusters > (osb->bitmap_cpg - 1)) {
45951 /* The only paths asking for contiguousness
45952@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
45953 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45954 res.sr_bg_blkno,
45955 res.sr_bit_offset);
45956- atomic_inc(&osb->alloc_stats.bitmap_data);
45957+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45958 *num_clusters = res.sr_bits;
45959 }
45960 }
45961diff -urNp linux-3.1.4/fs/ocfs2/super.c linux-3.1.4/fs/ocfs2/super.c
45962--- linux-3.1.4/fs/ocfs2/super.c 2011-11-11 15:19:27.000000000 -0500
45963+++ linux-3.1.4/fs/ocfs2/super.c 2011-11-16 18:39:08.000000000 -0500
45964@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
45965 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45966 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45967 "Stats",
45968- atomic_read(&osb->alloc_stats.bitmap_data),
45969- atomic_read(&osb->alloc_stats.local_data),
45970- atomic_read(&osb->alloc_stats.bg_allocs),
45971- atomic_read(&osb->alloc_stats.moves),
45972- atomic_read(&osb->alloc_stats.bg_extends));
45973+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45974+ atomic_read_unchecked(&osb->alloc_stats.local_data),
45975+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45976+ atomic_read_unchecked(&osb->alloc_stats.moves),
45977+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45978
45979 out += snprintf(buf + out, len - out,
45980 "%10s => State: %u Descriptor: %llu Size: %u bits "
45981@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
45982 spin_lock_init(&osb->osb_xattr_lock);
45983 ocfs2_init_steal_slots(osb);
45984
45985- atomic_set(&osb->alloc_stats.moves, 0);
45986- atomic_set(&osb->alloc_stats.local_data, 0);
45987- atomic_set(&osb->alloc_stats.bitmap_data, 0);
45988- atomic_set(&osb->alloc_stats.bg_allocs, 0);
45989- atomic_set(&osb->alloc_stats.bg_extends, 0);
45990+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45991+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45992+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45993+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45994+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45995
45996 /* Copy the blockcheck stats from the superblock probe */
45997 osb->osb_ecc_stats = *stats;
45998diff -urNp linux-3.1.4/fs/ocfs2/symlink.c linux-3.1.4/fs/ocfs2/symlink.c
45999--- linux-3.1.4/fs/ocfs2/symlink.c 2011-11-11 15:19:27.000000000 -0500
46000+++ linux-3.1.4/fs/ocfs2/symlink.c 2011-11-16 18:39:08.000000000 -0500
46001@@ -142,7 +142,7 @@ bail:
46002
46003 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46004 {
46005- char *link = nd_get_link(nd);
46006+ const char *link = nd_get_link(nd);
46007 if (!IS_ERR(link))
46008 kfree(link);
46009 }
46010diff -urNp linux-3.1.4/fs/open.c linux-3.1.4/fs/open.c
46011--- linux-3.1.4/fs/open.c 2011-11-11 15:19:27.000000000 -0500
46012+++ linux-3.1.4/fs/open.c 2011-11-17 19:07:55.000000000 -0500
46013@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
46014 error = locks_verify_truncate(inode, NULL, length);
46015 if (!error)
46016 error = security_path_truncate(&path);
46017+
46018+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
46019+ error = -EACCES;
46020+
46021 if (!error)
46022 error = do_truncate(path.dentry, length, 0, NULL);
46023
46024@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
46025 if (__mnt_is_readonly(path.mnt))
46026 res = -EROFS;
46027
46028+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
46029+ res = -EACCES;
46030+
46031 out_path_release:
46032 path_put(&path);
46033 out:
46034@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
46035 if (error)
46036 goto dput_and_out;
46037
46038+ gr_log_chdir(path.dentry, path.mnt);
46039+
46040 set_fs_pwd(current->fs, &path);
46041
46042 dput_and_out:
46043@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
46044 goto out_putf;
46045
46046 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
46047+
46048+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
46049+ error = -EPERM;
46050+
46051+ if (!error)
46052+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
46053+
46054 if (!error)
46055 set_fs_pwd(current->fs, &file->f_path);
46056 out_putf:
46057@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
46058 if (error)
46059 goto dput_and_out;
46060
46061+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
46062+ goto dput_and_out;
46063+
46064 set_fs_root(current->fs, &path);
46065+
46066+ gr_handle_chroot_chdir(&path);
46067+
46068 error = 0;
46069 dput_and_out:
46070 path_put(&path);
46071@@ -456,6 +478,16 @@ static int chmod_common(struct path *pat
46072 if (error)
46073 return error;
46074 mutex_lock(&inode->i_mutex);
46075+
46076+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
46077+ error = -EACCES;
46078+ goto out_unlock;
46079+ }
46080+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
46081+ error = -EACCES;
46082+ goto out_unlock;
46083+ }
46084+
46085 error = security_path_chmod(path->dentry, path->mnt, mode);
46086 if (error)
46087 goto out_unlock;
46088@@ -506,6 +538,9 @@ static int chown_common(struct path *pat
46089 int error;
46090 struct iattr newattrs;
46091
46092+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
46093+ return -EACCES;
46094+
46095 newattrs.ia_valid = ATTR_CTIME;
46096 if (user != (uid_t) -1) {
46097 newattrs.ia_valid |= ATTR_UID;
46098diff -urNp linux-3.1.4/fs/partitions/ldm.c linux-3.1.4/fs/partitions/ldm.c
46099--- linux-3.1.4/fs/partitions/ldm.c 2011-11-11 15:19:27.000000000 -0500
46100+++ linux-3.1.4/fs/partitions/ldm.c 2011-11-17 19:08:15.000000000 -0500
46101@@ -1322,7 +1322,7 @@ static bool ldm_frag_add (const u8 *data
46102 goto found;
46103 }
46104
46105- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
46106+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
46107 if (!f) {
46108 ldm_crit ("Out of memory.");
46109 return false;
46110diff -urNp linux-3.1.4/fs/pipe.c linux-3.1.4/fs/pipe.c
46111--- linux-3.1.4/fs/pipe.c 2011-11-11 15:19:27.000000000 -0500
46112+++ linux-3.1.4/fs/pipe.c 2011-11-16 18:40:29.000000000 -0500
46113@@ -420,9 +420,9 @@ redo:
46114 }
46115 if (bufs) /* More to do? */
46116 continue;
46117- if (!pipe->writers)
46118+ if (!atomic_read(&pipe->writers))
46119 break;
46120- if (!pipe->waiting_writers) {
46121+ if (!atomic_read(&pipe->waiting_writers)) {
46122 /* syscall merging: Usually we must not sleep
46123 * if O_NONBLOCK is set, or if we got some data.
46124 * But if a writer sleeps in kernel space, then
46125@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
46126 mutex_lock(&inode->i_mutex);
46127 pipe = inode->i_pipe;
46128
46129- if (!pipe->readers) {
46130+ if (!atomic_read(&pipe->readers)) {
46131 send_sig(SIGPIPE, current, 0);
46132 ret = -EPIPE;
46133 goto out;
46134@@ -530,7 +530,7 @@ redo1:
46135 for (;;) {
46136 int bufs;
46137
46138- if (!pipe->readers) {
46139+ if (!atomic_read(&pipe->readers)) {
46140 send_sig(SIGPIPE, current, 0);
46141 if (!ret)
46142 ret = -EPIPE;
46143@@ -616,9 +616,9 @@ redo2:
46144 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
46145 do_wakeup = 0;
46146 }
46147- pipe->waiting_writers++;
46148+ atomic_inc(&pipe->waiting_writers);
46149 pipe_wait(pipe);
46150- pipe->waiting_writers--;
46151+ atomic_dec(&pipe->waiting_writers);
46152 }
46153 out:
46154 mutex_unlock(&inode->i_mutex);
46155@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
46156 mask = 0;
46157 if (filp->f_mode & FMODE_READ) {
46158 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
46159- if (!pipe->writers && filp->f_version != pipe->w_counter)
46160+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
46161 mask |= POLLHUP;
46162 }
46163
46164@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
46165 * Most Unices do not set POLLERR for FIFOs but on Linux they
46166 * behave exactly like pipes for poll().
46167 */
46168- if (!pipe->readers)
46169+ if (!atomic_read(&pipe->readers))
46170 mask |= POLLERR;
46171 }
46172
46173@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
46174
46175 mutex_lock(&inode->i_mutex);
46176 pipe = inode->i_pipe;
46177- pipe->readers -= decr;
46178- pipe->writers -= decw;
46179+ atomic_sub(decr, &pipe->readers);
46180+ atomic_sub(decw, &pipe->writers);
46181
46182- if (!pipe->readers && !pipe->writers) {
46183+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
46184 free_pipe_info(inode);
46185 } else {
46186 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
46187@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
46188
46189 if (inode->i_pipe) {
46190 ret = 0;
46191- inode->i_pipe->readers++;
46192+ atomic_inc(&inode->i_pipe->readers);
46193 }
46194
46195 mutex_unlock(&inode->i_mutex);
46196@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
46197
46198 if (inode->i_pipe) {
46199 ret = 0;
46200- inode->i_pipe->writers++;
46201+ atomic_inc(&inode->i_pipe->writers);
46202 }
46203
46204 mutex_unlock(&inode->i_mutex);
46205@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
46206 if (inode->i_pipe) {
46207 ret = 0;
46208 if (filp->f_mode & FMODE_READ)
46209- inode->i_pipe->readers++;
46210+ atomic_inc(&inode->i_pipe->readers);
46211 if (filp->f_mode & FMODE_WRITE)
46212- inode->i_pipe->writers++;
46213+ atomic_inc(&inode->i_pipe->writers);
46214 }
46215
46216 mutex_unlock(&inode->i_mutex);
46217@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
46218 inode->i_pipe = NULL;
46219 }
46220
46221-static struct vfsmount *pipe_mnt __read_mostly;
46222+struct vfsmount *pipe_mnt __read_mostly;
46223
46224 /*
46225 * pipefs_dname() is called from d_path().
46226@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
46227 goto fail_iput;
46228 inode->i_pipe = pipe;
46229
46230- pipe->readers = pipe->writers = 1;
46231+ atomic_set(&pipe->readers, 1);
46232+ atomic_set(&pipe->writers, 1);
46233 inode->i_fop = &rdwr_pipefifo_fops;
46234
46235 /*
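[Editor's illustration, not part of the patch.] The fs/pipe.c hunks convert the plain int readers/writers/waiting_writers fields to atomic_t and replace every access with atomic_set/atomic_inc/atomic_sub/atomic_read. The sketch below shows the open/release shape of that counting pattern on a made-up structure; endpoint_counts, ep_init, ep_open, and ep_release are illustrative names, not kernel API.

#include <linux/types.h>
#include <linux/atomic.h>

struct endpoint_counts {
	atomic_t readers;
	atomic_t writers;
};

static void ep_init(struct endpoint_counts *ep)
{
	atomic_set(&ep->readers, 1);
	atomic_set(&ep->writers, 1);
}

static void ep_open(struct endpoint_counts *ep, bool for_read, bool for_write)
{
	if (for_read)
		atomic_inc(&ep->readers);
	if (for_write)
		atomic_inc(&ep->writers);
}

/* Returns true when the last reader and writer are gone. */
static bool ep_release(struct endpoint_counts *ep, int decr, int decw)
{
	atomic_sub(decr, &ep->readers);
	atomic_sub(decw, &ep->writers);
	return !atomic_read(&ep->readers) && !atomic_read(&ep->writers);
}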
46236diff -urNp linux-3.1.4/fs/proc/array.c linux-3.1.4/fs/proc/array.c
46237--- linux-3.1.4/fs/proc/array.c 2011-11-11 15:19:27.000000000 -0500
46238+++ linux-3.1.4/fs/proc/array.c 2011-11-17 18:42:02.000000000 -0500
46239@@ -60,6 +60,7 @@
46240 #include <linux/tty.h>
46241 #include <linux/string.h>
46242 #include <linux/mman.h>
46243+#include <linux/grsecurity.h>
46244 #include <linux/proc_fs.h>
46245 #include <linux/ioport.h>
46246 #include <linux/uaccess.h>
46247@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
46248 seq_putc(m, '\n');
46249 }
46250
46251+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46252+static inline void task_pax(struct seq_file *m, struct task_struct *p)
46253+{
46254+ if (p->mm)
46255+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
46256+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
46257+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
46258+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
46259+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
46260+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
46261+ else
46262+ seq_printf(m, "PaX:\t-----\n");
46263+}
46264+#endif
46265+
46266 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46267 struct pid *pid, struct task_struct *task)
46268 {
46269@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
46270 task_cpus_allowed(m, task);
46271 cpuset_task_status_allowed(m, task);
46272 task_context_switch_counts(m, task);
46273+
46274+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46275+ task_pax(m, task);
46276+#endif
46277+
46278+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
46279+ task_grsec_rbac(m, task);
46280+#endif
46281+
46282 return 0;
46283 }
46284
46285+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46286+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46287+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46288+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46289+#endif
46290+
46291 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46292 struct pid *pid, struct task_struct *task, int whole)
46293 {
46294@@ -378,6 +409,8 @@ static int do_task_stat(struct seq_file
46295 char tcomm[sizeof(task->comm)];
46296 unsigned long flags;
46297
46298+ pax_track_stack();
46299+
46300 state = *get_task_state(task);
46301 vsize = eip = esp = 0;
46302 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
46303@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
46304 gtime = task->gtime;
46305 }
46306
46307+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46308+ if (PAX_RAND_FLAGS(mm)) {
46309+ eip = 0;
46310+ esp = 0;
46311+ wchan = 0;
46312+ }
46313+#endif
46314+#ifdef CONFIG_GRKERNSEC_HIDESYM
46315+ wchan = 0;
46316+ eip =0;
46317+ esp =0;
46318+#endif
46319+
46320 /* scale priority and nice values from timeslices to -20..20 */
46321 /* to make it look like a "normal" Unix priority/nice value */
46322 priority = task_prio(task);
46323@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
46324 vsize,
46325 mm ? get_mm_rss(mm) : 0,
46326 rsslim,
46327+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46328+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
46329+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
46330+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
46331+#else
46332 mm ? (permitted ? mm->start_code : 1) : 0,
46333 mm ? (permitted ? mm->end_code : 1) : 0,
46334 (permitted && mm) ? mm->start_stack : 0,
46335+#endif
46336 esp,
46337 eip,
46338 /* The signal information here is obsolete.
46339@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
46340
46341 return 0;
46342 }
46343+
46344+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46345+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
46346+{
46347+ u32 curr_ip = 0;
46348+ unsigned long flags;
46349+
46350+ if (lock_task_sighand(task, &flags)) {
46351+ curr_ip = task->signal->curr_ip;
46352+ unlock_task_sighand(task, &flags);
46353+ }
46354+
46355+ return sprintf(buffer, "%pI4\n", &curr_ip);
46356+}
46357+#endif
46358diff -urNp linux-3.1.4/fs/proc/base.c linux-3.1.4/fs/proc/base.c
46359--- linux-3.1.4/fs/proc/base.c 2011-11-11 15:19:27.000000000 -0500
46360+++ linux-3.1.4/fs/proc/base.c 2011-11-17 18:43:19.000000000 -0500
46361@@ -107,6 +107,22 @@ struct pid_entry {
46362 union proc_op op;
46363 };
46364
46365+struct getdents_callback {
46366+ struct linux_dirent __user * current_dir;
46367+ struct linux_dirent __user * previous;
46368+ struct file * file;
46369+ int count;
46370+ int error;
46371+};
46372+
46373+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
46374+ loff_t offset, u64 ino, unsigned int d_type)
46375+{
46376+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
46377+ buf->error = -EINVAL;
46378+ return 0;
46379+}
46380+
46381 #define NOD(NAME, MODE, IOP, FOP, OP) { \
46382 .name = (NAME), \
46383 .len = sizeof(NAME) - 1, \
46384@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
46385 if (task == current)
46386 return mm;
46387
46388+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
46389+ return ERR_PTR(-EPERM);
46390+
46391 /*
46392 * If current is actively ptrace'ing, and would also be
46393 * permitted to freshly attach with ptrace now, permit it.
46394@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
46395 if (!mm->arg_end)
46396 goto out_mm; /* Shh! No looking before we're done */
46397
46398+ if (gr_acl_handle_procpidmem(task))
46399+ goto out_mm;
46400+
46401 len = mm->arg_end - mm->arg_start;
46402
46403 if (len > PAGE_SIZE)
46404@@ -309,12 +331,28 @@ out:
46405 return res;
46406 }
46407
46408+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46409+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46410+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46411+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46412+#endif
46413+
46414 static int proc_pid_auxv(struct task_struct *task, char *buffer)
46415 {
46416 struct mm_struct *mm = mm_for_maps(task);
46417 int res = PTR_ERR(mm);
46418 if (mm && !IS_ERR(mm)) {
46419 unsigned int nwords = 0;
46420+
46421+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46422+ /* allow if we're currently ptracing this task */
46423+ if (PAX_RAND_FLAGS(mm) &&
46424+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
46425+ mmput(mm);
46426+ return 0;
46427+ }
46428+#endif
46429+
46430 do {
46431 nwords += 2;
46432 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
46433@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
46434 }
46435
46436
46437-#ifdef CONFIG_KALLSYMS
46438+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46439 /*
46440 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
46441 * Returns the resolved symbol. If that fails, simply return the address.
46442@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
46443 mutex_unlock(&task->signal->cred_guard_mutex);
46444 }
46445
46446-#ifdef CONFIG_STACKTRACE
46447+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46448
46449 #define MAX_STACK_TRACE_DEPTH 64
46450
46451@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
46452 return count;
46453 }
46454
46455-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46456+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46457 static int proc_pid_syscall(struct task_struct *task, char *buffer)
46458 {
46459 long nr;
46460@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
46461 /************************************************************************/
46462
46463 /* permission checks */
46464-static int proc_fd_access_allowed(struct inode *inode)
46465+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
46466 {
46467 struct task_struct *task;
46468 int allowed = 0;
46469@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
46470 */
46471 task = get_proc_task(inode);
46472 if (task) {
46473- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
46474+ if (log)
46475+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
46476+ else
46477+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
46478 put_task_struct(task);
46479 }
46480 return allowed;
46481@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
46482 if (!task)
46483 goto out_no_task;
46484
46485+ if (gr_acl_handle_procpidmem(task))
46486+ goto out;
46487+
46488 ret = -ENOMEM;
46489 page = (char *)__get_free_page(GFP_TEMPORARY);
46490 if (!page)
46491@@ -1613,7 +1657,7 @@ static void *proc_pid_follow_link(struct
46492 path_put(&nd->path);
46493
46494 /* Are we allowed to snoop on the tasks file descriptors? */
46495- if (!proc_fd_access_allowed(inode))
46496+ if (!proc_fd_access_allowed(inode,0))
46497 goto out;
46498
46499 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
46500@@ -1652,8 +1696,18 @@ static int proc_pid_readlink(struct dent
46501 struct path path;
46502
46503 /* Are we allowed to snoop on the tasks file descriptors? */
46504- if (!proc_fd_access_allowed(inode))
46505- goto out;
46506+ /* logging this is needed for learning on chromium to work properly,
46507+ but we don't want to flood the logs from 'ps' which does a readlink
46508+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
46509+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
46510+ */
46511+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
46512+ if (!proc_fd_access_allowed(inode,0))
46513+ goto out;
46514+ } else {
46515+ if (!proc_fd_access_allowed(inode,1))
46516+ goto out;
46517+ }
46518
46519 error = PROC_I(inode)->op.proc_get_link(inode, &path);
46520 if (error)
46521@@ -1718,7 +1772,11 @@ struct inode *proc_pid_make_inode(struct
46522 rcu_read_lock();
46523 cred = __task_cred(task);
46524 inode->i_uid = cred->euid;
46525+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46526+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46527+#else
46528 inode->i_gid = cred->egid;
46529+#endif
46530 rcu_read_unlock();
46531 }
46532 security_task_to_inode(task, inode);
46533@@ -1736,6 +1794,9 @@ int pid_getattr(struct vfsmount *mnt, st
46534 struct inode *inode = dentry->d_inode;
46535 struct task_struct *task;
46536 const struct cred *cred;
46537+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46538+ const struct cred *tmpcred = current_cred();
46539+#endif
46540
46541 generic_fillattr(inode, stat);
46542
46543@@ -1743,13 +1804,41 @@ int pid_getattr(struct vfsmount *mnt, st
46544 stat->uid = 0;
46545 stat->gid = 0;
46546 task = pid_task(proc_pid(inode), PIDTYPE_PID);
46547+
46548+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
46549+ rcu_read_unlock();
46550+ return -ENOENT;
46551+ }
46552+
46553 if (task) {
46554+ cred = __task_cred(task);
46555+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46556+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
46557+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46558+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46559+#endif
46560+ ) {
46561+#endif
46562 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
46563+#ifdef CONFIG_GRKERNSEC_PROC_USER
46564+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
46565+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46566+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
46567+#endif
46568 task_dumpable(task)) {
46569- cred = __task_cred(task);
46570 stat->uid = cred->euid;
46571+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46572+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
46573+#else
46574 stat->gid = cred->egid;
46575+#endif
46576 }
46577+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46578+ } else {
46579+ rcu_read_unlock();
46580+ return -ENOENT;
46581+ }
46582+#endif
46583 }
46584 rcu_read_unlock();
46585 return 0;
46586@@ -1786,11 +1875,20 @@ int pid_revalidate(struct dentry *dentry
46587
46588 if (task) {
46589 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
46590+#ifdef CONFIG_GRKERNSEC_PROC_USER
46591+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
46592+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46593+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
46594+#endif
46595 task_dumpable(task)) {
46596 rcu_read_lock();
46597 cred = __task_cred(task);
46598 inode->i_uid = cred->euid;
46599+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46600+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46601+#else
46602 inode->i_gid = cred->egid;
46603+#endif
46604 rcu_read_unlock();
46605 } else {
46606 inode->i_uid = 0;
46607@@ -1908,7 +2006,8 @@ static int proc_fd_info(struct inode *in
46608 int fd = proc_fd(inode);
46609
46610 if (task) {
46611- files = get_files_struct(task);
46612+ if (!gr_acl_handle_procpidmem(task))
46613+ files = get_files_struct(task);
46614 put_task_struct(task);
46615 }
46616 if (files) {
46617@@ -2176,11 +2275,21 @@ static const struct file_operations proc
46618 */
46619 static int proc_fd_permission(struct inode *inode, int mask)
46620 {
46621+ struct task_struct *task;
46622 int rv = generic_permission(inode, mask);
46623- if (rv == 0)
46624- return 0;
46625+
46626 if (task_pid(current) == proc_pid(inode))
46627 rv = 0;
46628+
46629+ task = get_proc_task(inode);
46630+ if (task == NULL)
46631+ return rv;
46632+
46633+ if (gr_acl_handle_procpidmem(task))
46634+ rv = -EACCES;
46635+
46636+ put_task_struct(task);
46637+
46638 return rv;
46639 }
46640
46641@@ -2290,6 +2399,9 @@ static struct dentry *proc_pident_lookup
46642 if (!task)
46643 goto out_no_task;
46644
46645+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46646+ goto out;
46647+
46648 /*
46649 * Yes, it does not scale. And it should not. Don't add
46650 * new entries into /proc/<tgid>/ without very good reasons.
46651@@ -2334,6 +2446,9 @@ static int proc_pident_readdir(struct fi
46652 if (!task)
46653 goto out_no_task;
46654
46655+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46656+ goto out;
46657+
46658 ret = 0;
46659 i = filp->f_pos;
46660 switch (i) {
46661@@ -2604,7 +2719,7 @@ static void *proc_self_follow_link(struc
46662 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
46663 void *cookie)
46664 {
46665- char *s = nd_get_link(nd);
46666+ const char *s = nd_get_link(nd);
46667 if (!IS_ERR(s))
46668 __putname(s);
46669 }
46670@@ -2802,7 +2917,7 @@ static const struct pid_entry tgid_base_
46671 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
46672 #endif
46673 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46674-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46675+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46676 INF("syscall", S_IRUGO, proc_pid_syscall),
46677 #endif
46678 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46679@@ -2827,10 +2942,10 @@ static const struct pid_entry tgid_base_
46680 #ifdef CONFIG_SECURITY
46681 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46682 #endif
46683-#ifdef CONFIG_KALLSYMS
46684+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46685 INF("wchan", S_IRUGO, proc_pid_wchan),
46686 #endif
46687-#ifdef CONFIG_STACKTRACE
46688+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46689 ONE("stack", S_IRUGO, proc_pid_stack),
46690 #endif
46691 #ifdef CONFIG_SCHEDSTATS
46692@@ -2864,6 +2979,9 @@ static const struct pid_entry tgid_base_
46693 #ifdef CONFIG_HARDWALL
46694 INF("hardwall", S_IRUGO, proc_pid_hardwall),
46695 #endif
46696+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46697+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
46698+#endif
46699 };
46700
46701 static int proc_tgid_base_readdir(struct file * filp,
46702@@ -2989,7 +3107,14 @@ static struct dentry *proc_pid_instantia
46703 if (!inode)
46704 goto out;
46705
46706+#ifdef CONFIG_GRKERNSEC_PROC_USER
46707+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
46708+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46709+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46710+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
46711+#else
46712 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
46713+#endif
46714 inode->i_op = &proc_tgid_base_inode_operations;
46715 inode->i_fop = &proc_tgid_base_operations;
46716 inode->i_flags|=S_IMMUTABLE;
46717@@ -3031,7 +3156,14 @@ struct dentry *proc_pid_lookup(struct in
46718 if (!task)
46719 goto out;
46720
46721+ if (!has_group_leader_pid(task))
46722+ goto out_put_task;
46723+
46724+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46725+ goto out_put_task;
46726+
46727 result = proc_pid_instantiate(dir, dentry, task, NULL);
46728+out_put_task:
46729 put_task_struct(task);
46730 out:
46731 return result;
46732@@ -3096,6 +3228,11 @@ int proc_pid_readdir(struct file * filp,
46733 {
46734 unsigned int nr;
46735 struct task_struct *reaper;
46736+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46737+ const struct cred *tmpcred = current_cred();
46738+ const struct cred *itercred;
46739+#endif
46740+ filldir_t __filldir = filldir;
46741 struct tgid_iter iter;
46742 struct pid_namespace *ns;
46743
46744@@ -3119,8 +3256,27 @@ int proc_pid_readdir(struct file * filp,
46745 for (iter = next_tgid(ns, iter);
46746 iter.task;
46747 iter.tgid += 1, iter = next_tgid(ns, iter)) {
46748+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46749+ rcu_read_lock();
46750+ itercred = __task_cred(iter.task);
46751+#endif
46752+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
46753+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46754+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
46755+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46756+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46757+#endif
46758+ )
46759+#endif
46760+ )
46761+ __filldir = &gr_fake_filldir;
46762+ else
46763+ __filldir = filldir;
46764+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46765+ rcu_read_unlock();
46766+#endif
46767 filp->f_pos = iter.tgid + TGID_OFFSET;
46768- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
46769+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
46770 put_task_struct(iter.task);
46771 goto out;
46772 }
46773@@ -3148,7 +3304,7 @@ static const struct pid_entry tid_base_s
46774 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
46775 #endif
46776 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46777-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46778+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46779 INF("syscall", S_IRUGO, proc_pid_syscall),
46780 #endif
46781 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46782@@ -3172,10 +3328,10 @@ static const struct pid_entry tid_base_s
46783 #ifdef CONFIG_SECURITY
46784 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46785 #endif
46786-#ifdef CONFIG_KALLSYMS
46787+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46788 INF("wchan", S_IRUGO, proc_pid_wchan),
46789 #endif
46790-#ifdef CONFIG_STACKTRACE
46791+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46792 ONE("stack", S_IRUGO, proc_pid_stack),
46793 #endif
46794 #ifdef CONFIG_SCHEDSTATS
46795diff -urNp linux-3.1.4/fs/proc/cmdline.c linux-3.1.4/fs/proc/cmdline.c
46796--- linux-3.1.4/fs/proc/cmdline.c 2011-11-11 15:19:27.000000000 -0500
46797+++ linux-3.1.4/fs/proc/cmdline.c 2011-11-16 18:40:29.000000000 -0500
46798@@ -23,7 +23,11 @@ static const struct file_operations cmdl
46799
46800 static int __init proc_cmdline_init(void)
46801 {
46802+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46803+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
46804+#else
46805 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
46806+#endif
46807 return 0;
46808 }
46809 module_init(proc_cmdline_init);
46810diff -urNp linux-3.1.4/fs/proc/devices.c linux-3.1.4/fs/proc/devices.c
46811--- linux-3.1.4/fs/proc/devices.c 2011-11-11 15:19:27.000000000 -0500
46812+++ linux-3.1.4/fs/proc/devices.c 2011-11-16 18:40:29.000000000 -0500
46813@@ -64,7 +64,11 @@ static const struct file_operations proc
46814
46815 static int __init proc_devices_init(void)
46816 {
46817+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46818+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
46819+#else
46820 proc_create("devices", 0, NULL, &proc_devinfo_operations);
46821+#endif
46822 return 0;
46823 }
46824 module_init(proc_devices_init);
46825diff -urNp linux-3.1.4/fs/proc/inode.c linux-3.1.4/fs/proc/inode.c
46826--- linux-3.1.4/fs/proc/inode.c 2011-11-11 15:19:27.000000000 -0500
46827+++ linux-3.1.4/fs/proc/inode.c 2011-11-16 18:40:29.000000000 -0500
46828@@ -18,12 +18,18 @@
46829 #include <linux/module.h>
46830 #include <linux/sysctl.h>
46831 #include <linux/slab.h>
46832+#include <linux/grsecurity.h>
46833
46834 #include <asm/system.h>
46835 #include <asm/uaccess.h>
46836
46837 #include "internal.h"
46838
46839+#ifdef CONFIG_PROC_SYSCTL
46840+extern const struct inode_operations proc_sys_inode_operations;
46841+extern const struct inode_operations proc_sys_dir_operations;
46842+#endif
46843+
46844 static void proc_evict_inode(struct inode *inode)
46845 {
46846 struct proc_dir_entry *de;
46847@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inod
46848 ns_ops = PROC_I(inode)->ns_ops;
46849 if (ns_ops && ns_ops->put)
46850 ns_ops->put(PROC_I(inode)->ns);
46851+
46852+#ifdef CONFIG_PROC_SYSCTL
46853+ if (inode->i_op == &proc_sys_inode_operations ||
46854+ inode->i_op == &proc_sys_dir_operations)
46855+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46856+#endif
46857+
46858 }
46859
46860 static struct kmem_cache * proc_inode_cachep;
46861@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct supe
46862 if (de->mode) {
46863 inode->i_mode = de->mode;
46864 inode->i_uid = de->uid;
46865+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46866+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46867+#else
46868 inode->i_gid = de->gid;
46869+#endif
46870 }
46871 if (de->size)
46872 inode->i_size = de->size;
46873diff -urNp linux-3.1.4/fs/proc/internal.h linux-3.1.4/fs/proc/internal.h
46874--- linux-3.1.4/fs/proc/internal.h 2011-11-11 15:19:27.000000000 -0500
46875+++ linux-3.1.4/fs/proc/internal.h 2011-11-16 18:40:29.000000000 -0500
46876@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
46877 struct pid *pid, struct task_struct *task);
46878 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46879 struct pid *pid, struct task_struct *task);
46880+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46881+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46882+#endif
46883 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46884
46885 extern const struct file_operations proc_maps_operations;
46886diff -urNp linux-3.1.4/fs/proc/Kconfig linux-3.1.4/fs/proc/Kconfig
46887--- linux-3.1.4/fs/proc/Kconfig 2011-11-11 15:19:27.000000000 -0500
46888+++ linux-3.1.4/fs/proc/Kconfig 2011-11-16 18:40:29.000000000 -0500
46889@@ -30,12 +30,12 @@ config PROC_FS
46890
46891 config PROC_KCORE
46892 bool "/proc/kcore support" if !ARM
46893- depends on PROC_FS && MMU
46894+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
46895
46896 config PROC_VMCORE
46897 bool "/proc/vmcore support"
46898- depends on PROC_FS && CRASH_DUMP
46899- default y
46900+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
46901+ default n
46902 help
46903 Exports the dump image of crashed kernel in ELF format.
46904
46905@@ -59,8 +59,8 @@ config PROC_SYSCTL
46906 limited in memory.
46907
46908 config PROC_PAGE_MONITOR
46909- default y
46910- depends on PROC_FS && MMU
46911+ default n
46912+ depends on PROC_FS && MMU && !GRKERNSEC
46913 bool "Enable /proc page monitoring" if EXPERT
46914 help
46915 Various /proc files exist to monitor process memory utilization:
46916diff -urNp linux-3.1.4/fs/proc/kcore.c linux-3.1.4/fs/proc/kcore.c
46917--- linux-3.1.4/fs/proc/kcore.c 2011-11-11 15:19:27.000000000 -0500
46918+++ linux-3.1.4/fs/proc/kcore.c 2011-11-16 18:40:29.000000000 -0500
46919@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
46920 off_t offset = 0;
46921 struct kcore_list *m;
46922
46923+ pax_track_stack();
46924+
46925 /* setup ELF header */
46926 elf = (struct elfhdr *) bufp;
46927 bufp += sizeof(struct elfhdr);
46928@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
46929 * the addresses in the elf_phdr on our list.
46930 */
46931 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46932- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46933+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46934+ if (tsz > buflen)
46935 tsz = buflen;
46936-
46937+
46938 while (buflen) {
46939 struct kcore_list *m;
46940
46941@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
46942 kfree(elf_buf);
46943 } else {
46944 if (kern_addr_valid(start)) {
46945- unsigned long n;
46946+ char *elf_buf;
46947+ mm_segment_t oldfs;
46948
46949- n = copy_to_user(buffer, (char *)start, tsz);
46950- /*
46951- * We cannot distingush between fault on source
46952- * and fault on destination. When this happens
46953- * we clear too and hope it will trigger the
46954- * EFAULT again.
46955- */
46956- if (n) {
46957- if (clear_user(buffer + tsz - n,
46958- n))
46959+ elf_buf = kmalloc(tsz, GFP_KERNEL);
46960+ if (!elf_buf)
46961+ return -ENOMEM;
46962+ oldfs = get_fs();
46963+ set_fs(KERNEL_DS);
46964+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46965+ set_fs(oldfs);
46966+ if (copy_to_user(buffer, elf_buf, tsz)) {
46967+ kfree(elf_buf);
46968 return -EFAULT;
46969+ }
46970 }
46971+ set_fs(oldfs);
46972+ kfree(elf_buf);
46973 } else {
46974 if (clear_user(buffer, tsz))
46975 return -EFAULT;
46976@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
46977
46978 static int open_kcore(struct inode *inode, struct file *filp)
46979 {
46980+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46981+ return -EPERM;
46982+#endif
46983 if (!capable(CAP_SYS_RAWIO))
46984 return -EPERM;
46985 if (kcore_need_update)
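[Editor's illustration, not part of the patch.] The read_kcore() hunk replaces the direct copy_to_user() from a kernel virtual address with a bounce buffer: the bytes are first pulled into a kmalloc'd buffer under set_fs(KERNEL_DS), and only that buffer is exposed to copy_to_user(). A reduced sketch of that sequence; bounce_copy_to_user is an illustrative name and the error handling is trimmed relative to the patch.

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Copy tsz bytes living at a kernel virtual address to user space via a
 * temporary kernel buffer, as in the hunk above. */
static int bounce_copy_to_user(char __user *buffer, unsigned long start,
			       size_t tsz)
{
	mm_segment_t oldfs;
	char *elf_buf;
	int ret = 0;

	elf_buf = kmalloc(tsz, GFP_KERNEL);
	if (!elf_buf)
		return -ENOMEM;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (__copy_from_user(elf_buf, (const void __user *)start, tsz))
		ret = -EFAULT;
	set_fs(oldfs);

	if (!ret && copy_to_user(buffer, elf_buf, tsz))
		ret = -EFAULT;

	kfree(elf_buf);
	return ret;
}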
46986diff -urNp linux-3.1.4/fs/proc/meminfo.c linux-3.1.4/fs/proc/meminfo.c
46987--- linux-3.1.4/fs/proc/meminfo.c 2011-11-11 15:19:27.000000000 -0500
46988+++ linux-3.1.4/fs/proc/meminfo.c 2011-11-16 18:40:29.000000000 -0500
46989@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
46990 unsigned long pages[NR_LRU_LISTS];
46991 int lru;
46992
46993+ pax_track_stack();
46994+
46995 /*
46996 * display in kilobytes.
46997 */
46998@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
46999 vmi.used >> 10,
47000 vmi.largest_chunk >> 10
47001 #ifdef CONFIG_MEMORY_FAILURE
47002- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
47003+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
47004 #endif
47005 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
47006 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
47007diff -urNp linux-3.1.4/fs/proc/nommu.c linux-3.1.4/fs/proc/nommu.c
47008--- linux-3.1.4/fs/proc/nommu.c 2011-11-11 15:19:27.000000000 -0500
47009+++ linux-3.1.4/fs/proc/nommu.c 2011-11-16 18:39:08.000000000 -0500
47010@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
47011 if (len < 1)
47012 len = 1;
47013 seq_printf(m, "%*c", len, ' ');
47014- seq_path(m, &file->f_path, "");
47015+ seq_path(m, &file->f_path, "\n\\");
47016 }
47017
47018 seq_putc(m, '\n');
47019diff -urNp linux-3.1.4/fs/proc/proc_net.c linux-3.1.4/fs/proc/proc_net.c
47020--- linux-3.1.4/fs/proc/proc_net.c 2011-11-11 15:19:27.000000000 -0500
47021+++ linux-3.1.4/fs/proc/proc_net.c 2011-11-16 18:40:29.000000000 -0500
47022@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
47023 struct task_struct *task;
47024 struct nsproxy *ns;
47025 struct net *net = NULL;
47026+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47027+ const struct cred *cred = current_cred();
47028+#endif
47029+
47030+#ifdef CONFIG_GRKERNSEC_PROC_USER
47031+ if (cred->fsuid)
47032+ return net;
47033+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47034+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
47035+ return net;
47036+#endif
47037
47038 rcu_read_lock();
47039 task = pid_task(proc_pid(dir), PIDTYPE_PID);
47040diff -urNp linux-3.1.4/fs/proc/proc_sysctl.c linux-3.1.4/fs/proc/proc_sysctl.c
47041--- linux-3.1.4/fs/proc/proc_sysctl.c 2011-11-11 15:19:27.000000000 -0500
47042+++ linux-3.1.4/fs/proc/proc_sysctl.c 2011-11-18 18:45:33.000000000 -0500
47043@@ -8,11 +8,13 @@
47044 #include <linux/namei.h>
47045 #include "internal.h"
47046
47047+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
47048+
47049 static const struct dentry_operations proc_sys_dentry_operations;
47050 static const struct file_operations proc_sys_file_operations;
47051-static const struct inode_operations proc_sys_inode_operations;
47052+const struct inode_operations proc_sys_inode_operations;
47053 static const struct file_operations proc_sys_dir_file_operations;
47054-static const struct inode_operations proc_sys_dir_operations;
47055+const struct inode_operations proc_sys_dir_operations;
47056
47057 static struct inode *proc_sys_make_inode(struct super_block *sb,
47058 struct ctl_table_header *head, struct ctl_table *table)
47059@@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(st
47060
47061 err = NULL;
47062 d_set_d_op(dentry, &proc_sys_dentry_operations);
47063+
47064+ gr_handle_proc_create(dentry, inode);
47065+
47066 d_add(dentry, inode);
47067
47068+ if (gr_handle_sysctl(p, MAY_EXEC))
47069+ err = ERR_PTR(-ENOENT);
47070+
47071 out:
47072 sysctl_head_finish(head);
47073 return err;
47074@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct fi
47075 return -ENOMEM;
47076 } else {
47077 d_set_d_op(child, &proc_sys_dentry_operations);
47078+
47079+ gr_handle_proc_create(child, inode);
47080+
47081 d_add(child, inode);
47082 }
47083 } else {
47084@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header
47085 if (*pos < file->f_pos)
47086 continue;
47087
47088+ if (gr_handle_sysctl(table, 0))
47089+ continue;
47090+
47091 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
47092 if (res)
47093 return res;
47094@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmo
47095 if (IS_ERR(head))
47096 return PTR_ERR(head);
47097
47098+ if (table && gr_handle_sysctl(table, MAY_EXEC))
47099+ return -ENOENT;
47100+
47101 generic_fillattr(inode, stat);
47102 if (table)
47103 stat->mode = (stat->mode & S_IFMT) | table->mode;
47104@@ -370,17 +387,18 @@ static const struct file_operations proc
47105 };
47106
47107 static const struct file_operations proc_sys_dir_file_operations = {
47108+ .read = generic_read_dir,
47109 .readdir = proc_sys_readdir,
47110 .llseek = generic_file_llseek,
47111 };
47112
47113-static const struct inode_operations proc_sys_inode_operations = {
47114+const struct inode_operations proc_sys_inode_operations = {
47115 .permission = proc_sys_permission,
47116 .setattr = proc_sys_setattr,
47117 .getattr = proc_sys_getattr,
47118 };
47119
47120-static const struct inode_operations proc_sys_dir_operations = {
47121+const struct inode_operations proc_sys_dir_operations = {
47122 .lookup = proc_sys_lookup,
47123 .permission = proc_sys_permission,
47124 .setattr = proc_sys_setattr,
47125diff -urNp linux-3.1.4/fs/proc/root.c linux-3.1.4/fs/proc/root.c
47126--- linux-3.1.4/fs/proc/root.c 2011-11-11 15:19:27.000000000 -0500
47127+++ linux-3.1.4/fs/proc/root.c 2011-11-16 18:40:29.000000000 -0500
47128@@ -123,7 +123,15 @@ void __init proc_root_init(void)
47129 #ifdef CONFIG_PROC_DEVICETREE
47130 proc_device_tree_init();
47131 #endif
47132+#ifdef CONFIG_GRKERNSEC_PROC_ADD
47133+#ifdef CONFIG_GRKERNSEC_PROC_USER
47134+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
47135+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47136+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
47137+#endif
47138+#else
47139 proc_mkdir("bus", NULL);
47140+#endif
47141 proc_sys_init();
47142 }
47143
47144diff -urNp linux-3.1.4/fs/proc/task_mmu.c linux-3.1.4/fs/proc/task_mmu.c
47145--- linux-3.1.4/fs/proc/task_mmu.c 2011-11-11 15:19:27.000000000 -0500
47146+++ linux-3.1.4/fs/proc/task_mmu.c 2011-11-16 18:40:29.000000000 -0500
47147@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
47148 "VmExe:\t%8lu kB\n"
47149 "VmLib:\t%8lu kB\n"
47150 "VmPTE:\t%8lu kB\n"
47151- "VmSwap:\t%8lu kB\n",
47152- hiwater_vm << (PAGE_SHIFT-10),
47153+ "VmSwap:\t%8lu kB\n"
47154+
47155+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47156+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
47157+#endif
47158+
47159+ ,hiwater_vm << (PAGE_SHIFT-10),
47160 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
47161 mm->locked_vm << (PAGE_SHIFT-10),
47162 hiwater_rss << (PAGE_SHIFT-10),
47163@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
47164 data << (PAGE_SHIFT-10),
47165 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
47166 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
47167- swap << (PAGE_SHIFT-10));
47168+ swap << (PAGE_SHIFT-10)
47169+
47170+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47171+ , mm->context.user_cs_base, mm->context.user_cs_limit
47172+#endif
47173+
47174+ );
47175 }
47176
47177 unsigned long task_vsize(struct mm_struct *mm)
47178@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
47179 return ret;
47180 }
47181
47182+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47183+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47184+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47185+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47186+#endif
47187+
47188 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47189 {
47190 struct mm_struct *mm = vma->vm_mm;
47191@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
47192 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
47193 }
47194
47195- /* We don't show the stack guard page in /proc/maps */
47196+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47197+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
47198+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
47199+#else
47200 start = vma->vm_start;
47201- if (stack_guard_page_start(vma, start))
47202- start += PAGE_SIZE;
47203 end = vma->vm_end;
47204- if (stack_guard_page_end(vma, end))
47205- end -= PAGE_SIZE;
47206+#endif
47207
47208 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
47209 start,
47210@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
47211 flags & VM_WRITE ? 'w' : '-',
47212 flags & VM_EXEC ? 'x' : '-',
47213 flags & VM_MAYSHARE ? 's' : 'p',
47214+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47215+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
47216+#else
47217 pgoff,
47218+#endif
47219 MAJOR(dev), MINOR(dev), ino, &len);
47220
47221 /*
47222@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
47223 */
47224 if (file) {
47225 pad_len_spaces(m, len);
47226- seq_path(m, &file->f_path, "\n");
47227+ seq_path(m, &file->f_path, "\n\\");
47228 } else {
47229 const char *name = arch_vma_name(vma);
47230 if (!name) {
47231@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
47232 if (vma->vm_start <= mm->brk &&
47233 vma->vm_end >= mm->start_brk) {
47234 name = "[heap]";
47235- } else if (vma->vm_start <= mm->start_stack &&
47236- vma->vm_end >= mm->start_stack) {
47237+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
47238+ (vma->vm_start <= mm->start_stack &&
47239+ vma->vm_end >= mm->start_stack)) {
47240 name = "[stack]";
47241 }
47242 } else {
47243@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
47244 };
47245
47246 memset(&mss, 0, sizeof mss);
47247- mss.vma = vma;
47248- /* mmap_sem is held in m_start */
47249- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
47250- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
47251-
47252+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47253+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
47254+#endif
47255+ mss.vma = vma;
47256+ /* mmap_sem is held in m_start */
47257+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
47258+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
47259+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47260+ }
47261+#endif
47262 show_map_vma(m, vma);
47263
47264 seq_printf(m,
47265@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
47266 "KernelPageSize: %8lu kB\n"
47267 "MMUPageSize: %8lu kB\n"
47268 "Locked: %8lu kB\n",
47269+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47270+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
47271+#else
47272 (vma->vm_end - vma->vm_start) >> 10,
47273+#endif
47274 mss.resident >> 10,
47275 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
47276 mss.shared_clean >> 10,
47277@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file
47278
47279 if (file) {
47280 seq_printf(m, " file=");
47281- seq_path(m, &file->f_path, "\n\t= ");
47282+ seq_path(m, &file->f_path, "\n\t\\= ");
47283 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
47284 seq_printf(m, " heap");
47285 } else if (vma->vm_start <= mm->start_stack &&
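The task_mmu.c hunks above add a PAX_RAND_FLAGS() test under CONFIG_GRKERNSEC_PROC_MEMMAP: when the mm being shown is not the caller's own and carries MF_PAX_RANDMMAP or MF_PAX_SEGMEXEC, the start, end and file offset printed in /proc/<pid>/maps and smaps are forced to zero so the randomized layout is not leaked to other users. A user-space sketch of that decision; the flag values and helper name below are placeholders, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder flag values; the kernel's MF_PAX_* constants are defined elsewhere. */
    #define MF_PAX_SEGMEXEC 0x01UL
    #define MF_PAX_RANDMMAP 0x08UL

    /* Mirrors the PAX_RAND_FLAGS() condition: hide addresses of a foreign,
     * randomized address space. */
    static bool hide_layout(unsigned long pax_flags, bool own_mm)
    {
        return !own_mm && (pax_flags & (MF_PAX_RANDMMAP | MF_PAX_SEGMEXEC));
    }

    int main(void)
    {
        unsigned long start = 0x12340000UL, end = 0x12345000UL;
        bool hide = hide_layout(MF_PAX_RANDMMAP, /*own_mm=*/false);

        printf("%08lx-%08lx\n", hide ? 0UL : start, hide ? 0UL : end);
        return 0;
    }
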
47286diff -urNp linux-3.1.4/fs/proc/task_nommu.c linux-3.1.4/fs/proc/task_nommu.c
47287--- linux-3.1.4/fs/proc/task_nommu.c 2011-11-11 15:19:27.000000000 -0500
47288+++ linux-3.1.4/fs/proc/task_nommu.c 2011-11-16 18:39:08.000000000 -0500
47289@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
47290 else
47291 bytes += kobjsize(mm);
47292
47293- if (current->fs && current->fs->users > 1)
47294+ if (current->fs && atomic_read(&current->fs->users) > 1)
47295 sbytes += kobjsize(current->fs);
47296 else
47297 bytes += kobjsize(current->fs);
47298@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
47299
47300 if (file) {
47301 pad_len_spaces(m, len);
47302- seq_path(m, &file->f_path, "");
47303+ seq_path(m, &file->f_path, "\n\\");
47304 } else if (mm) {
47305 if (vma->vm_start <= mm->start_stack &&
47306 vma->vm_end >= mm->start_stack) {
47307diff -urNp linux-3.1.4/fs/quota/netlink.c linux-3.1.4/fs/quota/netlink.c
47308--- linux-3.1.4/fs/quota/netlink.c 2011-11-11 15:19:27.000000000 -0500
47309+++ linux-3.1.4/fs/quota/netlink.c 2011-11-16 18:39:08.000000000 -0500
47310@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
47311 void quota_send_warning(short type, unsigned int id, dev_t dev,
47312 const char warntype)
47313 {
47314- static atomic_t seq;
47315+ static atomic_unchecked_t seq;
47316 struct sk_buff *skb;
47317 void *msg_head;
47318 int ret;
47319@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
47320 "VFS: Not enough memory to send quota warning.\n");
47321 return;
47322 }
47323- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
47324+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
47325 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
47326 if (!msg_head) {
47327 printk(KERN_ERR
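The quota warning sequence number only has to keep increasing and may wrap, so the hunk above moves it from atomic_t to atomic_unchecked_t and uses atomic_add_return_unchecked(), the variant this patch applies to counters that are deliberately exempt from its atomic overflow checking. A rough user-space analogue of such a wrap-tolerant counter, written with C11 atomics rather than any kernel interface:

    #include <stdatomic.h>
    #include <stdio.h>

    /* A message sequence id: uniqueness matters, wrap-around does not. */
    static atomic_uint seq;

    static unsigned int next_seq(void)
    {
        /* fetch_add returns the old value; +1 yields the freshly taken id */
        return atomic_fetch_add_explicit(&seq, 1u, memory_order_relaxed) + 1u;
    }

    int main(void)
    {
        unsigned int a = next_seq(), b = next_seq(), c = next_seq();
        printf("%u %u %u\n", a, b, c);   /* 1 2 3 */
        return 0;
    }
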
47328diff -urNp linux-3.1.4/fs/readdir.c linux-3.1.4/fs/readdir.c
47329--- linux-3.1.4/fs/readdir.c 2011-11-11 15:19:27.000000000 -0500
47330+++ linux-3.1.4/fs/readdir.c 2011-11-16 18:40:29.000000000 -0500
47331@@ -17,6 +17,7 @@
47332 #include <linux/security.h>
47333 #include <linux/syscalls.h>
47334 #include <linux/unistd.h>
47335+#include <linux/namei.h>
47336
47337 #include <asm/uaccess.h>
47338
47339@@ -67,6 +68,7 @@ struct old_linux_dirent {
47340
47341 struct readdir_callback {
47342 struct old_linux_dirent __user * dirent;
47343+ struct file * file;
47344 int result;
47345 };
47346
47347@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
47348 buf->result = -EOVERFLOW;
47349 return -EOVERFLOW;
47350 }
47351+
47352+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47353+ return 0;
47354+
47355 buf->result++;
47356 dirent = buf->dirent;
47357 if (!access_ok(VERIFY_WRITE, dirent,
47358@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
47359
47360 buf.result = 0;
47361 buf.dirent = dirent;
47362+ buf.file = file;
47363
47364 error = vfs_readdir(file, fillonedir, &buf);
47365 if (buf.result)
47366@@ -142,6 +149,7 @@ struct linux_dirent {
47367 struct getdents_callback {
47368 struct linux_dirent __user * current_dir;
47369 struct linux_dirent __user * previous;
47370+ struct file * file;
47371 int count;
47372 int error;
47373 };
47374@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
47375 buf->error = -EOVERFLOW;
47376 return -EOVERFLOW;
47377 }
47378+
47379+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47380+ return 0;
47381+
47382 dirent = buf->previous;
47383 if (dirent) {
47384 if (__put_user(offset, &dirent->d_off))
47385@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
47386 buf.previous = NULL;
47387 buf.count = count;
47388 buf.error = 0;
47389+ buf.file = file;
47390
47391 error = vfs_readdir(file, filldir, &buf);
47392 if (error >= 0)
47393@@ -229,6 +242,7 @@ out:
47394 struct getdents_callback64 {
47395 struct linux_dirent64 __user * current_dir;
47396 struct linux_dirent64 __user * previous;
47397+ struct file *file;
47398 int count;
47399 int error;
47400 };
47401@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
47402 buf->error = -EINVAL; /* only used if we fail.. */
47403 if (reclen > buf->count)
47404 return -EINVAL;
47405+
47406+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47407+ return 0;
47408+
47409 dirent = buf->previous;
47410 if (dirent) {
47411 if (__put_user(offset, &dirent->d_off))
47412@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
47413
47414 buf.current_dir = dirent;
47415 buf.previous = NULL;
47416+ buf.file = file;
47417 buf.count = count;
47418 buf.error = 0;
47419
47420@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
47421 error = buf.error;
47422 lastdirent = buf.previous;
47423 if (lastdirent) {
47424- typeof(lastdirent->d_off) d_off = file->f_pos;
47425+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
47426 if (__put_user(d_off, &lastdirent->d_off))
47427 error = -EFAULT;
47428 else
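All three readdir callback contexts above gain a struct file pointer so gr_acl_handle_filldir() can be consulted for every entry; when the hook refuses, the callback returns 0 and the entry is silently left out of the listing. A self-contained sketch of the same filter-in-callback idea; the allowed() predicate is hypothetical and stands in for the RBAC check:

    #include <stdio.h>
    #include <string.h>

    typedef int (*fill_fn)(void *ctx, const char *name, int namlen);

    /* Hypothetical stand-in for the per-entry RBAC decision. */
    static int allowed(const char *name)
    {
        return strcmp(name, "secret") != 0;
    }

    static int fill_one(void *ctx, const char *name, int namlen)
    {
        int *shown = ctx;

        if (!allowed(name))
            return 0;            /* skip silently, as the patched filldir()s do */
        (*shown)++;
        printf("%.*s\n", namlen, name);
        return 0;
    }

    static void iterate(const char **names, int n, fill_fn fill, void *ctx)
    {
        for (int i = 0; i < n; i++)
            fill(ctx, names[i], (int)strlen(names[i]));
    }

    int main(void)
    {
        const char *entries[] = { ".", "..", "secret", "notes.txt" };
        int shown = 0;

        iterate(entries, 4, fill_one, &shown);
        printf("visible entries: %d\n", shown);
        return 0;
    }
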
47429diff -urNp linux-3.1.4/fs/reiserfs/dir.c linux-3.1.4/fs/reiserfs/dir.c
47430--- linux-3.1.4/fs/reiserfs/dir.c 2011-11-11 15:19:27.000000000 -0500
47431+++ linux-3.1.4/fs/reiserfs/dir.c 2011-11-16 18:40:29.000000000 -0500
47432@@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentr
47433 struct reiserfs_dir_entry de;
47434 int ret = 0;
47435
47436+ pax_track_stack();
47437+
47438 reiserfs_write_lock(inode->i_sb);
47439
47440 reiserfs_check_lock_depth(inode->i_sb, "readdir");
47441diff -urNp linux-3.1.4/fs/reiserfs/do_balan.c linux-3.1.4/fs/reiserfs/do_balan.c
47442--- linux-3.1.4/fs/reiserfs/do_balan.c 2011-11-11 15:19:27.000000000 -0500
47443+++ linux-3.1.4/fs/reiserfs/do_balan.c 2011-11-16 18:39:08.000000000 -0500
47444@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
47445 return;
47446 }
47447
47448- atomic_inc(&(fs_generation(tb->tb_sb)));
47449+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
47450 do_balance_starts(tb);
47451
47452 /* balance leaf returns 0 except if combining L R and S into
47453diff -urNp linux-3.1.4/fs/reiserfs/journal.c linux-3.1.4/fs/reiserfs/journal.c
47454--- linux-3.1.4/fs/reiserfs/journal.c 2011-11-11 15:19:27.000000000 -0500
47455+++ linux-3.1.4/fs/reiserfs/journal.c 2011-11-16 18:40:29.000000000 -0500
47456@@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_brea
47457 struct buffer_head *bh;
47458 int i, j;
47459
47460+ pax_track_stack();
47461+
47462 bh = __getblk(dev, block, bufsize);
47463 if (buffer_uptodate(bh))
47464 return (bh);
47465diff -urNp linux-3.1.4/fs/reiserfs/namei.c linux-3.1.4/fs/reiserfs/namei.c
47466--- linux-3.1.4/fs/reiserfs/namei.c 2011-11-11 15:19:27.000000000 -0500
47467+++ linux-3.1.4/fs/reiserfs/namei.c 2011-11-16 18:40:29.000000000 -0500
47468@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
47469 unsigned long savelink = 1;
47470 struct timespec ctime;
47471
47472+ pax_track_stack();
47473+
47474 /* three balancings: (1) old name removal, (2) new name insertion
47475 and (3) maybe "save" link insertion
47476 stat data updates: (1) old directory,
47477diff -urNp linux-3.1.4/fs/reiserfs/procfs.c linux-3.1.4/fs/reiserfs/procfs.c
47478--- linux-3.1.4/fs/reiserfs/procfs.c 2011-11-11 15:19:27.000000000 -0500
47479+++ linux-3.1.4/fs/reiserfs/procfs.c 2011-11-16 18:40:29.000000000 -0500
47480@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
47481 "SMALL_TAILS " : "NO_TAILS ",
47482 replay_only(sb) ? "REPLAY_ONLY " : "",
47483 convert_reiserfs(sb) ? "CONV " : "",
47484- atomic_read(&r->s_generation_counter),
47485+ atomic_read_unchecked(&r->s_generation_counter),
47486 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
47487 SF(s_do_balance), SF(s_unneeded_left_neighbor),
47488 SF(s_good_search_by_key_reada), SF(s_bmaps),
47489@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
47490 struct journal_params *jp = &rs->s_v1.s_journal;
47491 char b[BDEVNAME_SIZE];
47492
47493+ pax_track_stack();
47494+
47495 seq_printf(m, /* on-disk fields */
47496 "jp_journal_1st_block: \t%i\n"
47497 "jp_journal_dev: \t%s[%x]\n"
47498diff -urNp linux-3.1.4/fs/reiserfs/stree.c linux-3.1.4/fs/reiserfs/stree.c
47499--- linux-3.1.4/fs/reiserfs/stree.c 2011-11-11 15:19:27.000000000 -0500
47500+++ linux-3.1.4/fs/reiserfs/stree.c 2011-11-16 18:40:29.000000000 -0500
47501@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
47502 int iter = 0;
47503 #endif
47504
47505+ pax_track_stack();
47506+
47507 BUG_ON(!th->t_trans_id);
47508
47509 init_tb_struct(th, &s_del_balance, sb, path,
47510@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
47511 int retval;
47512 int quota_cut_bytes = 0;
47513
47514+ pax_track_stack();
47515+
47516 BUG_ON(!th->t_trans_id);
47517
47518 le_key2cpu_key(&cpu_key, key);
47519@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
47520 int quota_cut_bytes;
47521 loff_t tail_pos = 0;
47522
47523+ pax_track_stack();
47524+
47525 BUG_ON(!th->t_trans_id);
47526
47527 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
47528@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
47529 int retval;
47530 int fs_gen;
47531
47532+ pax_track_stack();
47533+
47534 BUG_ON(!th->t_trans_id);
47535
47536 fs_gen = get_generation(inode->i_sb);
47537@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
47538 int fs_gen = 0;
47539 int quota_bytes = 0;
47540
47541+ pax_track_stack();
47542+
47543 BUG_ON(!th->t_trans_id);
47544
47545 if (inode) { /* Do we count quotas for item? */
47546diff -urNp linux-3.1.4/fs/reiserfs/super.c linux-3.1.4/fs/reiserfs/super.c
47547--- linux-3.1.4/fs/reiserfs/super.c 2011-11-11 15:19:27.000000000 -0500
47548+++ linux-3.1.4/fs/reiserfs/super.c 2011-11-16 18:40:29.000000000 -0500
47549@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
47550 {.option_name = NULL}
47551 };
47552
47553+ pax_track_stack();
47554+
47555 *blocks = 0;
47556 if (!options || !*options)
47557 /* use default configuration: create tails, journaling on, no
47558diff -urNp linux-3.1.4/fs/select.c linux-3.1.4/fs/select.c
47559--- linux-3.1.4/fs/select.c 2011-11-11 15:19:27.000000000 -0500
47560+++ linux-3.1.4/fs/select.c 2011-11-16 18:40:29.000000000 -0500
47561@@ -20,6 +20,7 @@
47562 #include <linux/module.h>
47563 #include <linux/slab.h>
47564 #include <linux/poll.h>
47565+#include <linux/security.h>
47566 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
47567 #include <linux/file.h>
47568 #include <linux/fdtable.h>
47569@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
47570 int retval, i, timed_out = 0;
47571 unsigned long slack = 0;
47572
47573+ pax_track_stack();
47574+
47575 rcu_read_lock();
47576 retval = max_select_fd(n, fds);
47577 rcu_read_unlock();
47578@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
47579 /* Allocate small arguments on the stack to save memory and be faster */
47580 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
47581
47582+ pax_track_stack();
47583+
47584 ret = -EINVAL;
47585 if (n < 0)
47586 goto out_nofds;
47587@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
47588 struct poll_list *walk = head;
47589 unsigned long todo = nfds;
47590
47591+ pax_track_stack();
47592+
47593+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
47594 if (nfds > rlimit(RLIMIT_NOFILE))
47595 return -EINVAL;
47596
47597diff -urNp linux-3.1.4/fs/seq_file.c linux-3.1.4/fs/seq_file.c
47598--- linux-3.1.4/fs/seq_file.c 2011-11-11 15:19:27.000000000 -0500
47599+++ linux-3.1.4/fs/seq_file.c 2011-11-16 18:39:08.000000000 -0500
47600@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
47601 return 0;
47602 }
47603 if (!m->buf) {
47604- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
47605+ m->size = PAGE_SIZE;
47606+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
47607 if (!m->buf)
47608 return -ENOMEM;
47609 }
47610@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
47611 Eoverflow:
47612 m->op->stop(m, p);
47613 kfree(m->buf);
47614- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
47615+ m->size <<= 1;
47616+ m->buf = kmalloc(m->size, GFP_KERNEL);
47617 return !m->buf ? -ENOMEM : -EAGAIN;
47618 }
47619
47620@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
47621 m->version = file->f_version;
47622 /* grab buffer if we didn't have one */
47623 if (!m->buf) {
47624- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
47625+ m->size = PAGE_SIZE;
47626+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
47627 if (!m->buf)
47628 goto Enomem;
47629 }
47630@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
47631 goto Fill;
47632 m->op->stop(m, p);
47633 kfree(m->buf);
47634- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
47635+ m->size <<= 1;
47636+ m->buf = kmalloc(m->size, GFP_KERNEL);
47637 if (!m->buf)
47638 goto Enomem;
47639 m->count = 0;
47640@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
47641 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
47642 void *data)
47643 {
47644- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
47645+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
47646 int res = -ENOMEM;
47647
47648 if (op) {
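The seq_file.c changes above do not alter behaviour: the assignments that used to sit inside the kmalloc() argument list (m->size = PAGE_SIZE and m->size <<= 1) are hoisted into their own statements, so the requested size is computed before the call rather than as a side effect of it. The same discard-and-double buffer pattern in user-space form, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { char *data; size_t size; };

    /* Discard the too-small buffer and retry with twice the size, mirroring the
     * overflow path in traverse()/seq_read(); the caller rebuilds the contents,
     * so nothing is copied over. */
    static int grow(struct buf *b)
    {
        free(b->data);
        b->size <<= 1;               /* size updated in its own statement ... */
        b->data = malloc(b->size);   /* ... and only then handed to the allocator */
        return b->data ? 0 : -1;
    }

    int main(void)
    {
        struct buf b = { malloc(4096), 4096 };

        if (!b.data || grow(&b) != 0)
            return 1;
        printf("buffer regrown to %zu bytes\n", b.size);
        free(b.data);
        return 0;
    }
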
47649diff -urNp linux-3.1.4/fs/splice.c linux-3.1.4/fs/splice.c
47650--- linux-3.1.4/fs/splice.c 2011-11-11 15:19:27.000000000 -0500
47651+++ linux-3.1.4/fs/splice.c 2011-11-16 18:40:29.000000000 -0500
47652@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
47653 pipe_lock(pipe);
47654
47655 for (;;) {
47656- if (!pipe->readers) {
47657+ if (!atomic_read(&pipe->readers)) {
47658 send_sig(SIGPIPE, current, 0);
47659 if (!ret)
47660 ret = -EPIPE;
47661@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
47662 do_wakeup = 0;
47663 }
47664
47665- pipe->waiting_writers++;
47666+ atomic_inc(&pipe->waiting_writers);
47667 pipe_wait(pipe);
47668- pipe->waiting_writers--;
47669+ atomic_dec(&pipe->waiting_writers);
47670 }
47671
47672 pipe_unlock(pipe);
47673@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
47674 .spd_release = spd_release_page,
47675 };
47676
47677+ pax_track_stack();
47678+
47679 if (splice_grow_spd(pipe, &spd))
47680 return -ENOMEM;
47681
47682@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
47683 old_fs = get_fs();
47684 set_fs(get_ds());
47685 /* The cast to a user pointer is valid due to the set_fs() */
47686- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
47687+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
47688 set_fs(old_fs);
47689
47690 return res;
47691@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
47692 old_fs = get_fs();
47693 set_fs(get_ds());
47694 /* The cast to a user pointer is valid due to the set_fs() */
47695- res = vfs_write(file, (const char __user *)buf, count, &pos);
47696+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
47697 set_fs(old_fs);
47698
47699 return res;
47700@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
47701 .spd_release = spd_release_page,
47702 };
47703
47704+ pax_track_stack();
47705+
47706 if (splice_grow_spd(pipe, &spd))
47707 return -ENOMEM;
47708
47709@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
47710 goto err;
47711
47712 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
47713- vec[i].iov_base = (void __user *) page_address(page);
47714+ vec[i].iov_base = (void __force_user *) page_address(page);
47715 vec[i].iov_len = this_len;
47716 spd.pages[i] = page;
47717 spd.nr_pages++;
47718@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
47719 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
47720 {
47721 while (!pipe->nrbufs) {
47722- if (!pipe->writers)
47723+ if (!atomic_read(&pipe->writers))
47724 return 0;
47725
47726- if (!pipe->waiting_writers && sd->num_spliced)
47727+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
47728 return 0;
47729
47730 if (sd->flags & SPLICE_F_NONBLOCK)
47731@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
47732 * out of the pipe right after the splice_to_pipe(). So set
47733 * PIPE_READERS appropriately.
47734 */
47735- pipe->readers = 1;
47736+ atomic_set(&pipe->readers, 1);
47737
47738 current->splice_pipe = pipe;
47739 }
47740@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
47741 };
47742 long ret;
47743
47744+ pax_track_stack();
47745+
47746 pipe = get_pipe_info(file);
47747 if (!pipe)
47748 return -EBADF;
47749@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
47750 ret = -ERESTARTSYS;
47751 break;
47752 }
47753- if (!pipe->writers)
47754+ if (!atomic_read(&pipe->writers))
47755 break;
47756- if (!pipe->waiting_writers) {
47757+ if (!atomic_read(&pipe->waiting_writers)) {
47758 if (flags & SPLICE_F_NONBLOCK) {
47759 ret = -EAGAIN;
47760 break;
47761@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
47762 pipe_lock(pipe);
47763
47764 while (pipe->nrbufs >= pipe->buffers) {
47765- if (!pipe->readers) {
47766+ if (!atomic_read(&pipe->readers)) {
47767 send_sig(SIGPIPE, current, 0);
47768 ret = -EPIPE;
47769 break;
47770@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
47771 ret = -ERESTARTSYS;
47772 break;
47773 }
47774- pipe->waiting_writers++;
47775+ atomic_inc(&pipe->waiting_writers);
47776 pipe_wait(pipe);
47777- pipe->waiting_writers--;
47778+ atomic_dec(&pipe->waiting_writers);
47779 }
47780
47781 pipe_unlock(pipe);
47782@@ -1819,14 +1825,14 @@ retry:
47783 pipe_double_lock(ipipe, opipe);
47784
47785 do {
47786- if (!opipe->readers) {
47787+ if (!atomic_read(&opipe->readers)) {
47788 send_sig(SIGPIPE, current, 0);
47789 if (!ret)
47790 ret = -EPIPE;
47791 break;
47792 }
47793
47794- if (!ipipe->nrbufs && !ipipe->writers)
47795+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
47796 break;
47797
47798 /*
47799@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
47800 pipe_double_lock(ipipe, opipe);
47801
47802 do {
47803- if (!opipe->readers) {
47804+ if (!atomic_read(&opipe->readers)) {
47805 send_sig(SIGPIPE, current, 0);
47806 if (!ret)
47807 ret = -EPIPE;
47808@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
47809 * return EAGAIN if we have the potential of some data in the
47810 * future, otherwise just return 0
47811 */
47812- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
47813+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
47814 ret = -EAGAIN;
47815
47816 pipe_unlock(ipipe);
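In fs/splice.c every access to pipe->readers, pipe->writers and pipe->waiting_writers now goes through atomic_read()/atomic_inc()/atomic_dec()/atomic_set(), matching the conversion of those pipe_inode_info fields to atomic_t elsewhere in this patch. A small user-space analogue of the waiting-writer bookkeeping, using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int waiting_writers;

    /* Same bookkeeping splice_to_pipe()/opipe_prep() perform around pipe_wait(). */
    static void wait_for_room(void)
    {
        atomic_fetch_add(&waiting_writers, 1);
        /* ... would block here until a reader drains the pipe ... */
        atomic_fetch_sub(&waiting_writers, 1);
    }

    int main(void)
    {
        wait_for_room();
        printf("waiting writers now: %d\n", atomic_load(&waiting_writers));
        return 0;
    }
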
47817diff -urNp linux-3.1.4/fs/sysfs/file.c linux-3.1.4/fs/sysfs/file.c
47818--- linux-3.1.4/fs/sysfs/file.c 2011-11-11 15:19:27.000000000 -0500
47819+++ linux-3.1.4/fs/sysfs/file.c 2011-11-16 18:39:08.000000000 -0500
47820@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
47821
47822 struct sysfs_open_dirent {
47823 atomic_t refcnt;
47824- atomic_t event;
47825+ atomic_unchecked_t event;
47826 wait_queue_head_t poll;
47827 struct list_head buffers; /* goes through sysfs_buffer.list */
47828 };
47829@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
47830 if (!sysfs_get_active(attr_sd))
47831 return -ENODEV;
47832
47833- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
47834+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
47835 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
47836
47837 sysfs_put_active(attr_sd);
47838@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
47839 return -ENOMEM;
47840
47841 atomic_set(&new_od->refcnt, 0);
47842- atomic_set(&new_od->event, 1);
47843+ atomic_set_unchecked(&new_od->event, 1);
47844 init_waitqueue_head(&new_od->poll);
47845 INIT_LIST_HEAD(&new_od->buffers);
47846 goto retry;
47847@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
47848
47849 sysfs_put_active(attr_sd);
47850
47851- if (buffer->event != atomic_read(&od->event))
47852+ if (buffer->event != atomic_read_unchecked(&od->event))
47853 goto trigger;
47854
47855 return DEFAULT_POLLMASK;
47856@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
47857
47858 od = sd->s_attr.open;
47859 if (od) {
47860- atomic_inc(&od->event);
47861+ atomic_inc_unchecked(&od->event);
47862 wake_up_interruptible(&od->poll);
47863 }
47864
47865diff -urNp linux-3.1.4/fs/sysfs/mount.c linux-3.1.4/fs/sysfs/mount.c
47866--- linux-3.1.4/fs/sysfs/mount.c 2011-11-11 15:19:27.000000000 -0500
47867+++ linux-3.1.4/fs/sysfs/mount.c 2011-11-16 18:40:29.000000000 -0500
47868@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
47869 .s_name = "",
47870 .s_count = ATOMIC_INIT(1),
47871 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
47872+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47873+ .s_mode = S_IFDIR | S_IRWXU,
47874+#else
47875 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
47876+#endif
47877 .s_ino = 1,
47878 };
47879
47880diff -urNp linux-3.1.4/fs/sysfs/symlink.c linux-3.1.4/fs/sysfs/symlink.c
47881--- linux-3.1.4/fs/sysfs/symlink.c 2011-11-11 15:19:27.000000000 -0500
47882+++ linux-3.1.4/fs/sysfs/symlink.c 2011-11-16 18:39:08.000000000 -0500
47883@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
47884
47885 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47886 {
47887- char *page = nd_get_link(nd);
47888+ const char *page = nd_get_link(nd);
47889 if (!IS_ERR(page))
47890 free_page((unsigned long)page);
47891 }
47892diff -urNp linux-3.1.4/fs/udf/inode.c linux-3.1.4/fs/udf/inode.c
47893--- linux-3.1.4/fs/udf/inode.c 2011-11-11 15:19:27.000000000 -0500
47894+++ linux-3.1.4/fs/udf/inode.c 2011-11-16 18:40:29.000000000 -0500
47895@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
47896 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
47897 int lastblock = 0;
47898
47899+ pax_track_stack();
47900+
47901 prev_epos.offset = udf_file_entry_alloc_offset(inode);
47902 prev_epos.block = iinfo->i_location;
47903 prev_epos.bh = NULL;
47904diff -urNp linux-3.1.4/fs/udf/misc.c linux-3.1.4/fs/udf/misc.c
47905--- linux-3.1.4/fs/udf/misc.c 2011-11-11 15:19:27.000000000 -0500
47906+++ linux-3.1.4/fs/udf/misc.c 2011-11-16 18:39:08.000000000 -0500
47907@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
47908
47909 u8 udf_tag_checksum(const struct tag *t)
47910 {
47911- u8 *data = (u8 *)t;
47912+ const u8 *data = (const u8 *)t;
47913 u8 checksum = 0;
47914 int i;
47915 for (i = 0; i < sizeof(struct tag); ++i)
47916diff -urNp linux-3.1.4/fs/utimes.c linux-3.1.4/fs/utimes.c
47917--- linux-3.1.4/fs/utimes.c 2011-11-11 15:19:27.000000000 -0500
47918+++ linux-3.1.4/fs/utimes.c 2011-11-16 18:40:29.000000000 -0500
47919@@ -1,6 +1,7 @@
47920 #include <linux/compiler.h>
47921 #include <linux/file.h>
47922 #include <linux/fs.h>
47923+#include <linux/security.h>
47924 #include <linux/linkage.h>
47925 #include <linux/mount.h>
47926 #include <linux/namei.h>
47927@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
47928 goto mnt_drop_write_and_out;
47929 }
47930 }
47931+
47932+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47933+ error = -EACCES;
47934+ goto mnt_drop_write_and_out;
47935+ }
47936+
47937 mutex_lock(&inode->i_mutex);
47938 error = notify_change(path->dentry, &newattrs);
47939 mutex_unlock(&inode->i_mutex);
47940diff -urNp linux-3.1.4/fs/xattr_acl.c linux-3.1.4/fs/xattr_acl.c
47941--- linux-3.1.4/fs/xattr_acl.c 2011-11-11 15:19:27.000000000 -0500
47942+++ linux-3.1.4/fs/xattr_acl.c 2011-11-16 18:39:08.000000000 -0500
47943@@ -17,8 +17,8 @@
47944 struct posix_acl *
47945 posix_acl_from_xattr(const void *value, size_t size)
47946 {
47947- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47948- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47949+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47950+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47951 int count;
47952 struct posix_acl *acl;
47953 struct posix_acl_entry *acl_e;
47954diff -urNp linux-3.1.4/fs/xattr.c linux-3.1.4/fs/xattr.c
47955--- linux-3.1.4/fs/xattr.c 2011-11-11 15:19:27.000000000 -0500
47956+++ linux-3.1.4/fs/xattr.c 2011-11-16 18:40:29.000000000 -0500
47957@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47958 * Extended attribute SET operations
47959 */
47960 static long
47961-setxattr(struct dentry *d, const char __user *name, const void __user *value,
47962+setxattr(struct path *path, const char __user *name, const void __user *value,
47963 size_t size, int flags)
47964 {
47965 int error;
47966@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
47967 return PTR_ERR(kvalue);
47968 }
47969
47970- error = vfs_setxattr(d, kname, kvalue, size, flags);
47971+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47972+ error = -EACCES;
47973+ goto out;
47974+ }
47975+
47976+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47977+out:
47978 kfree(kvalue);
47979 return error;
47980 }
47981@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
47982 return error;
47983 error = mnt_want_write(path.mnt);
47984 if (!error) {
47985- error = setxattr(path.dentry, name, value, size, flags);
47986+ error = setxattr(&path, name, value, size, flags);
47987 mnt_drop_write(path.mnt);
47988 }
47989 path_put(&path);
47990@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
47991 return error;
47992 error = mnt_want_write(path.mnt);
47993 if (!error) {
47994- error = setxattr(path.dentry, name, value, size, flags);
47995+ error = setxattr(&path, name, value, size, flags);
47996 mnt_drop_write(path.mnt);
47997 }
47998 path_put(&path);
47999@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
48000 const void __user *,value, size_t, size, int, flags)
48001 {
48002 struct file *f;
48003- struct dentry *dentry;
48004 int error = -EBADF;
48005
48006 f = fget(fd);
48007 if (!f)
48008 return error;
48009- dentry = f->f_path.dentry;
48010- audit_inode(NULL, dentry);
48011+ audit_inode(NULL, f->f_path.dentry);
48012 error = mnt_want_write_file(f);
48013 if (!error) {
48014- error = setxattr(dentry, name, value, size, flags);
48015+ error = setxattr(&f->f_path, name, value, size, flags);
48016 mnt_drop_write(f->f_path.mnt);
48017 }
48018 fput(f);
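setxattr() above now receives a struct path instead of a bare dentry, so the gr_acl_handle_setxattr() hook can check both the dentry and the mount and fail the call with -EACCES before vfs_setxattr() runs. A check-then-operate sketch in user-space C; struct path and the policy predicate here are simplified stand-ins, not kernel definitions:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's struct path (dentry + vfsmount). */
    struct path {
        const char *mnt;
        const char *name;
    };

    /* Hypothetical predicate playing the role of gr_acl_handle_setxattr(). */
    static bool policy_allows_setxattr(const struct path *p)
    {
        return p->mnt != NULL;
    }

    static int do_setxattr(const struct path *p, const char *attr)
    {
        if (!policy_allows_setxattr(p))
            return -EACCES;           /* denied before any attribute is changed */
        printf("set %s on %s\n", attr, p->name);
        return 0;
    }

    int main(void)
    {
        struct path good = { "/", "file" }, bad = { NULL, "file" };

        printf("%d %d\n", do_setxattr(&good, "user.tag"), do_setxattr(&bad, "user.tag"));
        return 0;
    }
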
48019diff -urNp linux-3.1.4/fs/xfs/xfs_bmap.c linux-3.1.4/fs/xfs/xfs_bmap.c
48020--- linux-3.1.4/fs/xfs/xfs_bmap.c 2011-11-11 15:19:27.000000000 -0500
48021+++ linux-3.1.4/fs/xfs/xfs_bmap.c 2011-11-16 18:39:08.000000000 -0500
48022@@ -250,7 +250,7 @@ xfs_bmap_validate_ret(
48023 int nmap,
48024 int ret_nmap);
48025 #else
48026-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
48027+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
48028 #endif /* DEBUG */
48029
48030 STATIC int
48031diff -urNp linux-3.1.4/fs/xfs/xfs_dir2_sf.c linux-3.1.4/fs/xfs/xfs_dir2_sf.c
48032--- linux-3.1.4/fs/xfs/xfs_dir2_sf.c 2011-11-11 15:19:27.000000000 -0500
48033+++ linux-3.1.4/fs/xfs/xfs_dir2_sf.c 2011-11-16 18:39:08.000000000 -0500
48034@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
48035 }
48036
48037 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
48038- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48039+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
48040+ char name[sfep->namelen];
48041+ memcpy(name, sfep->name, sfep->namelen);
48042+ if (filldir(dirent, name, sfep->namelen,
48043+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
48044+ *offset = off & 0x7fffffff;
48045+ return 0;
48046+ }
48047+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48048 off & 0x7fffffff, ino, DT_UNKNOWN)) {
48049 *offset = off & 0x7fffffff;
48050 return 0;
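In the shortform-directory hunk above, when the entry name points into the inode's inline data area the patch first copies it into an on-stack buffer and hands that copy to filldir(), so the callback never works on a pointer into the inode fork itself. A sketch of the copy-before-callback step, with illustrative names:

    #include <stdio.h>
    #include <string.h>

    static int emit(const char *name, int namlen)
    {
        printf("%.*s\n", namlen, name);
        return 0;
    }

    int main(void)
    {
        const char inline_data[] = "fileA";   /* stands in for if_inline_data */
        char name[sizeof inline_data];        /* local copy, as in the patched hunk */

        memcpy(name, inline_data, sizeof inline_data);
        return emit(name, (int)strlen(name));
    }
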
48051diff -urNp linux-3.1.4/fs/xfs/xfs_ioctl.c linux-3.1.4/fs/xfs/xfs_ioctl.c
48052--- linux-3.1.4/fs/xfs/xfs_ioctl.c 2011-11-11 15:19:27.000000000 -0500
48053+++ linux-3.1.4/fs/xfs/xfs_ioctl.c 2011-11-16 18:39:08.000000000 -0500
48054@@ -128,7 +128,7 @@ xfs_find_handle(
48055 }
48056
48057 error = -EFAULT;
48058- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
48059+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
48060 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
48061 goto out_put;
48062
48063diff -urNp linux-3.1.4/fs/xfs/xfs_iops.c linux-3.1.4/fs/xfs/xfs_iops.c
48064--- linux-3.1.4/fs/xfs/xfs_iops.c 2011-11-11 15:19:27.000000000 -0500
48065+++ linux-3.1.4/fs/xfs/xfs_iops.c 2011-11-16 18:39:08.000000000 -0500
48066@@ -446,7 +446,7 @@ xfs_vn_put_link(
48067 struct nameidata *nd,
48068 void *p)
48069 {
48070- char *s = nd_get_link(nd);
48071+ const char *s = nd_get_link(nd);
48072
48073 if (!IS_ERR(s))
48074 kfree(s);
48075diff -urNp linux-3.1.4/fs/xfs/xfs_vnodeops.c linux-3.1.4/fs/xfs/xfs_vnodeops.c
48076--- linux-3.1.4/fs/xfs/xfs_vnodeops.c 2011-11-11 15:19:27.000000000 -0500
48077+++ linux-3.1.4/fs/xfs/xfs_vnodeops.c 2011-11-18 18:54:56.000000000 -0500
48078@@ -123,13 +123,17 @@ xfs_readlink(
48079
48080 xfs_ilock(ip, XFS_ILOCK_SHARED);
48081
48082- ASSERT(S_ISLNK(ip->i_d.di_mode));
48083- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
48084-
48085 pathlen = ip->i_d.di_size;
48086 if (!pathlen)
48087 goto out;
48088
48089+ if (pathlen > MAXPATHLEN) {
48090+ xfs_alert(mp, "%s: inode (%llu) symlink length (%d) too long",
48091+ __func__, (unsigned long long)ip->i_ino, pathlen);
48092+ ASSERT(0);
48093+ return XFS_ERROR(EFSCORRUPTED);
48094+ }
48095+
48096 if (ip->i_df.if_flags & XFS_IFINLINE) {
48097 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
48098 link[pathlen] = '\0';
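xfs_readlink() above replaces two debug-only ASSERTs with a run-time check: an on-disk symlink length larger than MAXPATHLEN is logged and rejected as EFSCORRUPTED instead of being copied into the caller's MAXPATHLEN-sized buffer on production kernels. A minimal sketch of the bounds check, with -1 standing in for XFS_ERROR(EFSCORRUPTED):

    #include <stdio.h>
    #include <string.h>

    #define MAXPATHLEN 1024

    static int read_link(char *link, const char *disk_data, size_t pathlen)
    {
        if (pathlen == 0) {
            link[0] = '\0';
            return 0;
        }
        if (pathlen > MAXPATHLEN) {   /* untrusted on-disk value: reject, don't copy */
            fprintf(stderr, "symlink length %zu too long\n", pathlen);
            return -1;                /* stand-in for XFS_ERROR(EFSCORRUPTED) */
        }
        memcpy(link, disk_data, pathlen);
        link[pathlen] = '\0';
        return 0;
    }

    int main(void)
    {
        char buf[MAXPATHLEN + 1];

        if (read_link(buf, "target", 6) != 0)
            return 1;
        printf("%s\n", buf);
        return 0;
    }
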
48099diff -urNp linux-3.1.4/grsecurity/gracl_alloc.c linux-3.1.4/grsecurity/gracl_alloc.c
48100--- linux-3.1.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
48101+++ linux-3.1.4/grsecurity/gracl_alloc.c 2011-11-16 18:40:31.000000000 -0500
48102@@ -0,0 +1,105 @@
48103+#include <linux/kernel.h>
48104+#include <linux/mm.h>
48105+#include <linux/slab.h>
48106+#include <linux/vmalloc.h>
48107+#include <linux/gracl.h>
48108+#include <linux/grsecurity.h>
48109+
48110+static unsigned long alloc_stack_next = 1;
48111+static unsigned long alloc_stack_size = 1;
48112+static void **alloc_stack;
48113+
48114+static __inline__ int
48115+alloc_pop(void)
48116+{
48117+ if (alloc_stack_next == 1)
48118+ return 0;
48119+
48120+ kfree(alloc_stack[alloc_stack_next - 2]);
48121+
48122+ alloc_stack_next--;
48123+
48124+ return 1;
48125+}
48126+
48127+static __inline__ int
48128+alloc_push(void *buf)
48129+{
48130+ if (alloc_stack_next >= alloc_stack_size)
48131+ return 1;
48132+
48133+ alloc_stack[alloc_stack_next - 1] = buf;
48134+
48135+ alloc_stack_next++;
48136+
48137+ return 0;
48138+}
48139+
48140+void *
48141+acl_alloc(unsigned long len)
48142+{
48143+ void *ret = NULL;
48144+
48145+ if (!len || len > PAGE_SIZE)
48146+ goto out;
48147+
48148+ ret = kmalloc(len, GFP_KERNEL);
48149+
48150+ if (ret) {
48151+ if (alloc_push(ret)) {
48152+ kfree(ret);
48153+ ret = NULL;
48154+ }
48155+ }
48156+
48157+out:
48158+ return ret;
48159+}
48160+
48161+void *
48162+acl_alloc_num(unsigned long num, unsigned long len)
48163+{
48164+ if (!len || (num > (PAGE_SIZE / len)))
48165+ return NULL;
48166+
48167+ return acl_alloc(num * len);
48168+}
48169+
48170+void
48171+acl_free_all(void)
48172+{
48173+ if (gr_acl_is_enabled() || !alloc_stack)
48174+ return;
48175+
48176+ while (alloc_pop()) ;
48177+
48178+ if (alloc_stack) {
48179+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
48180+ kfree(alloc_stack);
48181+ else
48182+ vfree(alloc_stack);
48183+ }
48184+
48185+ alloc_stack = NULL;
48186+ alloc_stack_size = 1;
48187+ alloc_stack_next = 1;
48188+
48189+ return;
48190+}
48191+
48192+int
48193+acl_alloc_stack_init(unsigned long size)
48194+{
48195+ if ((size * sizeof (void *)) <= PAGE_SIZE)
48196+ alloc_stack =
48197+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
48198+ else
48199+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
48200+
48201+ alloc_stack_size = size;
48202+
48203+ if (!alloc_stack)
48204+ return 0;
48205+ else
48206+ return 1;
48207+}
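grsecurity/gracl_alloc.c above implements a small allocation stack for the RBAC policy: acl_alloc() refuses requests larger than PAGE_SIZE, records every returned pointer in a preallocated array, and acl_free_all() later pops and frees the lot in one pass. A user-space sketch of the same tracked-allocation idea (all names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    static void **stack;
    static size_t stack_size, stack_next;

    static int stack_init(size_t n)
    {
        stack = calloc(n, sizeof *stack);
        stack_size = n;
        stack_next = 0;
        return stack != NULL;
    }

    /* Every successful allocation is remembered so teardown needs no per-object
     * bookkeeping elsewhere. */
    static void *tracked_alloc(size_t len)
    {
        void *p;

        if (stack_next >= stack_size)
            return NULL;              /* no slot left to remember it in */
        p = malloc(len);
        if (p)
            stack[stack_next++] = p;
        return p;
    }

    static void free_all(void)
    {
        while (stack_next)
            free(stack[--stack_next]);
        free(stack);
        stack = NULL;
        stack_size = 0;
    }

    int main(void)
    {
        if (!stack_init(8) || !tracked_alloc(64) || !tracked_alloc(128))
            return 1;
        printf("tracked %zu allocations\n", stack_next);
        free_all();
        return 0;
    }
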
48208diff -urNp linux-3.1.4/grsecurity/gracl.c linux-3.1.4/grsecurity/gracl.c
48209--- linux-3.1.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
48210+++ linux-3.1.4/grsecurity/gracl.c 2011-11-16 19:31:00.000000000 -0500
48211@@ -0,0 +1,4156 @@
48212+#include <linux/kernel.h>
48213+#include <linux/module.h>
48214+#include <linux/sched.h>
48215+#include <linux/mm.h>
48216+#include <linux/file.h>
48217+#include <linux/fs.h>
48218+#include <linux/namei.h>
48219+#include <linux/mount.h>
48220+#include <linux/tty.h>
48221+#include <linux/proc_fs.h>
48222+#include <linux/lglock.h>
48223+#include <linux/slab.h>
48224+#include <linux/vmalloc.h>
48225+#include <linux/types.h>
48226+#include <linux/sysctl.h>
48227+#include <linux/netdevice.h>
48228+#include <linux/ptrace.h>
48229+#include <linux/gracl.h>
48230+#include <linux/gralloc.h>
48231+#include <linux/grsecurity.h>
48232+#include <linux/grinternal.h>
48233+#include <linux/pid_namespace.h>
48234+#include <linux/fdtable.h>
48235+#include <linux/percpu.h>
48236+
48237+#include <asm/uaccess.h>
48238+#include <asm/errno.h>
48239+#include <asm/mman.h>
48240+
48241+static struct acl_role_db acl_role_set;
48242+static struct name_db name_set;
48243+static struct inodev_db inodev_set;
48244+
48245+/* for keeping track of userspace pointers used for subjects, so we
48246+ can share references in the kernel as well
48247+*/
48248+
48249+static struct path real_root;
48250+
48251+static struct acl_subj_map_db subj_map_set;
48252+
48253+static struct acl_role_label *default_role;
48254+
48255+static struct acl_role_label *role_list;
48256+
48257+static u16 acl_sp_role_value;
48258+
48259+extern char *gr_shared_page[4];
48260+static DEFINE_MUTEX(gr_dev_mutex);
48261+DEFINE_RWLOCK(gr_inode_lock);
48262+
48263+struct gr_arg *gr_usermode;
48264+
48265+static unsigned int gr_status __read_only = GR_STATUS_INIT;
48266+
48267+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48268+extern void gr_clear_learn_entries(void);
48269+
48270+#ifdef CONFIG_GRKERNSEC_RESLOG
48271+extern void gr_log_resource(const struct task_struct *task,
48272+ const int res, const unsigned long wanted, const int gt);
48273+#endif
48274+
48275+unsigned char *gr_system_salt;
48276+unsigned char *gr_system_sum;
48277+
48278+static struct sprole_pw **acl_special_roles = NULL;
48279+static __u16 num_sprole_pws = 0;
48280+
48281+static struct acl_role_label *kernel_role = NULL;
48282+
48283+static unsigned int gr_auth_attempts = 0;
48284+static unsigned long gr_auth_expires = 0UL;
48285+
48286+#ifdef CONFIG_NET
48287+extern struct vfsmount *sock_mnt;
48288+#endif
48289+
48290+extern struct vfsmount *pipe_mnt;
48291+extern struct vfsmount *shm_mnt;
48292+#ifdef CONFIG_HUGETLBFS
48293+extern struct vfsmount *hugetlbfs_vfsmount;
48294+#endif
48295+
48296+static struct acl_object_label *fakefs_obj_rw;
48297+static struct acl_object_label *fakefs_obj_rwx;
48298+
48299+extern int gr_init_uidset(void);
48300+extern void gr_free_uidset(void);
48301+extern void gr_remove_uid(uid_t uid);
48302+extern int gr_find_uid(uid_t uid);
48303+
48304+DECLARE_BRLOCK(vfsmount_lock);
48305+
48306+__inline__ int
48307+gr_acl_is_enabled(void)
48308+{
48309+ return (gr_status & GR_READY);
48310+}
48311+
48312+#ifdef CONFIG_BTRFS_FS
48313+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48314+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48315+#endif
48316+
48317+static inline dev_t __get_dev(const struct dentry *dentry)
48318+{
48319+#ifdef CONFIG_BTRFS_FS
48320+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48321+ return get_btrfs_dev_from_inode(dentry->d_inode);
48322+ else
48323+#endif
48324+ return dentry->d_inode->i_sb->s_dev;
48325+}
48326+
48327+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48328+{
48329+ return __get_dev(dentry);
48330+}
48331+
48332+static char gr_task_roletype_to_char(struct task_struct *task)
48333+{
48334+ switch (task->role->roletype &
48335+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48336+ GR_ROLE_SPECIAL)) {
48337+ case GR_ROLE_DEFAULT:
48338+ return 'D';
48339+ case GR_ROLE_USER:
48340+ return 'U';
48341+ case GR_ROLE_GROUP:
48342+ return 'G';
48343+ case GR_ROLE_SPECIAL:
48344+ return 'S';
48345+ }
48346+
48347+ return 'X';
48348+}
48349+
48350+char gr_roletype_to_char(void)
48351+{
48352+ return gr_task_roletype_to_char(current);
48353+}
48354+
48355+__inline__ int
48356+gr_acl_tpe_check(void)
48357+{
48358+ if (unlikely(!(gr_status & GR_READY)))
48359+ return 0;
48360+ if (current->role->roletype & GR_ROLE_TPE)
48361+ return 1;
48362+ else
48363+ return 0;
48364+}
48365+
48366+int
48367+gr_handle_rawio(const struct inode *inode)
48368+{
48369+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48370+ if (inode && S_ISBLK(inode->i_mode) &&
48371+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48372+ !capable(CAP_SYS_RAWIO))
48373+ return 1;
48374+#endif
48375+ return 0;
48376+}
48377+
48378+static int
48379+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48380+{
48381+ if (likely(lena != lenb))
48382+ return 0;
48383+
48384+ return !memcmp(a, b, lena);
48385+}
48386+
48387+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48388+{
48389+ *buflen -= namelen;
48390+ if (*buflen < 0)
48391+ return -ENAMETOOLONG;
48392+ *buffer -= namelen;
48393+ memcpy(*buffer, str, namelen);
48394+ return 0;
48395+}
48396+
48397+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48398+{
48399+ return prepend(buffer, buflen, name->name, name->len);
48400+}
48401+
48402+static int prepend_path(const struct path *path, struct path *root,
48403+ char **buffer, int *buflen)
48404+{
48405+ struct dentry *dentry = path->dentry;
48406+ struct vfsmount *vfsmnt = path->mnt;
48407+ bool slash = false;
48408+ int error = 0;
48409+
48410+ while (dentry != root->dentry || vfsmnt != root->mnt) {
48411+ struct dentry * parent;
48412+
48413+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48414+ /* Global root? */
48415+ if (vfsmnt->mnt_parent == vfsmnt) {
48416+ goto out;
48417+ }
48418+ dentry = vfsmnt->mnt_mountpoint;
48419+ vfsmnt = vfsmnt->mnt_parent;
48420+ continue;
48421+ }
48422+ parent = dentry->d_parent;
48423+ prefetch(parent);
48424+ spin_lock(&dentry->d_lock);
48425+ error = prepend_name(buffer, buflen, &dentry->d_name);
48426+ spin_unlock(&dentry->d_lock);
48427+ if (!error)
48428+ error = prepend(buffer, buflen, "/", 1);
48429+ if (error)
48430+ break;
48431+
48432+ slash = true;
48433+ dentry = parent;
48434+ }
48435+
48436+out:
48437+ if (!error && !slash)
48438+ error = prepend(buffer, buflen, "/", 1);
48439+
48440+ return error;
48441+}
48442+
48443+/* this must be called with vfsmount_lock and rename_lock held */
48444+
48445+static char *__our_d_path(const struct path *path, struct path *root,
48446+ char *buf, int buflen)
48447+{
48448+ char *res = buf + buflen;
48449+ int error;
48450+
48451+ prepend(&res, &buflen, "\0", 1);
48452+ error = prepend_path(path, root, &res, &buflen);
48453+ if (error)
48454+ return ERR_PTR(error);
48455+
48456+ return res;
48457+}
48458+
48459+static char *
48460+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48461+{
48462+ char *retval;
48463+
48464+ retval = __our_d_path(path, root, buf, buflen);
48465+ if (unlikely(IS_ERR(retval)))
48466+ retval = strcpy(buf, "<path too long>");
48467+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48468+ retval[1] = '\0';
48469+
48470+ return retval;
48471+}
48472+
48473+static char *
48474+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48475+ char *buf, int buflen)
48476+{
48477+ struct path path;
48478+ char *res;
48479+
48480+ path.dentry = (struct dentry *)dentry;
48481+ path.mnt = (struct vfsmount *)vfsmnt;
48482+
48483+ /* we can use real_root.dentry, real_root.mnt, because this is only called
48484+ by the RBAC system */
48485+ res = gen_full_path(&path, &real_root, buf, buflen);
48486+
48487+ return res;
48488+}
48489+
48490+static char *
48491+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48492+ char *buf, int buflen)
48493+{
48494+ char *res;
48495+ struct path path;
48496+ struct path root;
48497+ struct task_struct *reaper = &init_task;
48498+
48499+ path.dentry = (struct dentry *)dentry;
48500+ path.mnt = (struct vfsmount *)vfsmnt;
48501+
48502+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48503+ get_fs_root(reaper->fs, &root);
48504+
48505+ write_seqlock(&rename_lock);
48506+ br_read_lock(vfsmount_lock);
48507+ res = gen_full_path(&path, &root, buf, buflen);
48508+ br_read_unlock(vfsmount_lock);
48509+ write_sequnlock(&rename_lock);
48510+
48511+ path_put(&root);
48512+ return res;
48513+}
48514+
48515+static char *
48516+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48517+{
48518+ char *ret;
48519+ write_seqlock(&rename_lock);
48520+ br_read_lock(vfsmount_lock);
48521+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48522+ PAGE_SIZE);
48523+ br_read_unlock(vfsmount_lock);
48524+ write_sequnlock(&rename_lock);
48525+ return ret;
48526+}
48527+
48528+static char *
48529+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48530+{
48531+ char *ret;
48532+ char *buf;
48533+ int buflen;
48534+
48535+ write_seqlock(&rename_lock);
48536+ br_read_lock(vfsmount_lock);
48537+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48538+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48539+ buflen = (int)(ret - buf);
48540+ if (buflen >= 5)
48541+ prepend(&ret, &buflen, "/proc", 5);
48542+ else
48543+ ret = strcpy(buf, "<path too long>");
48544+ br_read_unlock(vfsmount_lock);
48545+ write_sequnlock(&rename_lock);
48546+ return ret;
48547+}
48548+
48549+char *
48550+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48551+{
48552+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48553+ PAGE_SIZE);
48554+}
48555+
48556+char *
48557+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48558+{
48559+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48560+ PAGE_SIZE);
48561+}
48562+
48563+char *
48564+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48565+{
48566+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48567+ PAGE_SIZE);
48568+}
48569+
48570+char *
48571+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48572+{
48573+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48574+ PAGE_SIZE);
48575+}
48576+
48577+char *
48578+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48579+{
48580+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48581+ PAGE_SIZE);
48582+}
48583+
48584+__inline__ __u32
48585+to_gr_audit(const __u32 reqmode)
48586+{
48587+ /* masks off auditable permission flags, then shifts them to create
48588+ auditing flags, and adds the special case of append auditing if
48589+ we're requesting write */
48590+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48591+}
48592+
48593+struct acl_subject_label *
48594+lookup_subject_map(const struct acl_subject_label *userp)
48595+{
48596+ unsigned int index = shash(userp, subj_map_set.s_size);
48597+ struct subject_map *match;
48598+
48599+ match = subj_map_set.s_hash[index];
48600+
48601+ while (match && match->user != userp)
48602+ match = match->next;
48603+
48604+ if (match != NULL)
48605+ return match->kernel;
48606+ else
48607+ return NULL;
48608+}
48609+
48610+static void
48611+insert_subj_map_entry(struct subject_map *subjmap)
48612+{
48613+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48614+ struct subject_map **curr;
48615+
48616+ subjmap->prev = NULL;
48617+
48618+ curr = &subj_map_set.s_hash[index];
48619+ if (*curr != NULL)
48620+ (*curr)->prev = subjmap;
48621+
48622+ subjmap->next = *curr;
48623+ *curr = subjmap;
48624+
48625+ return;
48626+}
48627+
48628+static struct acl_role_label *
48629+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48630+ const gid_t gid)
48631+{
48632+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48633+ struct acl_role_label *match;
48634+ struct role_allowed_ip *ipp;
48635+ unsigned int x;
48636+ u32 curr_ip = task->signal->curr_ip;
48637+
48638+ task->signal->saved_ip = curr_ip;
48639+
48640+ match = acl_role_set.r_hash[index];
48641+
48642+ while (match) {
48643+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48644+ for (x = 0; x < match->domain_child_num; x++) {
48645+ if (match->domain_children[x] == uid)
48646+ goto found;
48647+ }
48648+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48649+ break;
48650+ match = match->next;
48651+ }
48652+found:
48653+ if (match == NULL) {
48654+ try_group:
48655+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48656+ match = acl_role_set.r_hash[index];
48657+
48658+ while (match) {
48659+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48660+ for (x = 0; x < match->domain_child_num; x++) {
48661+ if (match->domain_children[x] == gid)
48662+ goto found2;
48663+ }
48664+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48665+ break;
48666+ match = match->next;
48667+ }
48668+found2:
48669+ if (match == NULL)
48670+ match = default_role;
48671+ if (match->allowed_ips == NULL)
48672+ return match;
48673+ else {
48674+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48675+ if (likely
48676+ ((ntohl(curr_ip) & ipp->netmask) ==
48677+ (ntohl(ipp->addr) & ipp->netmask)))
48678+ return match;
48679+ }
48680+ match = default_role;
48681+ }
48682+ } else if (match->allowed_ips == NULL) {
48683+ return match;
48684+ } else {
48685+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48686+ if (likely
48687+ ((ntohl(curr_ip) & ipp->netmask) ==
48688+ (ntohl(ipp->addr) & ipp->netmask)))
48689+ return match;
48690+ }
48691+ goto try_group;
48692+ }
48693+
48694+ return match;
48695+}
48696+
48697+struct acl_subject_label *
48698+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48699+ const struct acl_role_label *role)
48700+{
48701+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48702+ struct acl_subject_label *match;
48703+
48704+ match = role->subj_hash[index];
48705+
48706+ while (match && (match->inode != ino || match->device != dev ||
48707+ (match->mode & GR_DELETED))) {
48708+ match = match->next;
48709+ }
48710+
48711+ if (match && !(match->mode & GR_DELETED))
48712+ return match;
48713+ else
48714+ return NULL;
48715+}
48716+
48717+struct acl_subject_label *
48718+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48719+ const struct acl_role_label *role)
48720+{
48721+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48722+ struct acl_subject_label *match;
48723+
48724+ match = role->subj_hash[index];
48725+
48726+ while (match && (match->inode != ino || match->device != dev ||
48727+ !(match->mode & GR_DELETED))) {
48728+ match = match->next;
48729+ }
48730+
48731+ if (match && (match->mode & GR_DELETED))
48732+ return match;
48733+ else
48734+ return NULL;
48735+}
48736+
48737+static struct acl_object_label *
48738+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48739+ const struct acl_subject_label *subj)
48740+{
48741+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48742+ struct acl_object_label *match;
48743+
48744+ match = subj->obj_hash[index];
48745+
48746+ while (match && (match->inode != ino || match->device != dev ||
48747+ (match->mode & GR_DELETED))) {
48748+ match = match->next;
48749+ }
48750+
48751+ if (match && !(match->mode & GR_DELETED))
48752+ return match;
48753+ else
48754+ return NULL;
48755+}
48756+
48757+static struct acl_object_label *
48758+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
48759+ const struct acl_subject_label *subj)
48760+{
48761+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48762+ struct acl_object_label *match;
48763+
48764+ match = subj->obj_hash[index];
48765+
48766+ while (match && (match->inode != ino || match->device != dev ||
48767+ !(match->mode & GR_DELETED))) {
48768+ match = match->next;
48769+ }
48770+
48771+ if (match && (match->mode & GR_DELETED))
48772+ return match;
48773+
48774+ match = subj->obj_hash[index];
48775+
48776+ while (match && (match->inode != ino || match->device != dev ||
48777+ (match->mode & GR_DELETED))) {
48778+ match = match->next;
48779+ }
48780+
48781+ if (match && !(match->mode & GR_DELETED))
48782+ return match;
48783+ else
48784+ return NULL;
48785+}
48786+
48787+static struct name_entry *
48788+lookup_name_entry(const char *name)
48789+{
48790+ unsigned int len = strlen(name);
48791+ unsigned int key = full_name_hash(name, len);
48792+ unsigned int index = key % name_set.n_size;
48793+ struct name_entry *match;
48794+
48795+ match = name_set.n_hash[index];
48796+
48797+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48798+ match = match->next;
48799+
48800+ return match;
48801+}
48802+
48803+static struct name_entry *
48804+lookup_name_entry_create(const char *name)
48805+{
48806+ unsigned int len = strlen(name);
48807+ unsigned int key = full_name_hash(name, len);
48808+ unsigned int index = key % name_set.n_size;
48809+ struct name_entry *match;
48810+
48811+ match = name_set.n_hash[index];
48812+
48813+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48814+ !match->deleted))
48815+ match = match->next;
48816+
48817+ if (match && match->deleted)
48818+ return match;
48819+
48820+ match = name_set.n_hash[index];
48821+
48822+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48823+ match->deleted))
48824+ match = match->next;
48825+
48826+ if (match && !match->deleted)
48827+ return match;
48828+ else
48829+ return NULL;
48830+}
48831+
48832+static struct inodev_entry *
48833+lookup_inodev_entry(const ino_t ino, const dev_t dev)
48834+{
48835+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
48836+ struct inodev_entry *match;
48837+
48838+ match = inodev_set.i_hash[index];
48839+
48840+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48841+ match = match->next;
48842+
48843+ return match;
48844+}
48845+
48846+static void
48847+insert_inodev_entry(struct inodev_entry *entry)
48848+{
48849+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48850+ inodev_set.i_size);
48851+ struct inodev_entry **curr;
48852+
48853+ entry->prev = NULL;
48854+
48855+ curr = &inodev_set.i_hash[index];
48856+ if (*curr != NULL)
48857+ (*curr)->prev = entry;
48858+
48859+ entry->next = *curr;
48860+ *curr = entry;
48861+
48862+ return;
48863+}
48864+
48865+static void
48866+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48867+{
48868+ unsigned int index =
48869+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48870+ struct acl_role_label **curr;
48871+ struct acl_role_label *tmp;
48872+
48873+ curr = &acl_role_set.r_hash[index];
48874+
48875+ /* if role was already inserted due to domains and already has
48876+ a role in the same bucket as it attached, then we need to
48877+ combine these two buckets
48878+ */
48879+ if (role->next) {
48880+ tmp = role->next;
48881+ while (tmp->next)
48882+ tmp = tmp->next;
48883+ tmp->next = *curr;
48884+ } else
48885+ role->next = *curr;
48886+ *curr = role;
48887+
48888+ return;
48889+}
48890+
48891+static void
48892+insert_acl_role_label(struct acl_role_label *role)
48893+{
48894+ int i;
48895+
48896+ if (role_list == NULL) {
48897+ role_list = role;
48898+ role->prev = NULL;
48899+ } else {
48900+ role->prev = role_list;
48901+ role_list = role;
48902+ }
48903+
48904+ /* used for hash chains */
48905+ role->next = NULL;
48906+
48907+ if (role->roletype & GR_ROLE_DOMAIN) {
48908+ for (i = 0; i < role->domain_child_num; i++)
48909+ __insert_acl_role_label(role, role->domain_children[i]);
48910+ } else
48911+ __insert_acl_role_label(role, role->uidgid);
48912+}
48913+
48914+static int
48915+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48916+{
48917+ struct name_entry **curr, *nentry;
48918+ struct inodev_entry *ientry;
48919+ unsigned int len = strlen(name);
48920+ unsigned int key = full_name_hash(name, len);
48921+ unsigned int index = key % name_set.n_size;
48922+
48923+ curr = &name_set.n_hash[index];
48924+
48925+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48926+ curr = &((*curr)->next);
48927+
48928+ if (*curr != NULL)
48929+ return 1;
48930+
48931+ nentry = acl_alloc(sizeof (struct name_entry));
48932+ if (nentry == NULL)
48933+ return 0;
48934+ ientry = acl_alloc(sizeof (struct inodev_entry));
48935+ if (ientry == NULL)
48936+ return 0;
48937+ ientry->nentry = nentry;
48938+
48939+ nentry->key = key;
48940+ nentry->name = name;
48941+ nentry->inode = inode;
48942+ nentry->device = device;
48943+ nentry->len = len;
48944+ nentry->deleted = deleted;
48945+
48946+ nentry->prev = NULL;
48947+ curr = &name_set.n_hash[index];
48948+ if (*curr != NULL)
48949+ (*curr)->prev = nentry;
48950+ nentry->next = *curr;
48951+ *curr = nentry;
48952+
48953+ /* insert us into the table searchable by inode/dev */
48954+ insert_inodev_entry(ientry);
48955+
48956+ return 1;
48957+}
48958+
48959+static void
48960+insert_acl_obj_label(struct acl_object_label *obj,
48961+ struct acl_subject_label *subj)
48962+{
48963+ unsigned int index =
48964+ fhash(obj->inode, obj->device, subj->obj_hash_size);
48965+ struct acl_object_label **curr;
48966+
48967+
48968+ obj->prev = NULL;
48969+
48970+ curr = &subj->obj_hash[index];
48971+ if (*curr != NULL)
48972+ (*curr)->prev = obj;
48973+
48974+ obj->next = *curr;
48975+ *curr = obj;
48976+
48977+ return;
48978+}
48979+
48980+static void
48981+insert_acl_subj_label(struct acl_subject_label *obj,
48982+ struct acl_role_label *role)
48983+{
48984+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48985+ struct acl_subject_label **curr;
48986+
48987+ obj->prev = NULL;
48988+
48989+ curr = &role->subj_hash[index];
48990+ if (*curr != NULL)
48991+ (*curr)->prev = obj;
48992+
48993+ obj->next = *curr;
48994+ *curr = obj;
48995+
48996+ return;
48997+}
48998+
48999+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
49000+
49001+static void *
49002+create_table(__u32 * len, int elementsize)
49003+{
49004+ unsigned int table_sizes[] = {
49005+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
49006+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
49007+ 4194301, 8388593, 16777213, 33554393, 67108859
49008+ };
49009+ void *newtable = NULL;
49010+ unsigned int pwr = 0;
49011+
49012+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
49013+ table_sizes[pwr] <= *len)
49014+ pwr++;
49015+
49016+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49017+ return newtable;
49018+
49019+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49020+ newtable =
49021+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49022+ else
49023+ newtable = vmalloc(table_sizes[pwr] * elementsize);
49024+
49025+ *len = table_sizes[pwr];
49026+
49027+ return newtable;
49028+}
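/*
 * Editor's sketch (not part of the patch): the comment above notes that a
 * chained hash performs best at a load factor of about 1, so create_table()
 * rounds the requested element count up to the next prime in a fixed list.
 * This stand-alone sketch reproduces only that size selection (it omits the
 * overflow check and the kmalloc/vmalloc choice); names are hypothetical.
 */
#include <stdio.h>

static unsigned int next_table_size(unsigned int len)
{
	static const unsigned int table_sizes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
		32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
		4194301, 8388593, 16777213, 33554393, 67108859
	};
	unsigned int pwr = 0;

	while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
	       table_sizes[pwr] <= len)
		pwr++;

	if (table_sizes[pwr] <= len)
		return 0;	/* requested size too large for the table */
	return table_sizes[pwr];
}

int main(void)
{
	printf("%u\n", next_table_size(1000));	/* prints 1021 */
	return 0;
}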
49029+
49030+static int
49031+init_variables(const struct gr_arg *arg)
49032+{
49033+ struct task_struct *reaper = &init_task;
49034+ unsigned int stacksize;
49035+
49036+ subj_map_set.s_size = arg->role_db.num_subjects;
49037+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49038+ name_set.n_size = arg->role_db.num_objects;
49039+ inodev_set.i_size = arg->role_db.num_objects;
49040+
49041+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
49042+ !name_set.n_size || !inodev_set.i_size)
49043+ return 1;
49044+
49045+ if (!gr_init_uidset())
49046+ return 1;
49047+
49048+ /* set up the stack that holds allocation info */
49049+
49050+ stacksize = arg->role_db.num_pointers + 5;
49051+
49052+ if (!acl_alloc_stack_init(stacksize))
49053+ return 1;
49054+
49055+ /* grab reference for the real root dentry and vfsmount */
49056+ get_fs_root(reaper->fs, &real_root);
49057+
49058+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49059+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49060+#endif
49061+
49062+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49063+ if (fakefs_obj_rw == NULL)
49064+ return 1;
49065+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49066+
49067+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49068+ if (fakefs_obj_rwx == NULL)
49069+ return 1;
49070+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49071+
49072+ subj_map_set.s_hash =
49073+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49074+ acl_role_set.r_hash =
49075+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49076+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49077+ inodev_set.i_hash =
49078+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49079+
49080+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49081+ !name_set.n_hash || !inodev_set.i_hash)
49082+ return 1;
49083+
49084+ memset(subj_map_set.s_hash, 0,
49085+ sizeof(struct subject_map *) * subj_map_set.s_size);
49086+ memset(acl_role_set.r_hash, 0,
49087+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
49088+ memset(name_set.n_hash, 0,
49089+ sizeof (struct name_entry *) * name_set.n_size);
49090+ memset(inodev_set.i_hash, 0,
49091+ sizeof (struct inodev_entry *) * inodev_set.i_size);
49092+
49093+ return 0;
49094+}
49095+
49096+/* free information not needed after startup
49097+ currently contains user->kernel pointer mappings for subjects
49098+*/
49099+
49100+static void
49101+free_init_variables(void)
49102+{
49103+ __u32 i;
49104+
49105+ if (subj_map_set.s_hash) {
49106+ for (i = 0; i < subj_map_set.s_size; i++) {
49107+ if (subj_map_set.s_hash[i]) {
49108+ kfree(subj_map_set.s_hash[i]);
49109+ subj_map_set.s_hash[i] = NULL;
49110+ }
49111+ }
49112+
49113+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49114+ PAGE_SIZE)
49115+ kfree(subj_map_set.s_hash);
49116+ else
49117+ vfree(subj_map_set.s_hash);
49118+ }
49119+
49120+ return;
49121+}
49122+
49123+static void
49124+free_variables(void)
49125+{
49126+ struct acl_subject_label *s;
49127+ struct acl_role_label *r;
49128+ struct task_struct *task, *task2;
49129+ unsigned int x;
49130+
49131+ gr_clear_learn_entries();
49132+
49133+ read_lock(&tasklist_lock);
49134+ do_each_thread(task2, task) {
49135+ task->acl_sp_role = 0;
49136+ task->acl_role_id = 0;
49137+ task->acl = NULL;
49138+ task->role = NULL;
49139+ } while_each_thread(task2, task);
49140+ read_unlock(&tasklist_lock);
49141+
49142+ /* release the reference to the real root dentry and vfsmount */
49143+ path_put(&real_root);
49144+
49145+ /* free all object hash tables */
49146+
49147+ FOR_EACH_ROLE_START(r)
49148+ if (r->subj_hash == NULL)
49149+ goto next_role;
49150+ FOR_EACH_SUBJECT_START(r, s, x)
49151+ if (s->obj_hash == NULL)
49152+ break;
49153+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49154+ kfree(s->obj_hash);
49155+ else
49156+ vfree(s->obj_hash);
49157+ FOR_EACH_SUBJECT_END(s, x)
49158+ FOR_EACH_NESTED_SUBJECT_START(r, s)
49159+ if (s->obj_hash == NULL)
49160+ break;
49161+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49162+ kfree(s->obj_hash);
49163+ else
49164+ vfree(s->obj_hash);
49165+ FOR_EACH_NESTED_SUBJECT_END(s)
49166+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49167+ kfree(r->subj_hash);
49168+ else
49169+ vfree(r->subj_hash);
49170+ r->subj_hash = NULL;
49171+next_role:
49172+ FOR_EACH_ROLE_END(r)
49173+
49174+ acl_free_all();
49175+
49176+ if (acl_role_set.r_hash) {
49177+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49178+ PAGE_SIZE)
49179+ kfree(acl_role_set.r_hash);
49180+ else
49181+ vfree(acl_role_set.r_hash);
49182+ }
49183+ if (name_set.n_hash) {
49184+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
49185+ PAGE_SIZE)
49186+ kfree(name_set.n_hash);
49187+ else
49188+ vfree(name_set.n_hash);
49189+ }
49190+
49191+ if (inodev_set.i_hash) {
49192+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49193+ PAGE_SIZE)
49194+ kfree(inodev_set.i_hash);
49195+ else
49196+ vfree(inodev_set.i_hash);
49197+ }
49198+
49199+ gr_free_uidset();
49200+
49201+ memset(&name_set, 0, sizeof (struct name_db));
49202+ memset(&inodev_set, 0, sizeof (struct inodev_db));
49203+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49204+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49205+
49206+ default_role = NULL;
49207+ role_list = NULL;
49208+
49209+ return;
49210+}
49211+
49212+static __u32
49213+count_user_objs(struct acl_object_label *userp)
49214+{
49215+ struct acl_object_label o_tmp;
49216+ __u32 num = 0;
49217+
49218+ while (userp) {
49219+ if (copy_from_user(&o_tmp, userp,
49220+ sizeof (struct acl_object_label)))
49221+ break;
49222+
49223+ userp = o_tmp.prev;
49224+ num++;
49225+ }
49226+
49227+ return num;
49228+}
49229+
49230+static struct acl_subject_label *
49231+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49232+
49233+static int
49234+copy_user_glob(struct acl_object_label *obj)
49235+{
49236+ struct acl_object_label *g_tmp, **guser;
49237+ unsigned int len;
49238+ char *tmp;
49239+
49240+ if (obj->globbed == NULL)
49241+ return 0;
49242+
49243+ guser = &obj->globbed;
49244+ while (*guser) {
49245+ g_tmp = (struct acl_object_label *)
49246+ acl_alloc(sizeof (struct acl_object_label));
49247+ if (g_tmp == NULL)
49248+ return -ENOMEM;
49249+
49250+ if (copy_from_user(g_tmp, *guser,
49251+ sizeof (struct acl_object_label)))
49252+ return -EFAULT;
49253+
49254+ len = strnlen_user(g_tmp->filename, PATH_MAX);
49255+
49256+ if (!len || len >= PATH_MAX)
49257+ return -EINVAL;
49258+
49259+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49260+ return -ENOMEM;
49261+
49262+ if (copy_from_user(tmp, g_tmp->filename, len))
49263+ return -EFAULT;
49264+ tmp[len-1] = '\0';
49265+ g_tmp->filename = tmp;
49266+
49267+ *guser = g_tmp;
49268+ guser = &(g_tmp->next);
49269+ }
49270+
49271+ return 0;
49272+}
49273+
49274+static int
49275+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49276+ struct acl_role_label *role)
49277+{
49278+ struct acl_object_label *o_tmp;
49279+ unsigned int len;
49280+ int ret;
49281+ char *tmp;
49282+
49283+ while (userp) {
49284+ if ((o_tmp = (struct acl_object_label *)
49285+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
49286+ return -ENOMEM;
49287+
49288+ if (copy_from_user(o_tmp, userp,
49289+ sizeof (struct acl_object_label)))
49290+ return -EFAULT;
49291+
49292+ userp = o_tmp->prev;
49293+
49294+ len = strnlen_user(o_tmp->filename, PATH_MAX);
49295+
49296+ if (!len || len >= PATH_MAX)
49297+ return -EINVAL;
49298+
49299+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49300+ return -ENOMEM;
49301+
49302+ if (copy_from_user(tmp, o_tmp->filename, len))
49303+ return -EFAULT;
49304+ tmp[len-1] = '\0';
49305+ o_tmp->filename = tmp;
49306+
49307+ insert_acl_obj_label(o_tmp, subj);
49308+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49309+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49310+ return -ENOMEM;
49311+
49312+ ret = copy_user_glob(o_tmp);
49313+ if (ret)
49314+ return ret;
49315+
49316+ if (o_tmp->nested) {
49317+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49318+ if (IS_ERR(o_tmp->nested))
49319+ return PTR_ERR(o_tmp->nested);
49320+
49321+ /* insert into nested subject list */
49322+ o_tmp->nested->next = role->hash->first;
49323+ role->hash->first = o_tmp->nested;
49324+ }
49325+ }
49326+
49327+ return 0;
49328+}
49329+
49330+static __u32
49331+count_user_subjs(struct acl_subject_label *userp)
49332+{
49333+ struct acl_subject_label s_tmp;
49334+ __u32 num = 0;
49335+
49336+ while (userp) {
49337+ if (copy_from_user(&s_tmp, userp,
49338+ sizeof (struct acl_subject_label)))
49339+ break;
49340+
49341+ userp = s_tmp.prev;
49342+ /* do not count nested subjects against this count, since
49343+ they are not included in the hash table, but are
49344+ attached to objects. We have already counted
49345+ the subjects in userspace for the allocation
49346+ stack
49347+ */
49348+ if (!(s_tmp.mode & GR_NESTED))
49349+ num++;
49350+ }
49351+
49352+ return num;
49353+}
49354+
49355+static int
49356+copy_user_allowedips(struct acl_role_label *rolep)
49357+{
49358+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49359+
49360+ ruserip = rolep->allowed_ips;
49361+
49362+ while (ruserip) {
49363+ rlast = rtmp;
49364+
49365+ if ((rtmp = (struct role_allowed_ip *)
49366+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49367+ return -ENOMEM;
49368+
49369+ if (copy_from_user(rtmp, ruserip,
49370+ sizeof (struct role_allowed_ip)))
49371+ return -EFAULT;
49372+
49373+ ruserip = rtmp->prev;
49374+
49375+ if (!rlast) {
49376+ rtmp->prev = NULL;
49377+ rolep->allowed_ips = rtmp;
49378+ } else {
49379+ rlast->next = rtmp;
49380+ rtmp->prev = rlast;
49381+ }
49382+
49383+ if (!ruserip)
49384+ rtmp->next = NULL;
49385+ }
49386+
49387+ return 0;
49388+}
49389+
49390+static int
49391+copy_user_transitions(struct acl_role_label *rolep)
49392+{
49393+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
49394+
49395+ unsigned int len;
49396+ char *tmp;
49397+
49398+ rusertp = rolep->transitions;
49399+
49400+ while (rusertp) {
49401+ rlast = rtmp;
49402+
49403+ if ((rtmp = (struct role_transition *)
49404+ acl_alloc(sizeof (struct role_transition))) == NULL)
49405+ return -ENOMEM;
49406+
49407+ if (copy_from_user(rtmp, rusertp,
49408+ sizeof (struct role_transition)))
49409+ return -EFAULT;
49410+
49411+ rusertp = rtmp->prev;
49412+
49413+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49414+
49415+ if (!len || len >= GR_SPROLE_LEN)
49416+ return -EINVAL;
49417+
49418+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49419+ return -ENOMEM;
49420+
49421+ if (copy_from_user(tmp, rtmp->rolename, len))
49422+ return -EFAULT;
49423+ tmp[len-1] = '\0';
49424+ rtmp->rolename = tmp;
49425+
49426+ if (!rlast) {
49427+ rtmp->prev = NULL;
49428+ rolep->transitions = rtmp;
49429+ } else {
49430+ rlast->next = rtmp;
49431+ rtmp->prev = rlast;
49432+ }
49433+
49434+ if (!rusertp)
49435+ rtmp->next = NULL;
49436+ }
49437+
49438+ return 0;
49439+}
49440+
49441+static struct acl_subject_label *
49442+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49443+{
49444+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49445+ unsigned int len;
49446+ char *tmp;
49447+ __u32 num_objs;
49448+ struct acl_ip_label **i_tmp, *i_utmp2;
49449+ struct gr_hash_struct ghash;
49450+ struct subject_map *subjmap;
49451+ unsigned int i_num;
49452+ int err;
49453+
49454+ s_tmp = lookup_subject_map(userp);
49455+
49456+ /* we've already copied this subject into the kernel, just return
49457+ the reference to it, and don't copy it over again
49458+ */
49459+ if (s_tmp)
49460+ return(s_tmp);
49461+
49462+ if ((s_tmp = (struct acl_subject_label *)
49463+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49464+ return ERR_PTR(-ENOMEM);
49465+
49466+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49467+ if (subjmap == NULL)
49468+ return ERR_PTR(-ENOMEM);
49469+
49470+ subjmap->user = userp;
49471+ subjmap->kernel = s_tmp;
49472+ insert_subj_map_entry(subjmap);
49473+
49474+ if (copy_from_user(s_tmp, userp,
49475+ sizeof (struct acl_subject_label)))
49476+ return ERR_PTR(-EFAULT);
49477+
49478+ len = strnlen_user(s_tmp->filename, PATH_MAX);
49479+
49480+ if (!len || len >= PATH_MAX)
49481+ return ERR_PTR(-EINVAL);
49482+
49483+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49484+ return ERR_PTR(-ENOMEM);
49485+
49486+ if (copy_from_user(tmp, s_tmp->filename, len))
49487+ return ERR_PTR(-EFAULT);
49488+ tmp[len-1] = '\0';
49489+ s_tmp->filename = tmp;
49490+
49491+ if (!strcmp(s_tmp->filename, "/"))
49492+ role->root_label = s_tmp;
49493+
49494+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49495+ return ERR_PTR(-EFAULT);
49496+
49497+ /* copy user and group transition tables */
49498+
49499+ if (s_tmp->user_trans_num) {
49500+ uid_t *uidlist;
49501+
49502+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49503+ if (uidlist == NULL)
49504+ return ERR_PTR(-ENOMEM);
49505+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49506+ return ERR_PTR(-EFAULT);
49507+
49508+ s_tmp->user_transitions = uidlist;
49509+ }
49510+
49511+ if (s_tmp->group_trans_num) {
49512+ gid_t *gidlist;
49513+
49514+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49515+ if (gidlist == NULL)
49516+ return ERR_PTR(-ENOMEM);
49517+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49518+ return ERR_PTR(-EFAULT);
49519+
49520+ s_tmp->group_transitions = gidlist;
49521+ }
49522+
49523+ /* set up object hash table */
49524+ num_objs = count_user_objs(ghash.first);
49525+
49526+ s_tmp->obj_hash_size = num_objs;
49527+ s_tmp->obj_hash =
49528+ (struct acl_object_label **)
49529+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49530+
49531+ if (!s_tmp->obj_hash)
49532+ return ERR_PTR(-ENOMEM);
49533+
49534+ memset(s_tmp->obj_hash, 0,
49535+ s_tmp->obj_hash_size *
49536+ sizeof (struct acl_object_label *));
49537+
49538+ /* add in objects */
49539+ err = copy_user_objs(ghash.first, s_tmp, role);
49540+
49541+ if (err)
49542+ return ERR_PTR(err);
49543+
49544+ /* set pointer for parent subject */
49545+ if (s_tmp->parent_subject) {
49546+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49547+
49548+ if (IS_ERR(s_tmp2))
49549+ return s_tmp2;
49550+
49551+ s_tmp->parent_subject = s_tmp2;
49552+ }
49553+
49554+ /* add in ip acls */
49555+
49556+ if (!s_tmp->ip_num) {
49557+ s_tmp->ips = NULL;
49558+ goto insert;
49559+ }
49560+
49561+ i_tmp =
49562+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49563+ sizeof (struct acl_ip_label *));
49564+
49565+ if (!i_tmp)
49566+ return ERR_PTR(-ENOMEM);
49567+
49568+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49569+ *(i_tmp + i_num) =
49570+ (struct acl_ip_label *)
49571+ acl_alloc(sizeof (struct acl_ip_label));
49572+ if (!*(i_tmp + i_num))
49573+ return ERR_PTR(-ENOMEM);
49574+
49575+ if (copy_from_user
49576+ (&i_utmp2, s_tmp->ips + i_num,
49577+ sizeof (struct acl_ip_label *)))
49578+ return ERR_PTR(-EFAULT);
49579+
49580+ if (copy_from_user
49581+ (*(i_tmp + i_num), i_utmp2,
49582+ sizeof (struct acl_ip_label)))
49583+ return ERR_PTR(-EFAULT);
49584+
49585+ if ((*(i_tmp + i_num))->iface == NULL)
49586+ continue;
49587+
49588+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49589+ if (!len || len >= IFNAMSIZ)
49590+ return ERR_PTR(-EINVAL);
49591+ tmp = acl_alloc(len);
49592+ if (tmp == NULL)
49593+ return ERR_PTR(-ENOMEM);
49594+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49595+ return ERR_PTR(-EFAULT);
49596+ (*(i_tmp + i_num))->iface = tmp;
49597+ }
49598+
49599+ s_tmp->ips = i_tmp;
49600+
49601+insert:
49602+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49603+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49604+ return ERR_PTR(-ENOMEM);
49605+
49606+ return s_tmp;
49607+}
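/*
 * Editor's sketch (not part of the patch): do_copy_user_subj() above copies
 * each userspace subject at most once by first consulting a map keyed on the
 * userspace pointer (lookup_subject_map()/insert_subj_map_entry()).  This
 * stand-alone sketch shows the same copy-once pattern; all names are
 * hypothetical, and the "map" is a small flat array with no bounds checking,
 * kept tiny on purpose.
 */
#include <stdio.h>

struct subj { int data; };

struct map_entry {
	const struct subj *user;	/* key: the userspace pointer */
	struct subj *kernel;		/* value: our private copy */
};

static struct map_entry map[64];
static struct subj copies[64];
static int nmap;

static struct subj *copy_subj_once(const struct subj *userp)
{
	for (int i = 0; i < nmap; i++)
		if (map[i].user == userp)
			return map[i].kernel;	/* already copied, reuse it */

	/* first time we see this pointer: make and remember a copy */
	copies[nmap] = *userp;		/* stands in for copy_from_user() */
	map[nmap].user = userp;
	map[nmap].kernel = &copies[nmap];
	return map[nmap++].kernel;
}

int main(void)
{
	struct subj u = { 42 };
	struct subj *a = copy_subj_once(&u);
	struct subj *b = copy_subj_once(&u);

	printf("%d %d\n", a->data, a == b);	/* prints "42 1" */
	return 0;
}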
49608+
49609+static int
49610+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49611+{
49612+ struct acl_subject_label s_pre;
49613+ struct acl_subject_label * ret;
49614+ int err;
49615+
49616+ while (userp) {
49617+ if (copy_from_user(&s_pre, userp,
49618+ sizeof (struct acl_subject_label)))
49619+ return -EFAULT;
49620+
49621+ /* do not add nested subjects here, add
49622+ while parsing objects
49623+ */
49624+
49625+ if (s_pre.mode & GR_NESTED) {
49626+ userp = s_pre.prev;
49627+ continue;
49628+ }
49629+
49630+ ret = do_copy_user_subj(userp, role);
49631+
49632+ err = PTR_ERR(ret);
49633+ if (IS_ERR(ret))
49634+ return err;
49635+
49636+ insert_acl_subj_label(ret, role);
49637+
49638+ userp = s_pre.prev;
49639+ }
49640+
49641+ return 0;
49642+}
49643+
49644+static int
49645+copy_user_acl(struct gr_arg *arg)
49646+{
49647+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49648+ struct sprole_pw *sptmp;
49649+ struct gr_hash_struct *ghash;
49650+ uid_t *domainlist;
49651+ unsigned int r_num;
49652+ unsigned int len;
49653+ char *tmp;
49654+ int err = 0;
49655+ __u16 i;
49656+ __u32 num_subjs;
49657+
49658+ /* we need a default and kernel role */
49659+ if (arg->role_db.num_roles < 2)
49660+ return -EINVAL;
49661+
49662+ /* copy special role authentication info from userspace */
49663+
49664+ num_sprole_pws = arg->num_sprole_pws;
49665+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49666+
49667+ if (!acl_special_roles) {
49668+ err = -ENOMEM;
49669+ goto cleanup;
49670+ }
49671+
49672+ for (i = 0; i < num_sprole_pws; i++) {
49673+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49674+ if (!sptmp) {
49675+ err = -ENOMEM;
49676+ goto cleanup;
49677+ }
49678+ if (copy_from_user(sptmp, arg->sprole_pws + i,
49679+ sizeof (struct sprole_pw))) {
49680+ err = -EFAULT;
49681+ goto cleanup;
49682+ }
49683+
49684+ len =
49685+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49686+
49687+ if (!len || len >= GR_SPROLE_LEN) {
49688+ err = -EINVAL;
49689+ goto cleanup;
49690+ }
49691+
49692+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49693+ err = -ENOMEM;
49694+ goto cleanup;
49695+ }
49696+
49697+ if (copy_from_user(tmp, sptmp->rolename, len)) {
49698+ err = -EFAULT;
49699+ goto cleanup;
49700+ }
49701+ tmp[len-1] = '\0';
49702+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49703+ printk(KERN_ALERT "Copying special role %s\n", tmp);
49704+#endif
49705+ sptmp->rolename = tmp;
49706+ acl_special_roles[i] = sptmp;
49707+ }
49708+
49709+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49710+
49711+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49712+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
49713+
49714+ if (!r_tmp) {
49715+ err = -ENOMEM;
49716+ goto cleanup;
49717+ }
49718+
49719+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
49720+ sizeof (struct acl_role_label *))) {
49721+ err = -EFAULT;
49722+ goto cleanup;
49723+ }
49724+
49725+ if (copy_from_user(r_tmp, r_utmp2,
49726+ sizeof (struct acl_role_label))) {
49727+ err = -EFAULT;
49728+ goto cleanup;
49729+ }
49730+
49731+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49732+
49733+		if (!len || len >= GR_SPROLE_LEN) {
49734+ err = -EINVAL;
49735+ goto cleanup;
49736+ }
49737+
49738+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49739+ err = -ENOMEM;
49740+ goto cleanup;
49741+ }
49742+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
49743+ err = -EFAULT;
49744+ goto cleanup;
49745+ }
49746+ tmp[len-1] = '\0';
49747+ r_tmp->rolename = tmp;
49748+
49749+ if (!strcmp(r_tmp->rolename, "default")
49750+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49751+ default_role = r_tmp;
49752+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49753+ kernel_role = r_tmp;
49754+ }
49755+
49756+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49757+ err = -ENOMEM;
49758+ goto cleanup;
49759+ }
49760+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49761+ err = -EFAULT;
49762+ goto cleanup;
49763+ }
49764+
49765+ r_tmp->hash = ghash;
49766+
49767+ num_subjs = count_user_subjs(r_tmp->hash->first);
49768+
49769+ r_tmp->subj_hash_size = num_subjs;
49770+ r_tmp->subj_hash =
49771+ (struct acl_subject_label **)
49772+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49773+
49774+ if (!r_tmp->subj_hash) {
49775+ err = -ENOMEM;
49776+ goto cleanup;
49777+ }
49778+
49779+ err = copy_user_allowedips(r_tmp);
49780+ if (err)
49781+ goto cleanup;
49782+
49783+ /* copy domain info */
49784+ if (r_tmp->domain_children != NULL) {
49785+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49786+ if (domainlist == NULL) {
49787+ err = -ENOMEM;
49788+ goto cleanup;
49789+ }
49790+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49791+ err = -EFAULT;
49792+ goto cleanup;
49793+ }
49794+ r_tmp->domain_children = domainlist;
49795+ }
49796+
49797+ err = copy_user_transitions(r_tmp);
49798+ if (err)
49799+ goto cleanup;
49800+
49801+ memset(r_tmp->subj_hash, 0,
49802+ r_tmp->subj_hash_size *
49803+ sizeof (struct acl_subject_label *));
49804+
49805+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49806+
49807+ if (err)
49808+ goto cleanup;
49809+
49810+ /* set nested subject list to null */
49811+ r_tmp->hash->first = NULL;
49812+
49813+ insert_acl_role_label(r_tmp);
49814+ }
49815+
49816+ goto return_err;
49817+ cleanup:
49818+ free_variables();
49819+ return_err:
49820+ return err;
49821+
49822+}
49823+
49824+static int
49825+gracl_init(struct gr_arg *args)
49826+{
49827+ int error = 0;
49828+
49829+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49830+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49831+
49832+ if (init_variables(args)) {
49833+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49834+ error = -ENOMEM;
49835+ free_variables();
49836+ goto out;
49837+ }
49838+
49839+ error = copy_user_acl(args);
49840+ free_init_variables();
49841+ if (error) {
49842+ free_variables();
49843+ goto out;
49844+ }
49845+
49846+ if ((error = gr_set_acls(0))) {
49847+ free_variables();
49848+ goto out;
49849+ }
49850+
49851+ pax_open_kernel();
49852+ gr_status |= GR_READY;
49853+ pax_close_kernel();
49854+
49855+ out:
49856+ return error;
49857+}
49858+
49859+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
49860+
49861+static int
49862+glob_match(const char *p, const char *n)
49863+{
49864+ char c;
49865+
49866+ while ((c = *p++) != '\0') {
49867+ switch (c) {
49868+ case '?':
49869+ if (*n == '\0')
49870+ return 1;
49871+ else if (*n == '/')
49872+ return 1;
49873+ break;
49874+ case '\\':
49875+ if (*n != c)
49876+ return 1;
49877+ break;
49878+ case '*':
49879+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
49880+ if (*n == '/')
49881+ return 1;
49882+ else if (c == '?') {
49883+ if (*n == '\0')
49884+ return 1;
49885+ else
49886+ ++n;
49887+ }
49888+ }
49889+ if (c == '\0') {
49890+ return 0;
49891+ } else {
49892+ const char *endp;
49893+
49894+ if ((endp = strchr(n, '/')) == NULL)
49895+ endp = n + strlen(n);
49896+
49897+ if (c == '[') {
49898+ for (--p; n < endp; ++n)
49899+ if (!glob_match(p, n))
49900+ return 0;
49901+ } else if (c == '/') {
49902+ while (*n != '\0' && *n != '/')
49903+ ++n;
49904+ if (*n == '/' && !glob_match(p, n + 1))
49905+ return 0;
49906+ } else {
49907+ for (--p; n < endp; ++n)
49908+ if (*n == c && !glob_match(p, n))
49909+ return 0;
49910+ }
49911+
49912+ return 1;
49913+ }
49914+ case '[':
49915+ {
49916+ int not;
49917+ char cold;
49918+
49919+ if (*n == '\0' || *n == '/')
49920+ return 1;
49921+
49922+ not = (*p == '!' || *p == '^');
49923+ if (not)
49924+ ++p;
49925+
49926+ c = *p++;
49927+ for (;;) {
49928+ unsigned char fn = (unsigned char)*n;
49929+
49930+ if (c == '\0')
49931+ return 1;
49932+ else {
49933+ if (c == fn)
49934+ goto matched;
49935+ cold = c;
49936+ c = *p++;
49937+
49938+ if (c == '-' && *p != ']') {
49939+ unsigned char cend = *p++;
49940+
49941+ if (cend == '\0')
49942+ return 1;
49943+
49944+ if (cold <= fn && fn <= cend)
49945+ goto matched;
49946+
49947+ c = *p++;
49948+ }
49949+ }
49950+
49951+ if (c == ']')
49952+ break;
49953+ }
49954+ if (!not)
49955+ return 1;
49956+ break;
49957+ matched:
49958+ while (c != ']') {
49959+ if (c == '\0')
49960+ return 1;
49961+
49962+ c = *p++;
49963+ }
49964+ if (not)
49965+ return 1;
49966+ }
49967+ break;
49968+ default:
49969+ if (c != *n)
49970+ return 1;
49971+ }
49972+
49973+ ++n;
49974+ }
49975+
49976+ if (*n == '\0')
49977+ return 0;
49978+
49979+ if (*n == '/')
49980+ return 0;
49981+
49982+ return 1;
49983+}
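/*
 * Editor's sketch (not part of the patch): glob_match() keeps the glibc
 * fnmatch() convention of returning 0 on a match and nonzero otherwise,
 * which is why chk_glob_label() below tests !glob_match(...).  A thin
 * wrapper (hypothetical name, assumes the glob_match() above is in scope)
 * makes call sites read as an ordinary boolean predicate:
 */
static inline int path_matches_glob(const char *pattern, const char *path)
{
	return glob_match(pattern, path) == 0;
}

/*
 * e.g. path_matches_glob("/bin/ba?h", "/bin/bash") is true, while
 * path_matches_glob("/bin?bash", "/bin/bash") is false, because neither
 * '?' nor '*' will match a '/' in this matcher.
 */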
49984+
49985+static struct acl_object_label *
49986+chk_glob_label(struct acl_object_label *globbed,
49987+ struct dentry *dentry, struct vfsmount *mnt, char **path)
49988+{
49989+ struct acl_object_label *tmp;
49990+
49991+ if (*path == NULL)
49992+ *path = gr_to_filename_nolock(dentry, mnt);
49993+
49994+ tmp = globbed;
49995+
49996+ while (tmp) {
49997+ if (!glob_match(tmp->filename, *path))
49998+ return tmp;
49999+ tmp = tmp->next;
50000+ }
50001+
50002+ return NULL;
50003+}
50004+
50005+static struct acl_object_label *
50006+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50007+ const ino_t curr_ino, const dev_t curr_dev,
50008+ const struct acl_subject_label *subj, char **path, const int checkglob)
50009+{
50010+ struct acl_subject_label *tmpsubj;
50011+ struct acl_object_label *retval;
50012+ struct acl_object_label *retval2;
50013+
50014+ tmpsubj = (struct acl_subject_label *) subj;
50015+ read_lock(&gr_inode_lock);
50016+ do {
50017+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
50018+ if (retval) {
50019+ if (checkglob && retval->globbed) {
50020+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
50021+ (struct vfsmount *)orig_mnt, path);
50022+ if (retval2)
50023+ retval = retval2;
50024+ }
50025+ break;
50026+ }
50027+ } while ((tmpsubj = tmpsubj->parent_subject));
50028+ read_unlock(&gr_inode_lock);
50029+
50030+ return retval;
50031+}
50032+
50033+static __inline__ struct acl_object_label *
50034+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50035+ struct dentry *curr_dentry,
50036+ const struct acl_subject_label *subj, char **path, const int checkglob)
50037+{
50038+ int newglob = checkglob;
50039+ ino_t inode;
50040+ dev_t device;
50041+
50042+	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
50043+	   as we don't want a / * rule to match instead of the / object.
50044+	   don't do this for create lookups that call this function, though, since they're looking up
50045+	   on the parent and thus need globbing checks on all paths
50046+	*/
50047+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
50048+ newglob = GR_NO_GLOB;
50049+
50050+ spin_lock(&curr_dentry->d_lock);
50051+ inode = curr_dentry->d_inode->i_ino;
50052+ device = __get_dev(curr_dentry);
50053+ spin_unlock(&curr_dentry->d_lock);
50054+
50055+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
50056+}
50057+
50058+static struct acl_object_label *
50059+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50060+ const struct acl_subject_label *subj, char *path, const int checkglob)
50061+{
50062+ struct dentry *dentry = (struct dentry *) l_dentry;
50063+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50064+ struct acl_object_label *retval;
50065+ struct dentry *parent;
50066+
50067+ write_seqlock(&rename_lock);
50068+ br_read_lock(vfsmount_lock);
50069+
50070+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
50071+#ifdef CONFIG_NET
50072+ mnt == sock_mnt ||
50073+#endif
50074+#ifdef CONFIG_HUGETLBFS
50075+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
50076+#endif
50077+ /* ignore Eric Biederman */
50078+ IS_PRIVATE(l_dentry->d_inode))) {
50079+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
50080+ goto out;
50081+ }
50082+
50083+ for (;;) {
50084+ if (dentry == real_root.dentry && mnt == real_root.mnt)
50085+ break;
50086+
50087+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50088+ if (mnt->mnt_parent == mnt)
50089+ break;
50090+
50091+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50092+ if (retval != NULL)
50093+ goto out;
50094+
50095+ dentry = mnt->mnt_mountpoint;
50096+ mnt = mnt->mnt_parent;
50097+ continue;
50098+ }
50099+
50100+ parent = dentry->d_parent;
50101+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50102+ if (retval != NULL)
50103+ goto out;
50104+
50105+ dentry = parent;
50106+ }
50107+
50108+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50109+
50110+ /* real_root is pinned so we don't have to hold a reference */
50111+ if (retval == NULL)
50112+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50113+out:
50114+ br_read_unlock(vfsmount_lock);
50115+ write_sequnlock(&rename_lock);
50116+
50117+ BUG_ON(retval == NULL);
50118+
50119+ return retval;
50120+}
50121+
50122+static __inline__ struct acl_object_label *
50123+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50124+ const struct acl_subject_label *subj)
50125+{
50126+ char *path = NULL;
50127+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50128+}
50129+
50130+static __inline__ struct acl_object_label *
50131+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50132+ const struct acl_subject_label *subj)
50133+{
50134+ char *path = NULL;
50135+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50136+}
50137+
50138+static __inline__ struct acl_object_label *
50139+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50140+ const struct acl_subject_label *subj, char *path)
50141+{
50142+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50143+}
50144+
50145+static struct acl_subject_label *
50146+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50147+ const struct acl_role_label *role)
50148+{
50149+ struct dentry *dentry = (struct dentry *) l_dentry;
50150+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50151+ struct acl_subject_label *retval;
50152+ struct dentry *parent;
50153+
50154+ write_seqlock(&rename_lock);
50155+ br_read_lock(vfsmount_lock);
50156+
50157+ for (;;) {
50158+ if (dentry == real_root.dentry && mnt == real_root.mnt)
50159+ break;
50160+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50161+ if (mnt->mnt_parent == mnt)
50162+ break;
50163+
50164+ spin_lock(&dentry->d_lock);
50165+ read_lock(&gr_inode_lock);
50166+ retval =
50167+ lookup_acl_subj_label(dentry->d_inode->i_ino,
50168+ __get_dev(dentry), role);
50169+ read_unlock(&gr_inode_lock);
50170+ spin_unlock(&dentry->d_lock);
50171+ if (retval != NULL)
50172+ goto out;
50173+
50174+ dentry = mnt->mnt_mountpoint;
50175+ mnt = mnt->mnt_parent;
50176+ continue;
50177+ }
50178+
50179+ spin_lock(&dentry->d_lock);
50180+ read_lock(&gr_inode_lock);
50181+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50182+ __get_dev(dentry), role);
50183+ read_unlock(&gr_inode_lock);
50184+ parent = dentry->d_parent;
50185+ spin_unlock(&dentry->d_lock);
50186+
50187+ if (retval != NULL)
50188+ goto out;
50189+
50190+ dentry = parent;
50191+ }
50192+
50193+ spin_lock(&dentry->d_lock);
50194+ read_lock(&gr_inode_lock);
50195+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50196+ __get_dev(dentry), role);
50197+ read_unlock(&gr_inode_lock);
50198+ spin_unlock(&dentry->d_lock);
50199+
50200+ if (unlikely(retval == NULL)) {
50201+ /* real_root is pinned, we don't need to hold a reference */
50202+ read_lock(&gr_inode_lock);
50203+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50204+ __get_dev(real_root.dentry), role);
50205+ read_unlock(&gr_inode_lock);
50206+ }
50207+out:
50208+ br_read_unlock(vfsmount_lock);
50209+ write_sequnlock(&rename_lock);
50210+
50211+ BUG_ON(retval == NULL);
50212+
50213+ return retval;
50214+}
50215+
50216+static void
50217+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50218+{
50219+ struct task_struct *task = current;
50220+ const struct cred *cred = current_cred();
50221+
50222+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50223+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50224+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50225+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50226+
50227+ return;
50228+}
50229+
50230+static void
50231+gr_log_learn_sysctl(const char *path, const __u32 mode)
50232+{
50233+ struct task_struct *task = current;
50234+ const struct cred *cred = current_cred();
50235+
50236+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50237+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50238+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50239+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50240+
50241+ return;
50242+}
50243+
50244+static void
50245+gr_log_learn_id_change(const char type, const unsigned int real,
50246+ const unsigned int effective, const unsigned int fs)
50247+{
50248+ struct task_struct *task = current;
50249+ const struct cred *cred = current_cred();
50250+
50251+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50252+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50253+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50254+ type, real, effective, fs, &task->signal->saved_ip);
50255+
50256+ return;
50257+}
50258+
50259+__u32
50260+gr_search_file(const struct dentry * dentry, const __u32 mode,
50261+ const struct vfsmount * mnt)
50262+{
50263+ __u32 retval = mode;
50264+ struct acl_subject_label *curracl;
50265+ struct acl_object_label *currobj;
50266+
50267+ if (unlikely(!(gr_status & GR_READY)))
50268+ return (mode & ~GR_AUDITS);
50269+
50270+ curracl = current->acl;
50271+
50272+ currobj = chk_obj_label(dentry, mnt, curracl);
50273+ retval = currobj->mode & mode;
50274+
50275+ /* if we're opening a specified transfer file for writing
50276+ (e.g. /dev/initctl), then transfer our role to init
50277+ */
50278+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50279+ current->role->roletype & GR_ROLE_PERSIST)) {
50280+ struct task_struct *task = init_pid_ns.child_reaper;
50281+
50282+ if (task->role != current->role) {
50283+ task->acl_sp_role = 0;
50284+ task->acl_role_id = current->acl_role_id;
50285+ task->role = current->role;
50286+ rcu_read_lock();
50287+ read_lock(&grsec_exec_file_lock);
50288+ gr_apply_subject_to_task(task);
50289+ read_unlock(&grsec_exec_file_lock);
50290+ rcu_read_unlock();
50291+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50292+ }
50293+ }
50294+
50295+ if (unlikely
50296+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50297+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50298+ __u32 new_mode = mode;
50299+
50300+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50301+
50302+ retval = new_mode;
50303+
50304+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50305+ new_mode |= GR_INHERIT;
50306+
50307+ if (!(mode & GR_NOLEARN))
50308+ gr_log_learn(dentry, mnt, new_mode);
50309+ }
50310+
50311+ return retval;
50312+}
50313+
50314+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50315+ const struct dentry *parent,
50316+ const struct vfsmount *mnt)
50317+{
50318+ struct name_entry *match;
50319+ struct acl_object_label *matchpo;
50320+ struct acl_subject_label *curracl;
50321+ char *path;
50322+
50323+ if (unlikely(!(gr_status & GR_READY)))
50324+ return NULL;
50325+
50326+ preempt_disable();
50327+ path = gr_to_filename_rbac(new_dentry, mnt);
50328+ match = lookup_name_entry_create(path);
50329+
50330+ curracl = current->acl;
50331+
50332+ if (match) {
50333+ read_lock(&gr_inode_lock);
50334+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50335+ read_unlock(&gr_inode_lock);
50336+
50337+ if (matchpo) {
50338+ preempt_enable();
50339+ return matchpo;
50340+ }
50341+ }
50342+
50343+ // lookup parent
50344+
50345+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50346+
50347+ preempt_enable();
50348+ return matchpo;
50349+}
50350+
50351+__u32
50352+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50353+ const struct vfsmount * mnt, const __u32 mode)
50354+{
50355+ struct acl_object_label *matchpo;
50356+ __u32 retval;
50357+
50358+ if (unlikely(!(gr_status & GR_READY)))
50359+ return (mode & ~GR_AUDITS);
50360+
50361+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
50362+
50363+ retval = matchpo->mode & mode;
50364+
50365+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50366+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50367+ __u32 new_mode = mode;
50368+
50369+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50370+
50371+ gr_log_learn(new_dentry, mnt, new_mode);
50372+ return new_mode;
50373+ }
50374+
50375+ return retval;
50376+}
50377+
50378+__u32
50379+gr_check_link(const struct dentry * new_dentry,
50380+ const struct dentry * parent_dentry,
50381+ const struct vfsmount * parent_mnt,
50382+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50383+{
50384+ struct acl_object_label *obj;
50385+ __u32 oldmode, newmode;
50386+ __u32 needmode;
50387+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50388+ GR_DELETE | GR_INHERIT;
50389+
50390+ if (unlikely(!(gr_status & GR_READY)))
50391+ return (GR_CREATE | GR_LINK);
50392+
50393+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50394+ oldmode = obj->mode;
50395+
50396+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50397+ newmode = obj->mode;
50398+
50399+ needmode = newmode & checkmodes;
50400+
50401+ // old name for hardlink must have at least the permissions of the new name
50402+ if ((oldmode & needmode) != needmode)
50403+ goto bad;
50404+
50405+ // if old name had restrictions/auditing, make sure the new name does as well
50406+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50407+
50408+ // don't allow hardlinking of suid/sgid files without permission
50409+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50410+ needmode |= GR_SETID;
50411+
50412+ if ((newmode & needmode) != needmode)
50413+ goto bad;
50414+
50415+ // enforce minimum permissions
50416+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50417+ return newmode;
50418+bad:
50419+ needmode = oldmode;
50420+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50421+ needmode |= GR_SETID;
50422+
50423+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50424+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50425+ return (GR_CREATE | GR_LINK);
50426+ } else if (newmode & GR_SUPPRESS)
50427+ return GR_SUPPRESS;
50428+ else
50429+ return 0;
50430+}
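/*
 * Editor's sketch (not part of the patch): the hardlink check above boils
 * down to a mask-subset test -- every permission bit required by the new
 * name (needmode) must already be present on the old name (oldmode).
 * The flag values below are hypothetical stand-ins, not the real GR_* bits.
 */
#include <stdio.h>

#define EX_READ   0x1u
#define EX_WRITE  0x2u
#define EX_EXEC   0x4u
#define EX_SETID  0x8u

static int mode_is_subset(unsigned int oldmode, unsigned int needmode)
{
	/* equivalent to the "(oldmode & needmode) != needmode" test above */
	return (oldmode & needmode) == needmode;
}

int main(void)
{
	unsigned int oldmode = EX_READ | EX_EXEC;

	/* new name only needs read+exec: allowed */
	printf("%d\n", mode_is_subset(oldmode, EX_READ | EX_EXEC));	/* 1 */
	/* new name would also grant write: refused */
	printf("%d\n", mode_is_subset(oldmode, EX_READ | EX_WRITE));	/* 0 */
	return 0;
}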
50431+
50432+int
50433+gr_check_hidden_task(const struct task_struct *task)
50434+{
50435+ if (unlikely(!(gr_status & GR_READY)))
50436+ return 0;
50437+
50438+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50439+ return 1;
50440+
50441+ return 0;
50442+}
50443+
50444+int
50445+gr_check_protected_task(const struct task_struct *task)
50446+{
50447+ if (unlikely(!(gr_status & GR_READY) || !task))
50448+ return 0;
50449+
50450+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50451+ task->acl != current->acl)
50452+ return 1;
50453+
50454+ return 0;
50455+}
50456+
50457+int
50458+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50459+{
50460+ struct task_struct *p;
50461+ int ret = 0;
50462+
50463+ if (unlikely(!(gr_status & GR_READY) || !pid))
50464+ return ret;
50465+
50466+ read_lock(&tasklist_lock);
50467+ do_each_pid_task(pid, type, p) {
50468+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50469+ p->acl != current->acl) {
50470+ ret = 1;
50471+ goto out;
50472+ }
50473+ } while_each_pid_task(pid, type, p);
50474+out:
50475+ read_unlock(&tasklist_lock);
50476+
50477+ return ret;
50478+}
50479+
50480+void
50481+gr_copy_label(struct task_struct *tsk)
50482+{
50483+ tsk->signal->used_accept = 0;
50484+ tsk->acl_sp_role = 0;
50485+ tsk->acl_role_id = current->acl_role_id;
50486+ tsk->acl = current->acl;
50487+ tsk->role = current->role;
50488+ tsk->signal->curr_ip = current->signal->curr_ip;
50489+ tsk->signal->saved_ip = current->signal->saved_ip;
50490+ if (current->exec_file)
50491+ get_file(current->exec_file);
50492+ tsk->exec_file = current->exec_file;
50493+ tsk->is_writable = current->is_writable;
50494+ if (unlikely(current->signal->used_accept)) {
50495+ current->signal->curr_ip = 0;
50496+ current->signal->saved_ip = 0;
50497+ }
50498+
50499+ return;
50500+}
50501+
50502+static void
50503+gr_set_proc_res(struct task_struct *task)
50504+{
50505+ struct acl_subject_label *proc;
50506+ unsigned short i;
50507+
50508+ proc = task->acl;
50509+
50510+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50511+ return;
50512+
50513+ for (i = 0; i < RLIM_NLIMITS; i++) {
50514+ if (!(proc->resmask & (1 << i)))
50515+ continue;
50516+
50517+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50518+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50519+ }
50520+
50521+ return;
50522+}
50523+
50524+extern int __gr_process_user_ban(struct user_struct *user);
50525+
50526+int
50527+gr_check_user_change(int real, int effective, int fs)
50528+{
50529+ unsigned int i;
50530+ __u16 num;
50531+ uid_t *uidlist;
50532+ int curuid;
50533+ int realok = 0;
50534+ int effectiveok = 0;
50535+ int fsok = 0;
50536+
50537+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50538+ struct user_struct *user;
50539+
50540+ if (real == -1)
50541+ goto skipit;
50542+
50543+ user = find_user(real);
50544+ if (user == NULL)
50545+ goto skipit;
50546+
50547+ if (__gr_process_user_ban(user)) {
50548+ /* for find_user */
50549+ free_uid(user);
50550+ return 1;
50551+ }
50552+
50553+ /* for find_user */
50554+ free_uid(user);
50555+
50556+skipit:
50557+#endif
50558+
50559+ if (unlikely(!(gr_status & GR_READY)))
50560+ return 0;
50561+
50562+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50563+ gr_log_learn_id_change('u', real, effective, fs);
50564+
50565+ num = current->acl->user_trans_num;
50566+ uidlist = current->acl->user_transitions;
50567+
50568+ if (uidlist == NULL)
50569+ return 0;
50570+
50571+ if (real == -1)
50572+ realok = 1;
50573+ if (effective == -1)
50574+ effectiveok = 1;
50575+ if (fs == -1)
50576+ fsok = 1;
50577+
50578+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
50579+ for (i = 0; i < num; i++) {
50580+ curuid = (int)uidlist[i];
50581+ if (real == curuid)
50582+ realok = 1;
50583+ if (effective == curuid)
50584+ effectiveok = 1;
50585+ if (fs == curuid)
50586+ fsok = 1;
50587+ }
50588+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
50589+ for (i = 0; i < num; i++) {
50590+ curuid = (int)uidlist[i];
50591+ if (real == curuid)
50592+ break;
50593+ if (effective == curuid)
50594+ break;
50595+ if (fs == curuid)
50596+ break;
50597+ }
50598+ /* not in deny list */
50599+ if (i == num) {
50600+ realok = 1;
50601+ effectiveok = 1;
50602+ fsok = 1;
50603+ }
50604+ }
50605+
50606+ if (realok && effectiveok && fsok)
50607+ return 0;
50608+ else {
50609+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50610+ return 1;
50611+ }
50612+}
50613+
50614+int
50615+gr_check_group_change(int real, int effective, int fs)
50616+{
50617+ unsigned int i;
50618+ __u16 num;
50619+ gid_t *gidlist;
50620+ int curgid;
50621+ int realok = 0;
50622+ int effectiveok = 0;
50623+ int fsok = 0;
50624+
50625+ if (unlikely(!(gr_status & GR_READY)))
50626+ return 0;
50627+
50628+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50629+ gr_log_learn_id_change('g', real, effective, fs);
50630+
50631+ num = current->acl->group_trans_num;
50632+ gidlist = current->acl->group_transitions;
50633+
50634+ if (gidlist == NULL)
50635+ return 0;
50636+
50637+ if (real == -1)
50638+ realok = 1;
50639+ if (effective == -1)
50640+ effectiveok = 1;
50641+ if (fs == -1)
50642+ fsok = 1;
50643+
50644+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
50645+ for (i = 0; i < num; i++) {
50646+ curgid = (int)gidlist[i];
50647+ if (real == curgid)
50648+ realok = 1;
50649+ if (effective == curgid)
50650+ effectiveok = 1;
50651+ if (fs == curgid)
50652+ fsok = 1;
50653+ }
50654+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
50655+ for (i = 0; i < num; i++) {
50656+ curgid = (int)gidlist[i];
50657+ if (real == curgid)
50658+ break;
50659+ if (effective == curgid)
50660+ break;
50661+ if (fs == curgid)
50662+ break;
50663+ }
50664+ /* not in deny list */
50665+ if (i == num) {
50666+ realok = 1;
50667+ effectiveok = 1;
50668+ fsok = 1;
50669+ }
50670+ }
50671+
50672+ if (realok && effectiveok && fsok)
50673+ return 0;
50674+ else {
50675+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50676+ return 1;
50677+ }
50678+}
50679+
50680+void
50681+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50682+{
50683+ struct acl_role_label *role = task->role;
50684+ struct acl_subject_label *subj = NULL;
50685+ struct acl_object_label *obj;
50686+ struct file *filp;
50687+
50688+ if (unlikely(!(gr_status & GR_READY)))
50689+ return;
50690+
50691+ filp = task->exec_file;
50692+
50693+ /* kernel process, we'll give them the kernel role */
50694+ if (unlikely(!filp)) {
50695+ task->role = kernel_role;
50696+ task->acl = kernel_role->root_label;
50697+ return;
50698+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50699+ role = lookup_acl_role_label(task, uid, gid);
50700+
50701+ /* perform subject lookup in possibly new role
50702+ we can use this result below in the case where role == task->role
50703+ */
50704+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50705+
50706+	/* if we changed uid/gid but ended up with the same role
50707+	   and are using inheritance, don't lose the inherited subject:
50708+	   if the current subject differs from what a normal lookup
50709+	   would yield, we arrived at it via inheritance, so don't
50710+	   lose that subject
50711+	*/
50712+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50713+ (subj == task->acl)))
50714+ task->acl = subj;
50715+
50716+ task->role = role;
50717+
50718+ task->is_writable = 0;
50719+
50720+ /* ignore additional mmap checks for processes that are writable
50721+ by the default ACL */
50722+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50723+ if (unlikely(obj->mode & GR_WRITE))
50724+ task->is_writable = 1;
50725+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50726+ if (unlikely(obj->mode & GR_WRITE))
50727+ task->is_writable = 1;
50728+
50729+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50730+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50731+#endif
50732+
50733+ gr_set_proc_res(task);
50734+
50735+ return;
50736+}
50737+
50738+int
50739+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50740+ const int unsafe_share)
50741+{
50742+ struct task_struct *task = current;
50743+ struct acl_subject_label *newacl;
50744+ struct acl_object_label *obj;
50745+ __u32 retmode;
50746+
50747+ if (unlikely(!(gr_status & GR_READY)))
50748+ return 0;
50749+
50750+ newacl = chk_subj_label(dentry, mnt, task->role);
50751+
50752+ task_lock(task);
50753+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
50754+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
50755+ !(task->role->roletype & GR_ROLE_GOD) &&
50756+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
50757+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
50758+ task_unlock(task);
50759+ if (unsafe_share)
50760+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50761+ else
50762+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50763+ return -EACCES;
50764+ }
50765+ task_unlock(task);
50766+
50767+ obj = chk_obj_label(dentry, mnt, task->acl);
50768+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50769+
50770+ if (!(task->acl->mode & GR_INHERITLEARN) &&
50771+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50772+ if (obj->nested)
50773+ task->acl = obj->nested;
50774+ else
50775+ task->acl = newacl;
50776+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50777+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50778+
50779+ task->is_writable = 0;
50780+
50781+ /* ignore additional mmap checks for processes that are writable
50782+ by the default ACL */
50783+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
50784+ if (unlikely(obj->mode & GR_WRITE))
50785+ task->is_writable = 1;
50786+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
50787+ if (unlikely(obj->mode & GR_WRITE))
50788+ task->is_writable = 1;
50789+
50790+ gr_set_proc_res(task);
50791+
50792+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50793+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50794+#endif
50795+ return 0;
50796+}
50797+
50798+/* always called with valid inodev ptr */
50799+static void
50800+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50801+{
50802+ struct acl_object_label *matchpo;
50803+ struct acl_subject_label *matchps;
50804+ struct acl_subject_label *subj;
50805+ struct acl_role_label *role;
50806+ unsigned int x;
50807+
50808+ FOR_EACH_ROLE_START(role)
50809+ FOR_EACH_SUBJECT_START(role, subj, x)
50810+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50811+ matchpo->mode |= GR_DELETED;
50812+ FOR_EACH_SUBJECT_END(subj,x)
50813+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50814+ if (subj->inode == ino && subj->device == dev)
50815+ subj->mode |= GR_DELETED;
50816+ FOR_EACH_NESTED_SUBJECT_END(subj)
50817+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50818+ matchps->mode |= GR_DELETED;
50819+ FOR_EACH_ROLE_END(role)
50820+
50821+ inodev->nentry->deleted = 1;
50822+
50823+ return;
50824+}
50825+
50826+void
50827+gr_handle_delete(const ino_t ino, const dev_t dev)
50828+{
50829+ struct inodev_entry *inodev;
50830+
50831+ if (unlikely(!(gr_status & GR_READY)))
50832+ return;
50833+
50834+ write_lock(&gr_inode_lock);
50835+ inodev = lookup_inodev_entry(ino, dev);
50836+ if (inodev != NULL)
50837+ do_handle_delete(inodev, ino, dev);
50838+ write_unlock(&gr_inode_lock);
50839+
50840+ return;
50841+}
50842+
50843+static void
50844+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50845+ const ino_t newinode, const dev_t newdevice,
50846+ struct acl_subject_label *subj)
50847+{
50848+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50849+ struct acl_object_label *match;
50850+
50851+ match = subj->obj_hash[index];
50852+
50853+ while (match && (match->inode != oldinode ||
50854+ match->device != olddevice ||
50855+ !(match->mode & GR_DELETED)))
50856+ match = match->next;
50857+
50858+ if (match && (match->inode == oldinode)
50859+ && (match->device == olddevice)
50860+ && (match->mode & GR_DELETED)) {
50861+ if (match->prev == NULL) {
50862+ subj->obj_hash[index] = match->next;
50863+ if (match->next != NULL)
50864+ match->next->prev = NULL;
50865+ } else {
50866+ match->prev->next = match->next;
50867+ if (match->next != NULL)
50868+ match->next->prev = match->prev;
50869+ }
50870+ match->prev = NULL;
50871+ match->next = NULL;
50872+ match->inode = newinode;
50873+ match->device = newdevice;
50874+ match->mode &= ~GR_DELETED;
50875+
50876+ insert_acl_obj_label(match, subj);
50877+ }
50878+
50879+ return;
50880+}
50881+
50882+static void
50883+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50884+ const ino_t newinode, const dev_t newdevice,
50885+ struct acl_role_label *role)
50886+{
50887+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50888+ struct acl_subject_label *match;
50889+
50890+ match = role->subj_hash[index];
50891+
50892+ while (match && (match->inode != oldinode ||
50893+ match->device != olddevice ||
50894+ !(match->mode & GR_DELETED)))
50895+ match = match->next;
50896+
50897+ if (match && (match->inode == oldinode)
50898+ && (match->device == olddevice)
50899+ && (match->mode & GR_DELETED)) {
50900+ if (match->prev == NULL) {
50901+ role->subj_hash[index] = match->next;
50902+ if (match->next != NULL)
50903+ match->next->prev = NULL;
50904+ } else {
50905+ match->prev->next = match->next;
50906+ if (match->next != NULL)
50907+ match->next->prev = match->prev;
50908+ }
50909+ match->prev = NULL;
50910+ match->next = NULL;
50911+ match->inode = newinode;
50912+ match->device = newdevice;
50913+ match->mode &= ~GR_DELETED;
50914+
50915+ insert_acl_subj_label(match, role);
50916+ }
50917+
50918+ return;
50919+}
50920+
50921+static void
50922+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50923+ const ino_t newinode, const dev_t newdevice)
50924+{
50925+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50926+ struct inodev_entry *match;
50927+
50928+ match = inodev_set.i_hash[index];
50929+
50930+ while (match && (match->nentry->inode != oldinode ||
50931+ match->nentry->device != olddevice || !match->nentry->deleted))
50932+ match = match->next;
50933+
50934+ if (match && (match->nentry->inode == oldinode)
50935+ && (match->nentry->device == olddevice) &&
50936+ match->nentry->deleted) {
50937+ if (match->prev == NULL) {
50938+ inodev_set.i_hash[index] = match->next;
50939+ if (match->next != NULL)
50940+ match->next->prev = NULL;
50941+ } else {
50942+ match->prev->next = match->next;
50943+ if (match->next != NULL)
50944+ match->next->prev = match->prev;
50945+ }
50946+ match->prev = NULL;
50947+ match->next = NULL;
50948+ match->nentry->inode = newinode;
50949+ match->nentry->device = newdevice;
50950+ match->nentry->deleted = 0;
50951+
50952+ insert_inodev_entry(match);
50953+ }
50954+
50955+ return;
50956+}
50957+
50958+static void
50959+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50960+{
50961+ struct acl_subject_label *subj;
50962+ struct acl_role_label *role;
50963+ unsigned int x;
50964+
50965+ FOR_EACH_ROLE_START(role)
50966+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50967+
50968+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50969+		if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
50970+			subj->inode = ino;
50971+			subj->device = dev;
50972+		}
50973+ FOR_EACH_NESTED_SUBJECT_END(subj)
50974+ FOR_EACH_SUBJECT_START(role, subj, x)
50975+ update_acl_obj_label(matchn->inode, matchn->device,
50976+ ino, dev, subj);
50977+ FOR_EACH_SUBJECT_END(subj,x)
50978+ FOR_EACH_ROLE_END(role)
50979+
50980+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50981+
50982+ return;
50983+}
50984+
50985+static void
50986+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50987+ const struct vfsmount *mnt)
50988+{
50989+ ino_t ino = dentry->d_inode->i_ino;
50990+ dev_t dev = __get_dev(dentry);
50991+
50992+ __do_handle_create(matchn, ino, dev);
50993+
50994+ return;
50995+}
50996+
50997+void
50998+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50999+{
51000+ struct name_entry *matchn;
51001+
51002+ if (unlikely(!(gr_status & GR_READY)))
51003+ return;
51004+
51005+ preempt_disable();
51006+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
51007+
51008+ if (unlikely((unsigned long)matchn)) {
51009+ write_lock(&gr_inode_lock);
51010+ do_handle_create(matchn, dentry, mnt);
51011+ write_unlock(&gr_inode_lock);
51012+ }
51013+ preempt_enable();
51014+
51015+ return;
51016+}
51017+
51018+void
51019+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51020+{
51021+ struct name_entry *matchn;
51022+
51023+ if (unlikely(!(gr_status & GR_READY)))
51024+ return;
51025+
51026+ preempt_disable();
51027+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51028+
51029+ if (unlikely((unsigned long)matchn)) {
51030+ write_lock(&gr_inode_lock);
51031+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51032+ write_unlock(&gr_inode_lock);
51033+ }
51034+ preempt_enable();
51035+
51036+ return;
51037+}
51038+
51039+void
51040+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51041+ struct dentry *old_dentry,
51042+ struct dentry *new_dentry,
51043+ struct vfsmount *mnt, const __u8 replace)
51044+{
51045+ struct name_entry *matchn;
51046+ struct inodev_entry *inodev;
51047+ struct inode *inode = new_dentry->d_inode;
51048+ ino_t old_ino = old_dentry->d_inode->i_ino;
51049+ dev_t old_dev = __get_dev(old_dentry);
51050+
51051+	/* vfs_rename swaps the name and parent link for old_dentry and
51052+	   new_dentry.
51053+	   At this point, old_dentry has the new name, parent link, and
51054+	   inode for the renamed file.
51055+	   If a file is being replaced by a rename, new_dentry has the
51056+	   inode and name for the replaced file.
51057+	*/
51058+
51059+ if (unlikely(!(gr_status & GR_READY)))
51060+ return;
51061+
51062+ preempt_disable();
51063+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51064+
51065+ /* we wouldn't have to check d_inode if it weren't for
51066+ NFS silly-renaming
51067+ */
51068+
51069+ write_lock(&gr_inode_lock);
51070+ if (unlikely(replace && inode)) {
51071+ ino_t new_ino = inode->i_ino;
51072+ dev_t new_dev = __get_dev(new_dentry);
51073+
51074+ inodev = lookup_inodev_entry(new_ino, new_dev);
51075+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51076+ do_handle_delete(inodev, new_ino, new_dev);
51077+ }
51078+
51079+ inodev = lookup_inodev_entry(old_ino, old_dev);
51080+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51081+ do_handle_delete(inodev, old_ino, old_dev);
51082+
51083+ if (unlikely((unsigned long)matchn))
51084+ do_handle_create(matchn, old_dentry, mnt);
51085+
51086+ write_unlock(&gr_inode_lock);
51087+ preempt_enable();
51088+
51089+ return;
51090+}
51091+
51092+static int
51093+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51094+ unsigned char **sum)
51095+{
51096+ struct acl_role_label *r;
51097+ struct role_allowed_ip *ipp;
51098+ struct role_transition *trans;
51099+ unsigned int i;
51100+ int found = 0;
51101+ u32 curr_ip = current->signal->curr_ip;
51102+
51103+ current->signal->saved_ip = curr_ip;
51104+
51105+ /* check transition table */
51106+
51107+ for (trans = current->role->transitions; trans; trans = trans->next) {
51108+ if (!strcmp(rolename, trans->rolename)) {
51109+ found = 1;
51110+ break;
51111+ }
51112+ }
51113+
51114+ if (!found)
51115+ return 0;
51116+
51117+ /* handle special roles that do not require authentication
51118+ and check ip */
51119+
51120+ FOR_EACH_ROLE_START(r)
51121+ if (!strcmp(rolename, r->rolename) &&
51122+ (r->roletype & GR_ROLE_SPECIAL)) {
51123+ found = 0;
51124+ if (r->allowed_ips != NULL) {
51125+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51126+ if ((ntohl(curr_ip) & ipp->netmask) ==
51127+ (ntohl(ipp->addr) & ipp->netmask))
51128+ found = 1;
51129+ }
51130+ } else
51131+ found = 2;
51132+ if (!found)
51133+ return 0;
51134+
51135+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51136+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51137+ *salt = NULL;
51138+ *sum = NULL;
51139+ return 1;
51140+ }
51141+ }
51142+ FOR_EACH_ROLE_END(r)
51143+
51144+ for (i = 0; i < num_sprole_pws; i++) {
51145+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51146+ *salt = acl_special_roles[i]->salt;
51147+ *sum = acl_special_roles[i]->sum;
51148+ return 1;
51149+ }
51150+ }
51151+
51152+ return 0;
51153+}
51154+
51155+static void
51156+assign_special_role(char *rolename)
51157+{
51158+ struct acl_object_label *obj;
51159+ struct acl_role_label *r;
51160+ struct acl_role_label *assigned = NULL;
51161+ struct task_struct *tsk;
51162+ struct file *filp;
51163+
51164+ FOR_EACH_ROLE_START(r)
51165+ if (!strcmp(rolename, r->rolename) &&
51166+ (r->roletype & GR_ROLE_SPECIAL)) {
51167+ assigned = r;
51168+ break;
51169+ }
51170+ FOR_EACH_ROLE_END(r)
51171+
51172+ if (!assigned)
51173+ return;
51174+
51175+ read_lock(&tasklist_lock);
51176+ read_lock(&grsec_exec_file_lock);
51177+
51178+ tsk = current->real_parent;
51179+ if (tsk == NULL)
51180+ goto out_unlock;
51181+
51182+ filp = tsk->exec_file;
51183+ if (filp == NULL)
51184+ goto out_unlock;
51185+
51186+ tsk->is_writable = 0;
51187+
51188+ tsk->acl_sp_role = 1;
51189+ tsk->acl_role_id = ++acl_sp_role_value;
51190+ tsk->role = assigned;
51191+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51192+
51193+ /* ignore additional mmap checks for processes that are writable
51194+ by the default ACL */
51195+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51196+ if (unlikely(obj->mode & GR_WRITE))
51197+ tsk->is_writable = 1;
51198+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51199+ if (unlikely(obj->mode & GR_WRITE))
51200+ tsk->is_writable = 1;
51201+
51202+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51203+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51204+#endif
51205+
51206+out_unlock:
51207+ read_unlock(&grsec_exec_file_lock);
51208+ read_unlock(&tasklist_lock);
51209+ return;
51210+}
51211+
51212+int gr_check_secure_terminal(struct task_struct *task)
51213+{
51214+ struct task_struct *p, *p2, *p3;
51215+ struct files_struct *files;
51216+ struct fdtable *fdt;
51217+ struct file *our_file = NULL, *file;
51218+ int i;
51219+
51220+ if (task->signal->tty == NULL)
51221+ return 1;
51222+
51223+ files = get_files_struct(task);
51224+ if (files != NULL) {
51225+ rcu_read_lock();
51226+ fdt = files_fdtable(files);
51227+ for (i=0; i < fdt->max_fds; i++) {
51228+ file = fcheck_files(files, i);
51229+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51230+ get_file(file);
51231+ our_file = file;
51232+ }
51233+ }
51234+ rcu_read_unlock();
51235+ put_files_struct(files);
51236+ }
51237+
51238+ if (our_file == NULL)
51239+ return 1;
51240+
51241+ read_lock(&tasklist_lock);
51242+ do_each_thread(p2, p) {
51243+ files = get_files_struct(p);
51244+ if (files == NULL ||
51245+ (p->signal && p->signal->tty == task->signal->tty)) {
51246+ if (files != NULL)
51247+ put_files_struct(files);
51248+ continue;
51249+ }
51250+ rcu_read_lock();
51251+ fdt = files_fdtable(files);
51252+ for (i=0; i < fdt->max_fds; i++) {
51253+ file = fcheck_files(files, i);
51254+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51255+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51256+ p3 = task;
51257+ while (p3->pid > 0) {
51258+ if (p3 == p)
51259+ break;
51260+ p3 = p3->real_parent;
51261+ }
51262+ if (p3 == p)
51263+ break;
51264+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51265+ gr_handle_alertkill(p);
51266+ rcu_read_unlock();
51267+ put_files_struct(files);
51268+ read_unlock(&tasklist_lock);
51269+ fput(our_file);
51270+ return 0;
51271+ }
51272+ }
51273+ rcu_read_unlock();
51274+ put_files_struct(files);
51275+ } while_each_thread(p2, p);
51276+ read_unlock(&tasklist_lock);
51277+
51278+ fput(our_file);
51279+ return 1;
51280+}
51281+
51282+ssize_t
51283+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51284+{
51285+ struct gr_arg_wrapper uwrap;
51286+ unsigned char *sprole_salt = NULL;
51287+ unsigned char *sprole_sum = NULL;
51288+ int error = sizeof (struct gr_arg_wrapper);
51289+ int error2 = 0;
51290+
51291+ mutex_lock(&gr_dev_mutex);
51292+
51293+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51294+ error = -EPERM;
51295+ goto out;
51296+ }
51297+
51298+ if (count != sizeof (struct gr_arg_wrapper)) {
51299+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51300+ error = -EINVAL;
51301+ goto out;
51302+ }
51303+
51304+
51305+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51306+ gr_auth_expires = 0;
51307+ gr_auth_attempts = 0;
51308+ }
51309+
51310+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51311+ error = -EFAULT;
51312+ goto out;
51313+ }
51314+
51315+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51316+ error = -EINVAL;
51317+ goto out;
51318+ }
51319+
51320+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51321+ error = -EFAULT;
51322+ goto out;
51323+ }
51324+
51325+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51326+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51327+ time_after(gr_auth_expires, get_seconds())) {
51328+ error = -EBUSY;
51329+ goto out;
51330+ }
51331+
51332+	/* if a non-root user is trying to do anything other than use a
51333+	   special role, do not attempt authentication and do not count
51334+	   it towards authentication locking
51335+	*/
51336+
51337+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51338+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51339+ current_uid()) {
51340+ error = -EPERM;
51341+ goto out;
51342+ }
51343+
51344+ /* ensure pw and special role name are null terminated */
51345+
51346+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51347+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51348+
51349+	/* Okay.
51350+	 * We now have enough of the argument structure (we have yet
51351+	 * to copy_from_user the tables themselves).  Copy the tables
51352+	 * only if we need them, i.e. for loading operations. */
51353+
51354+ switch (gr_usermode->mode) {
51355+ case GR_STATUS:
51356+ if (gr_status & GR_READY) {
51357+ error = 1;
51358+ if (!gr_check_secure_terminal(current))
51359+ error = 3;
51360+ } else
51361+ error = 2;
51362+ goto out;
51363+ case GR_SHUTDOWN:
51364+ if ((gr_status & GR_READY)
51365+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51366+ pax_open_kernel();
51367+ gr_status &= ~GR_READY;
51368+ pax_close_kernel();
51369+
51370+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51371+ free_variables();
51372+ memset(gr_usermode, 0, sizeof (struct gr_arg));
51373+ memset(gr_system_salt, 0, GR_SALT_LEN);
51374+ memset(gr_system_sum, 0, GR_SHA_LEN);
51375+ } else if (gr_status & GR_READY) {
51376+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51377+ error = -EPERM;
51378+ } else {
51379+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51380+ error = -EAGAIN;
51381+ }
51382+ break;
51383+ case GR_ENABLE:
51384+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51385+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51386+ else {
51387+ if (gr_status & GR_READY)
51388+ error = -EAGAIN;
51389+ else
51390+ error = error2;
51391+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51392+ }
51393+ break;
51394+ case GR_RELOAD:
51395+ if (!(gr_status & GR_READY)) {
51396+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51397+ error = -EAGAIN;
51398+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51399+ preempt_disable();
51400+
51401+ pax_open_kernel();
51402+ gr_status &= ~GR_READY;
51403+ pax_close_kernel();
51404+
51405+ free_variables();
51406+ if (!(error2 = gracl_init(gr_usermode))) {
51407+ preempt_enable();
51408+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51409+ } else {
51410+ preempt_enable();
51411+ error = error2;
51412+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51413+ }
51414+ } else {
51415+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51416+ error = -EPERM;
51417+ }
51418+ break;
51419+ case GR_SEGVMOD:
51420+ if (unlikely(!(gr_status & GR_READY))) {
51421+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51422+ error = -EAGAIN;
51423+ break;
51424+ }
51425+
51426+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51427+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51428+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51429+ struct acl_subject_label *segvacl;
51430+ segvacl =
51431+ lookup_acl_subj_label(gr_usermode->segv_inode,
51432+ gr_usermode->segv_device,
51433+ current->role);
51434+ if (segvacl) {
51435+ segvacl->crashes = 0;
51436+ segvacl->expires = 0;
51437+ }
51438+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51439+ gr_remove_uid(gr_usermode->segv_uid);
51440+ }
51441+ } else {
51442+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51443+ error = -EPERM;
51444+ }
51445+ break;
51446+ case GR_SPROLE:
51447+ case GR_SPROLEPAM:
51448+ if (unlikely(!(gr_status & GR_READY))) {
51449+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51450+ error = -EAGAIN;
51451+ break;
51452+ }
51453+
51454+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51455+ current->role->expires = 0;
51456+ current->role->auth_attempts = 0;
51457+ }
51458+
51459+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51460+ time_after(current->role->expires, get_seconds())) {
51461+ error = -EBUSY;
51462+ goto out;
51463+ }
51464+
51465+ if (lookup_special_role_auth
51466+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51467+ && ((!sprole_salt && !sprole_sum)
51468+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51469+ char *p = "";
51470+ assign_special_role(gr_usermode->sp_role);
51471+ read_lock(&tasklist_lock);
51472+ if (current->real_parent)
51473+ p = current->real_parent->role->rolename;
51474+ read_unlock(&tasklist_lock);
51475+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51476+ p, acl_sp_role_value);
51477+ } else {
51478+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51479+ error = -EPERM;
51480+ if(!(current->role->auth_attempts++))
51481+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51482+
51483+ goto out;
51484+ }
51485+ break;
51486+ case GR_UNSPROLE:
51487+ if (unlikely(!(gr_status & GR_READY))) {
51488+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51489+ error = -EAGAIN;
51490+ break;
51491+ }
51492+
51493+ if (current->role->roletype & GR_ROLE_SPECIAL) {
51494+ char *p = "";
51495+ int i = 0;
51496+
51497+ read_lock(&tasklist_lock);
51498+ if (current->real_parent) {
51499+ p = current->real_parent->role->rolename;
51500+ i = current->real_parent->acl_role_id;
51501+ }
51502+ read_unlock(&tasklist_lock);
51503+
51504+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51505+ gr_set_acls(1);
51506+ } else {
51507+ error = -EPERM;
51508+ goto out;
51509+ }
51510+ break;
51511+ default:
51512+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51513+ error = -EINVAL;
51514+ break;
51515+ }
51516+
51517+ if (error != -EPERM)
51518+ goto out;
51519+
51520+ if(!(gr_auth_attempts++))
51521+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51522+
51523+ out:
51524+ mutex_unlock(&gr_dev_mutex);
51525+ return error;
51526+}
51527+
51528+/* must be called with
51529+ rcu_read_lock();
51530+ read_lock(&tasklist_lock);
51531+ read_lock(&grsec_exec_file_lock);
51532+*/
51533+int gr_apply_subject_to_task(struct task_struct *task)
51534+{
51535+ struct acl_object_label *obj;
51536+ char *tmpname;
51537+ struct acl_subject_label *tmpsubj;
51538+ struct file *filp;
51539+ struct name_entry *nmatch;
51540+
51541+ filp = task->exec_file;
51542+ if (filp == NULL)
51543+ return 0;
51544+
51545+	/* the following applies the correct subject
51546+	   to binaries that were already running when
51547+	   the RBAC system was enabled and that have been
51548+	   replaced or deleted since their execution
51549+	   -----
51550+	   when the RBAC system starts, the inode/dev
51551+	   from exec_file will be one the RBAC system
51552+	   is unaware of.  It only knows the inode/dev
51553+	   of the present file on disk, or the absence
51554+	   of it.
51555+	*/
51556+ preempt_disable();
51557+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51558+
51559+ nmatch = lookup_name_entry(tmpname);
51560+ preempt_enable();
51561+ tmpsubj = NULL;
51562+ if (nmatch) {
51563+ if (nmatch->deleted)
51564+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51565+ else
51566+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51567+ if (tmpsubj != NULL)
51568+ task->acl = tmpsubj;
51569+ }
51570+ if (tmpsubj == NULL)
51571+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51572+ task->role);
51573+ if (task->acl) {
51574+ task->is_writable = 0;
51575+ /* ignore additional mmap checks for processes that are writable
51576+ by the default ACL */
51577+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51578+ if (unlikely(obj->mode & GR_WRITE))
51579+ task->is_writable = 1;
51580+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51581+ if (unlikely(obj->mode & GR_WRITE))
51582+ task->is_writable = 1;
51583+
51584+ gr_set_proc_res(task);
51585+
51586+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51587+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51588+#endif
51589+ } else {
51590+ return 1;
51591+ }
51592+
51593+ return 0;
51594+}
51595+
51596+int
51597+gr_set_acls(const int type)
51598+{
51599+ struct task_struct *task, *task2;
51600+ struct acl_role_label *role = current->role;
51601+ __u16 acl_role_id = current->acl_role_id;
51602+ const struct cred *cred;
51603+ int ret;
51604+
51605+ rcu_read_lock();
51606+ read_lock(&tasklist_lock);
51607+ read_lock(&grsec_exec_file_lock);
51608+ do_each_thread(task2, task) {
51609+		/* check to see if we're called from the exit handler;
51610+		   if so, only replace ACLs that have inherited the admin
51611+		   ACL */
51612+
51613+ if (type && (task->role != role ||
51614+ task->acl_role_id != acl_role_id))
51615+ continue;
51616+
51617+ task->acl_role_id = 0;
51618+ task->acl_sp_role = 0;
51619+
51620+ if (task->exec_file) {
51621+ cred = __task_cred(task);
51622+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51623+ ret = gr_apply_subject_to_task(task);
51624+ if (ret) {
51625+ read_unlock(&grsec_exec_file_lock);
51626+ read_unlock(&tasklist_lock);
51627+ rcu_read_unlock();
51628+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51629+ return ret;
51630+ }
51631+ } else {
51632+ // it's a kernel process
51633+ task->role = kernel_role;
51634+ task->acl = kernel_role->root_label;
51635+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51636+ task->acl->mode &= ~GR_PROCFIND;
51637+#endif
51638+ }
51639+ } while_each_thread(task2, task);
51640+ read_unlock(&grsec_exec_file_lock);
51641+ read_unlock(&tasklist_lock);
51642+ rcu_read_unlock();
51643+
51644+ return 0;
51645+}
51646+
51647+void
51648+gr_learn_resource(const struct task_struct *task,
51649+ const int res, const unsigned long wanted, const int gt)
51650+{
51651+ struct acl_subject_label *acl;
51652+ const struct cred *cred;
51653+
51654+ if (unlikely((gr_status & GR_READY) &&
51655+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51656+ goto skip_reslog;
51657+
51658+#ifdef CONFIG_GRKERNSEC_RESLOG
51659+ gr_log_resource(task, res, wanted, gt);
51660+#endif
51661+ skip_reslog:
51662+
51663+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51664+ return;
51665+
51666+ acl = task->acl;
51667+
51668+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51669+ !(acl->resmask & (1 << (unsigned short) res))))
51670+ return;
51671+
51672+ if (wanted >= acl->res[res].rlim_cur) {
51673+ unsigned long res_add;
51674+
51675+ res_add = wanted;
51676+ switch (res) {
51677+ case RLIMIT_CPU:
51678+ res_add += GR_RLIM_CPU_BUMP;
51679+ break;
51680+ case RLIMIT_FSIZE:
51681+ res_add += GR_RLIM_FSIZE_BUMP;
51682+ break;
51683+ case RLIMIT_DATA:
51684+ res_add += GR_RLIM_DATA_BUMP;
51685+ break;
51686+ case RLIMIT_STACK:
51687+ res_add += GR_RLIM_STACK_BUMP;
51688+ break;
51689+ case RLIMIT_CORE:
51690+ res_add += GR_RLIM_CORE_BUMP;
51691+ break;
51692+ case RLIMIT_RSS:
51693+ res_add += GR_RLIM_RSS_BUMP;
51694+ break;
51695+ case RLIMIT_NPROC:
51696+ res_add += GR_RLIM_NPROC_BUMP;
51697+ break;
51698+ case RLIMIT_NOFILE:
51699+ res_add += GR_RLIM_NOFILE_BUMP;
51700+ break;
51701+ case RLIMIT_MEMLOCK:
51702+ res_add += GR_RLIM_MEMLOCK_BUMP;
51703+ break;
51704+ case RLIMIT_AS:
51705+ res_add += GR_RLIM_AS_BUMP;
51706+ break;
51707+ case RLIMIT_LOCKS:
51708+ res_add += GR_RLIM_LOCKS_BUMP;
51709+ break;
51710+ case RLIMIT_SIGPENDING:
51711+ res_add += GR_RLIM_SIGPENDING_BUMP;
51712+ break;
51713+ case RLIMIT_MSGQUEUE:
51714+ res_add += GR_RLIM_MSGQUEUE_BUMP;
51715+ break;
51716+ case RLIMIT_NICE:
51717+ res_add += GR_RLIM_NICE_BUMP;
51718+ break;
51719+ case RLIMIT_RTPRIO:
51720+ res_add += GR_RLIM_RTPRIO_BUMP;
51721+ break;
51722+ case RLIMIT_RTTIME:
51723+ res_add += GR_RLIM_RTTIME_BUMP;
51724+ break;
51725+ }
51726+
51727+ acl->res[res].rlim_cur = res_add;
51728+
51729+ if (wanted > acl->res[res].rlim_max)
51730+ acl->res[res].rlim_max = res_add;
51731+
51732+ /* only log the subject filename, since resource logging is supported for
51733+ single-subject learning only */
51734+ rcu_read_lock();
51735+ cred = __task_cred(task);
51736+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51737+ task->role->roletype, cred->uid, cred->gid, acl->filename,
51738+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51739+ "", (unsigned long) res, &task->signal->saved_ip);
51740+ rcu_read_unlock();
51741+ }
51742+
51743+ return;
51744+}
51745+
51746+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51747+void
51748+pax_set_initial_flags(struct linux_binprm *bprm)
51749+{
51750+ struct task_struct *task = current;
51751+ struct acl_subject_label *proc;
51752+ unsigned long flags;
51753+
51754+ if (unlikely(!(gr_status & GR_READY)))
51755+ return;
51756+
51757+ flags = pax_get_flags(task);
51758+
51759+ proc = task->acl;
51760+
51761+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51762+ flags &= ~MF_PAX_PAGEEXEC;
51763+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51764+ flags &= ~MF_PAX_SEGMEXEC;
51765+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51766+ flags &= ~MF_PAX_RANDMMAP;
51767+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51768+ flags &= ~MF_PAX_EMUTRAMP;
51769+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51770+ flags &= ~MF_PAX_MPROTECT;
51771+
51772+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51773+ flags |= MF_PAX_PAGEEXEC;
51774+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51775+ flags |= MF_PAX_SEGMEXEC;
51776+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51777+ flags |= MF_PAX_RANDMMAP;
51778+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51779+ flags |= MF_PAX_EMUTRAMP;
51780+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51781+ flags |= MF_PAX_MPROTECT;
51782+
51783+ pax_set_flags(task, flags);
51784+
51785+ return;
51786+}
51787+#endif
51788+
51789+#ifdef CONFIG_SYSCTL
51790+/* Eric Biederman likes breaking userland ABI and every inode-based security
51791+ system to save 35kb of memory */
51792+
51793+/* we modify the passed in filename, but adjust it back before returning */
51794+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51795+{
51796+ struct name_entry *nmatch;
51797+ char *p, *lastp = NULL;
51798+ struct acl_object_label *obj = NULL, *tmp;
51799+ struct acl_subject_label *tmpsubj;
51800+ char c = '\0';
51801+
51802+ read_lock(&gr_inode_lock);
51803+
51804+ p = name + len - 1;
51805+ do {
51806+ nmatch = lookup_name_entry(name);
51807+ if (lastp != NULL)
51808+ *lastp = c;
51809+
51810+ if (nmatch == NULL)
51811+ goto next_component;
51812+ tmpsubj = current->acl;
51813+ do {
51814+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51815+ if (obj != NULL) {
51816+ tmp = obj->globbed;
51817+ while (tmp) {
51818+ if (!glob_match(tmp->filename, name)) {
51819+ obj = tmp;
51820+ goto found_obj;
51821+ }
51822+ tmp = tmp->next;
51823+ }
51824+ goto found_obj;
51825+ }
51826+ } while ((tmpsubj = tmpsubj->parent_subject));
51827+next_component:
51828+ /* end case */
51829+ if (p == name)
51830+ break;
51831+
51832+ while (*p != '/')
51833+ p--;
51834+ if (p == name)
51835+ lastp = p + 1;
51836+ else {
51837+ lastp = p;
51838+ p--;
51839+ }
51840+ c = *lastp;
51841+ *lastp = '\0';
51842+ } while (1);
51843+found_obj:
51844+ read_unlock(&gr_inode_lock);
51845+ /* obj returned will always be non-null */
51846+ return obj;
51847+}
51848+
51849+/* returns 0 when allowing, non-zero on error
51850+ op of 0 is used for readdir, so we don't log the names of hidden files
51851+*/
51852+__u32
51853+gr_handle_sysctl(const struct ctl_table *table, const int op)
51854+{
51855+ struct ctl_table *tmp;
51856+ const char *proc_sys = "/proc/sys";
51857+ char *path;
51858+ struct acl_object_label *obj;
51859+ unsigned short len = 0, pos = 0, depth = 0, i;
51860+ __u32 err = 0;
51861+ __u32 mode = 0;
51862+
51863+ if (unlikely(!(gr_status & GR_READY)))
51864+ return 0;
51865+
51866+	/* for now, ignore operations on non-sysctl entries if it's not a
51867+	   readdir */
51868+ if (table->child != NULL && op != 0)
51869+ return 0;
51870+
51871+ mode |= GR_FIND;
51872+	/* it's only a read if it's an entry; read on dirs is for readdir */
51873+ if (op & MAY_READ)
51874+ mode |= GR_READ;
51875+ if (op & MAY_WRITE)
51876+ mode |= GR_WRITE;
51877+
51878+ preempt_disable();
51879+
51880+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51881+
51882+	/* it's only a read/write if it's an actual entry, not a dir
51883+	   (dirs are opened for readdir)
51884+	*/
51885+
51886+ /* convert the requested sysctl entry into a pathname */
51887+
51888+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51889+ len += strlen(tmp->procname);
51890+ len++;
51891+ depth++;
51892+ }
51893+
51894+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51895+ /* deny */
51896+ goto out;
51897+ }
51898+
51899+ memset(path, 0, PAGE_SIZE);
51900+
51901+ memcpy(path, proc_sys, strlen(proc_sys));
51902+
51903+ pos += strlen(proc_sys);
51904+
51905+ for (; depth > 0; depth--) {
51906+ path[pos] = '/';
51907+ pos++;
51908+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51909+ if (depth == i) {
51910+ memcpy(path + pos, tmp->procname,
51911+ strlen(tmp->procname));
51912+ pos += strlen(tmp->procname);
51913+ }
51914+ i++;
51915+ }
51916+ }
51917+
51918+ obj = gr_lookup_by_name(path, pos);
51919+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51920+
51921+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51922+ ((err & mode) != mode))) {
51923+ __u32 new_mode = mode;
51924+
51925+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51926+
51927+ err = 0;
51928+ gr_log_learn_sysctl(path, new_mode);
51929+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51930+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51931+ err = -ENOENT;
51932+ } else if (!(err & GR_FIND)) {
51933+ err = -ENOENT;
51934+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51935+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51936+ path, (mode & GR_READ) ? " reading" : "",
51937+ (mode & GR_WRITE) ? " writing" : "");
51938+ err = -EACCES;
51939+ } else if ((err & mode) != mode) {
51940+ err = -EACCES;
51941+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51942+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51943+ path, (mode & GR_READ) ? " reading" : "",
51944+ (mode & GR_WRITE) ? " writing" : "");
51945+ err = 0;
51946+ } else
51947+ err = 0;
51948+
51949+ out:
51950+ preempt_enable();
51951+
51952+ return err;
51953+}
51954+#endif
51955+
51956+int
51957+gr_handle_proc_ptrace(struct task_struct *task)
51958+{
51959+ struct file *filp;
51960+ struct task_struct *tmp = task;
51961+ struct task_struct *curtemp = current;
51962+ __u32 retmode;
51963+
51964+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51965+ if (unlikely(!(gr_status & GR_READY)))
51966+ return 0;
51967+#endif
51968+
51969+ read_lock(&tasklist_lock);
51970+ read_lock(&grsec_exec_file_lock);
51971+ filp = task->exec_file;
51972+
51973+ while (tmp->pid > 0) {
51974+ if (tmp == curtemp)
51975+ break;
51976+ tmp = tmp->real_parent;
51977+ }
51978+
51979+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51980+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51981+ read_unlock(&grsec_exec_file_lock);
51982+ read_unlock(&tasklist_lock);
51983+ return 1;
51984+ }
51985+
51986+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51987+ if (!(gr_status & GR_READY)) {
51988+ read_unlock(&grsec_exec_file_lock);
51989+ read_unlock(&tasklist_lock);
51990+ return 0;
51991+ }
51992+#endif
51993+
51994+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51995+ read_unlock(&grsec_exec_file_lock);
51996+ read_unlock(&tasklist_lock);
51997+
51998+ if (retmode & GR_NOPTRACE)
51999+ return 1;
52000+
52001+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
52002+ && (current->acl != task->acl || (current->acl != current->role->root_label
52003+ && current->pid != task->pid)))
52004+ return 1;
52005+
52006+ return 0;
52007+}
52008+
52009+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52010+{
52011+ if (unlikely(!(gr_status & GR_READY)))
52012+ return;
52013+
52014+ if (!(current->role->roletype & GR_ROLE_GOD))
52015+ return;
52016+
52017+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52018+ p->role->rolename, gr_task_roletype_to_char(p),
52019+ p->acl->filename);
52020+}
52021+
52022+int
52023+gr_handle_ptrace(struct task_struct *task, const long request)
52024+{
52025+ struct task_struct *tmp = task;
52026+ struct task_struct *curtemp = current;
52027+ __u32 retmode;
52028+
52029+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52030+ if (unlikely(!(gr_status & GR_READY)))
52031+ return 0;
52032+#endif
52033+
52034+ read_lock(&tasklist_lock);
52035+ while (tmp->pid > 0) {
52036+ if (tmp == curtemp)
52037+ break;
52038+ tmp = tmp->real_parent;
52039+ }
52040+
52041+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52042+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52043+ read_unlock(&tasklist_lock);
52044+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52045+ return 1;
52046+ }
52047+ read_unlock(&tasklist_lock);
52048+
52049+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52050+ if (!(gr_status & GR_READY))
52051+ return 0;
52052+#endif
52053+
52054+ read_lock(&grsec_exec_file_lock);
52055+ if (unlikely(!task->exec_file)) {
52056+ read_unlock(&grsec_exec_file_lock);
52057+ return 0;
52058+ }
52059+
52060+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52061+ read_unlock(&grsec_exec_file_lock);
52062+
52063+ if (retmode & GR_NOPTRACE) {
52064+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52065+ return 1;
52066+ }
52067+
52068+ if (retmode & GR_PTRACERD) {
52069+ switch (request) {
52070+ case PTRACE_SEIZE:
52071+ case PTRACE_POKETEXT:
52072+ case PTRACE_POKEDATA:
52073+ case PTRACE_POKEUSR:
52074+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52075+ case PTRACE_SETREGS:
52076+ case PTRACE_SETFPREGS:
52077+#endif
52078+#ifdef CONFIG_X86
52079+ case PTRACE_SETFPXREGS:
52080+#endif
52081+#ifdef CONFIG_ALTIVEC
52082+ case PTRACE_SETVRREGS:
52083+#endif
52084+ return 1;
52085+ default:
52086+ return 0;
52087+ }
52088+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
52089+ !(current->role->roletype & GR_ROLE_GOD) &&
52090+ (current->acl != task->acl)) {
52091+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52092+ return 1;
52093+ }
52094+
52095+ return 0;
52096+}
52097+
52098+static int is_writable_mmap(const struct file *filp)
52099+{
52100+ struct task_struct *task = current;
52101+ struct acl_object_label *obj, *obj2;
52102+
52103+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52104+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52105+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52106+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52107+ task->role->root_label);
52108+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52109+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52110+ return 1;
52111+ }
52112+ }
52113+ return 0;
52114+}
52115+
52116+int
52117+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52118+{
52119+ __u32 mode;
52120+
52121+ if (unlikely(!file || !(prot & PROT_EXEC)))
52122+ return 1;
52123+
52124+ if (is_writable_mmap(file))
52125+ return 0;
52126+
52127+ mode =
52128+ gr_search_file(file->f_path.dentry,
52129+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52130+ file->f_path.mnt);
52131+
52132+ if (!gr_tpe_allow(file))
52133+ return 0;
52134+
52135+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52136+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52137+ return 0;
52138+ } else if (unlikely(!(mode & GR_EXEC))) {
52139+ return 0;
52140+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52141+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52142+ return 1;
52143+ }
52144+
52145+ return 1;
52146+}
52147+
52148+int
52149+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52150+{
52151+ __u32 mode;
52152+
52153+ if (unlikely(!file || !(prot & PROT_EXEC)))
52154+ return 1;
52155+
52156+ if (is_writable_mmap(file))
52157+ return 0;
52158+
52159+ mode =
52160+ gr_search_file(file->f_path.dentry,
52161+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52162+ file->f_path.mnt);
52163+
52164+ if (!gr_tpe_allow(file))
52165+ return 0;
52166+
52167+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52168+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52169+ return 0;
52170+ } else if (unlikely(!(mode & GR_EXEC))) {
52171+ return 0;
52172+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52173+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52174+ return 1;
52175+ }
52176+
52177+ return 1;
52178+}
52179+
52180+void
52181+gr_acl_handle_psacct(struct task_struct *task, const long code)
52182+{
52183+ unsigned long runtime;
52184+ unsigned long cputime;
52185+ unsigned int wday, cday;
52186+ __u8 whr, chr;
52187+ __u8 wmin, cmin;
52188+ __u8 wsec, csec;
52189+ struct timespec timeval;
52190+
52191+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52192+ !(task->acl->mode & GR_PROCACCT)))
52193+ return;
52194+
52195+ do_posix_clock_monotonic_gettime(&timeval);
52196+ runtime = timeval.tv_sec - task->start_time.tv_sec;
52197+ wday = runtime / (3600 * 24);
52198+ runtime -= wday * (3600 * 24);
52199+ whr = runtime / 3600;
52200+ runtime -= whr * 3600;
52201+ wmin = runtime / 60;
52202+ runtime -= wmin * 60;
52203+ wsec = runtime;
52204+
52205+ cputime = (task->utime + task->stime) / HZ;
52206+ cday = cputime / (3600 * 24);
52207+ cputime -= cday * (3600 * 24);
52208+ chr = cputime / 3600;
52209+ cputime -= chr * 3600;
52210+ cmin = cputime / 60;
52211+ cputime -= cmin * 60;
52212+ csec = cputime;
52213+
52214+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52215+
52216+ return;
52217+}
52218+
52219+void gr_set_kernel_label(struct task_struct *task)
52220+{
52221+ if (gr_status & GR_READY) {
52222+ task->role = kernel_role;
52223+ task->acl = kernel_role->root_label;
52224+ }
52225+ return;
52226+}
52227+
52228+#ifdef CONFIG_TASKSTATS
52229+int gr_is_taskstats_denied(int pid)
52230+{
52231+ struct task_struct *task;
52232+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52233+ const struct cred *cred;
52234+#endif
52235+ int ret = 0;
52236+
52237+ /* restrict taskstats viewing to un-chrooted root users
52238+ who have the 'view' subject flag if the RBAC system is enabled
52239+ */
52240+
52241+ rcu_read_lock();
52242+ read_lock(&tasklist_lock);
52243+ task = find_task_by_vpid(pid);
52244+ if (task) {
52245+#ifdef CONFIG_GRKERNSEC_CHROOT
52246+ if (proc_is_chrooted(task))
52247+ ret = -EACCES;
52248+#endif
52249+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52250+ cred = __task_cred(task);
52251+#ifdef CONFIG_GRKERNSEC_PROC_USER
52252+ if (cred->uid != 0)
52253+ ret = -EACCES;
52254+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52255+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52256+ ret = -EACCES;
52257+#endif
52258+#endif
52259+ if (gr_status & GR_READY) {
52260+ if (!(task->acl->mode & GR_VIEW))
52261+ ret = -EACCES;
52262+ }
52263+ } else
52264+ ret = -ENOENT;
52265+
52266+ read_unlock(&tasklist_lock);
52267+ rcu_read_unlock();
52268+
52269+ return ret;
52270+}
52271+#endif
52272+
52273+/* AUXV entries are filled via a descendant of search_binary_handler
52274+ after we've already applied the subject for the target
52275+*/
52276+int gr_acl_enable_at_secure(void)
52277+{
52278+ if (unlikely(!(gr_status & GR_READY)))
52279+ return 0;
52280+
52281+ if (current->acl->mode & GR_ATSECURE)
52282+ return 1;
52283+
52284+ return 0;
52285+}
52286+
52287+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52288+{
52289+ struct task_struct *task = current;
52290+ struct dentry *dentry = file->f_path.dentry;
52291+ struct vfsmount *mnt = file->f_path.mnt;
52292+ struct acl_object_label *obj, *tmp;
52293+ struct acl_subject_label *subj;
52294+ unsigned int bufsize;
52295+ int is_not_root;
52296+ char *path;
52297+ dev_t dev = __get_dev(dentry);
52298+
52299+ if (unlikely(!(gr_status & GR_READY)))
52300+ return 1;
52301+
52302+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52303+ return 1;
52304+
52305+ /* ignore Eric Biederman */
52306+ if (IS_PRIVATE(dentry->d_inode))
52307+ return 1;
52308+
52309+ subj = task->acl;
52310+ do {
52311+ obj = lookup_acl_obj_label(ino, dev, subj);
52312+ if (obj != NULL)
52313+ return (obj->mode & GR_FIND) ? 1 : 0;
52314+ } while ((subj = subj->parent_subject));
52315+
52316+	/* this is purely an optimization, since we're looking for an
52317+	   object for the directory we're doing a readdir on.
52318+	   If it's possible for any globbed object to match the entry
52319+	   we're filling into the directory, then the object we find
52320+	   here will be an anchor point with attached globbed objects.
52321+	*/
52322+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52323+ if (obj->globbed == NULL)
52324+ return (obj->mode & GR_FIND) ? 1 : 0;
52325+
52326+ is_not_root = ((obj->filename[0] == '/') &&
52327+ (obj->filename[1] == '\0')) ? 0 : 1;
52328+ bufsize = PAGE_SIZE - namelen - is_not_root;
52329+
52330+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
52331+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52332+ return 1;
52333+
52334+ preempt_disable();
52335+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52336+ bufsize);
52337+
52338+ bufsize = strlen(path);
52339+
52340+ /* if base is "/", don't append an additional slash */
52341+ if (is_not_root)
52342+ *(path + bufsize) = '/';
52343+ memcpy(path + bufsize + is_not_root, name, namelen);
52344+ *(path + bufsize + namelen + is_not_root) = '\0';
52345+
52346+ tmp = obj->globbed;
52347+ while (tmp) {
52348+ if (!glob_match(tmp->filename, path)) {
52349+ preempt_enable();
52350+ return (tmp->mode & GR_FIND) ? 1 : 0;
52351+ }
52352+ tmp = tmp->next;
52353+ }
52354+ preempt_enable();
52355+ return (obj->mode & GR_FIND) ? 1 : 0;
52356+}
52357+
52358+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52359+EXPORT_SYMBOL(gr_acl_is_enabled);
52360+#endif
52361+EXPORT_SYMBOL(gr_learn_resource);
52362+EXPORT_SYMBOL(gr_set_kernel_label);
52363+#ifdef CONFIG_SECURITY
52364+EXPORT_SYMBOL(gr_check_user_change);
52365+EXPORT_SYMBOL(gr_check_group_change);
52366+#endif
52367+
52368diff -urNp linux-3.1.4/grsecurity/gracl_cap.c linux-3.1.4/grsecurity/gracl_cap.c
52369--- linux-3.1.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
52370+++ linux-3.1.4/grsecurity/gracl_cap.c 2011-11-16 18:40:31.000000000 -0500
52371@@ -0,0 +1,101 @@
52372+#include <linux/kernel.h>
52373+#include <linux/module.h>
52374+#include <linux/sched.h>
52375+#include <linux/gracl.h>
52376+#include <linux/grsecurity.h>
52377+#include <linux/grinternal.h>
52378+
52379+extern const char *captab_log[];
52380+extern int captab_log_entries;
52381+
52382+int
52383+gr_acl_is_capable(const int cap)
52384+{
52385+ struct task_struct *task = current;
52386+ const struct cred *cred = current_cred();
52387+ struct acl_subject_label *curracl;
52388+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52389+ kernel_cap_t cap_audit = __cap_empty_set;
52390+
52391+ if (!gr_acl_is_enabled())
52392+ return 1;
52393+
52394+ curracl = task->acl;
52395+
52396+ cap_drop = curracl->cap_lower;
52397+ cap_mask = curracl->cap_mask;
52398+ cap_audit = curracl->cap_invert_audit;
52399+
52400+ while ((curracl = curracl->parent_subject)) {
52401+ /* if the cap isn't specified in the current computed mask but is specified in the
52402+ current level subject, and is lowered in the current level subject, then add
52403+ it to the set of dropped capabilities
52404+ otherwise, add the current level subject's mask to the current computed mask
52405+ */
52406+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52407+ cap_raise(cap_mask, cap);
52408+ if (cap_raised(curracl->cap_lower, cap))
52409+ cap_raise(cap_drop, cap);
52410+ if (cap_raised(curracl->cap_invert_audit, cap))
52411+ cap_raise(cap_audit, cap);
52412+ }
52413+ }
52414+
52415+ if (!cap_raised(cap_drop, cap)) {
52416+ if (cap_raised(cap_audit, cap))
52417+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52418+ return 1;
52419+ }
52420+
52421+ curracl = task->acl;
52422+
52423+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52424+ && cap_raised(cred->cap_effective, cap)) {
52425+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52426+ task->role->roletype, cred->uid,
52427+ cred->gid, task->exec_file ?
52428+ gr_to_filename(task->exec_file->f_path.dentry,
52429+ task->exec_file->f_path.mnt) : curracl->filename,
52430+ curracl->filename, 0UL,
52431+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52432+ return 1;
52433+ }
52434+
52435+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52436+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52437+ return 0;
52438+}
52439+
52440+int
52441+gr_acl_is_capable_nolog(const int cap)
52442+{
52443+ struct acl_subject_label *curracl;
52444+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52445+
52446+ if (!gr_acl_is_enabled())
52447+ return 1;
52448+
52449+ curracl = current->acl;
52450+
52451+ cap_drop = curracl->cap_lower;
52452+ cap_mask = curracl->cap_mask;
52453+
52454+ while ((curracl = curracl->parent_subject)) {
52455+ /* if the cap isn't specified in the current computed mask but is specified in the
52456+ current level subject, and is lowered in the current level subject, then add
52457+ it to the set of dropped capabilities
52458+ otherwise, add the current level subject's mask to the current computed mask
52459+ */
52460+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52461+ cap_raise(cap_mask, cap);
52462+ if (cap_raised(curracl->cap_lower, cap))
52463+ cap_raise(cap_drop, cap);
52464+ }
52465+ }
52466+
52467+ if (!cap_raised(cap_drop, cap))
52468+ return 1;
52469+
52470+ return 0;
52471+}
52472+
52473diff -urNp linux-3.1.4/grsecurity/gracl_fs.c linux-3.1.4/grsecurity/gracl_fs.c
52474--- linux-3.1.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
52475+++ linux-3.1.4/grsecurity/gracl_fs.c 2011-11-17 00:25:32.000000000 -0500
52476@@ -0,0 +1,433 @@
52477+#include <linux/kernel.h>
52478+#include <linux/sched.h>
52479+#include <linux/types.h>
52480+#include <linux/fs.h>
52481+#include <linux/file.h>
52482+#include <linux/stat.h>
52483+#include <linux/grsecurity.h>
52484+#include <linux/grinternal.h>
52485+#include <linux/gracl.h>
52486+
52487+__u32
52488+gr_acl_handle_hidden_file(const struct dentry * dentry,
52489+ const struct vfsmount * mnt)
52490+{
52491+ __u32 mode;
52492+
52493+ if (unlikely(!dentry->d_inode))
52494+ return GR_FIND;
52495+
52496+ mode =
52497+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52498+
52499+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52500+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52501+ return mode;
52502+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52503+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52504+ return 0;
52505+ } else if (unlikely(!(mode & GR_FIND)))
52506+ return 0;
52507+
52508+ return GR_FIND;
52509+}
52510+
52511+__u32
52512+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52513+ int acc_mode)
52514+{
52515+ __u32 reqmode = GR_FIND;
52516+ __u32 mode;
52517+
52518+ if (unlikely(!dentry->d_inode))
52519+ return reqmode;
52520+
52521+ if (acc_mode & MAY_APPEND)
52522+ reqmode |= GR_APPEND;
52523+ else if (acc_mode & MAY_WRITE)
52524+ reqmode |= GR_WRITE;
52525+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52526+ reqmode |= GR_READ;
52527+
52528+ mode =
52529+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52530+ mnt);
52531+
52532+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52533+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52534+ reqmode & GR_READ ? " reading" : "",
52535+ reqmode & GR_WRITE ? " writing" : reqmode &
52536+ GR_APPEND ? " appending" : "");
52537+ return reqmode;
52538+ } else
52539+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52540+ {
52541+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52542+ reqmode & GR_READ ? " reading" : "",
52543+ reqmode & GR_WRITE ? " writing" : reqmode &
52544+ GR_APPEND ? " appending" : "");
52545+ return 0;
52546+ } else if (unlikely((mode & reqmode) != reqmode))
52547+ return 0;
52548+
52549+ return reqmode;
52550+}
52551+
52552+__u32
52553+gr_acl_handle_creat(const struct dentry * dentry,
52554+ const struct dentry * p_dentry,
52555+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52556+ const int imode)
52557+{
52558+ __u32 reqmode = GR_WRITE | GR_CREATE;
52559+ __u32 mode;
52560+
52561+ if (acc_mode & MAY_APPEND)
52562+ reqmode |= GR_APPEND;
52563+ // if a directory was required or the directory already exists, then
52564+ // don't count this open as a read
52565+ if ((acc_mode & MAY_READ) &&
52566+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52567+ reqmode |= GR_READ;
52568+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52569+ reqmode |= GR_SETID;
52570+
52571+ mode =
52572+ gr_check_create(dentry, p_dentry, p_mnt,
52573+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52574+
52575+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52576+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52577+ reqmode & GR_READ ? " reading" : "",
52578+ reqmode & GR_WRITE ? " writing" : reqmode &
52579+ GR_APPEND ? " appending" : "");
52580+ return reqmode;
52581+ } else
52582+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52583+ {
52584+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52585+ reqmode & GR_READ ? " reading" : "",
52586+ reqmode & GR_WRITE ? " writing" : reqmode &
52587+ GR_APPEND ? " appending" : "");
52588+ return 0;
52589+ } else if (unlikely((mode & reqmode) != reqmode))
52590+ return 0;
52591+
52592+ return reqmode;
52593+}
52594+
52595+__u32
52596+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52597+ const int fmode)
52598+{
52599+ __u32 mode, reqmode = GR_FIND;
52600+
52601+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52602+ reqmode |= GR_EXEC;
52603+ if (fmode & S_IWOTH)
52604+ reqmode |= GR_WRITE;
52605+ if (fmode & S_IROTH)
52606+ reqmode |= GR_READ;
52607+
52608+ mode =
52609+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52610+ mnt);
52611+
52612+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52613+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52614+ reqmode & GR_READ ? " reading" : "",
52615+ reqmode & GR_WRITE ? " writing" : "",
52616+ reqmode & GR_EXEC ? " executing" : "");
52617+ return reqmode;
52618+ } else
52619+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52620+ {
52621+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52622+ reqmode & GR_READ ? " reading" : "",
52623+ reqmode & GR_WRITE ? " writing" : "",
52624+ reqmode & GR_EXEC ? " executing" : "");
52625+ return 0;
52626+ } else if (unlikely((mode & reqmode) != reqmode))
52627+ return 0;
52628+
52629+ return reqmode;
52630+}
52631+
52632+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
52633+{
52634+ __u32 mode;
52635+
52636+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
52637+
52638+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52639+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
52640+ return mode;
52641+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52642+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
52643+ return 0;
52644+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52645+ return 0;
52646+
52647+ return (reqmode);
52648+}
52649+
52650+__u32
52651+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52652+{
52653+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
52654+}
52655+
52656+__u32
52657+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
52658+{
52659+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
52660+}
52661+
52662+__u32
52663+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
52664+{
52665+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
52666+}
52667+
52668+__u32
52669+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
52670+{
52671+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
52672+}
52673+
52674+__u32
52675+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
52676+ mode_t mode)
52677+{
52678+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
52679+ return 1;
52680+
52681+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52682+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52683+ GR_FCHMOD_ACL_MSG);
52684+ } else {
52685+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
52686+ }
52687+}
52688+
52689+__u32
52690+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
52691+ mode_t mode)
52692+{
52693+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52694+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52695+ GR_CHMOD_ACL_MSG);
52696+ } else {
52697+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
52698+ }
52699+}
52700+
52701+__u32
52702+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
52703+{
52704+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
52705+}
52706+
52707+__u32
52708+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
52709+{
52710+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
52711+}
52712+
52713+__u32
52714+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
52715+{
52716+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
52717+}
52718+
52719+__u32
52720+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
52721+{
52722+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
52723+ GR_UNIXCONNECT_ACL_MSG);
52724+}
52725+
52726+/* hardlinks require at minimum create and link permission;
52727+   any additional privilege required is based on the
52728+   privilege of the file being linked to
52729+*/
52730+__u32
52731+gr_acl_handle_link(const struct dentry * new_dentry,
52732+ const struct dentry * parent_dentry,
52733+ const struct vfsmount * parent_mnt,
52734+ const struct dentry * old_dentry,
52735+ const struct vfsmount * old_mnt, const char *to)
52736+{
52737+ __u32 mode;
52738+ __u32 needmode = GR_CREATE | GR_LINK;
52739+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
52740+
52741+ mode =
52742+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
52743+ old_mnt);
52744+
52745+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
52746+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52747+ return mode;
52748+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52749+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52750+ return 0;
52751+ } else if (unlikely((mode & needmode) != needmode))
52752+ return 0;
52753+
52754+ return 1;
52755+}
52756+
52757+__u32
52758+gr_acl_handle_symlink(const struct dentry * new_dentry,
52759+ const struct dentry * parent_dentry,
52760+ const struct vfsmount * parent_mnt, const char *from)
52761+{
52762+ __u32 needmode = GR_WRITE | GR_CREATE;
52763+ __u32 mode;
52764+
52765+ mode =
52766+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
52767+ GR_CREATE | GR_AUDIT_CREATE |
52768+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52769+
52770+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52771+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52772+ return mode;
52773+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52774+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52775+ return 0;
52776+ } else if (unlikely((mode & needmode) != needmode))
52777+ return 0;
52778+
52779+ return (GR_WRITE | GR_CREATE);
52780+}
52781+
52782+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52783+{
52784+ __u32 mode;
52785+
52786+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52787+
52788+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52789+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
52790+ return mode;
52791+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52792+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
52793+ return 0;
52794+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52795+ return 0;
52796+
52797+ return (reqmode);
52798+}
52799+
52800+__u32
52801+gr_acl_handle_mknod(const struct dentry * new_dentry,
52802+ const struct dentry * parent_dentry,
52803+ const struct vfsmount * parent_mnt,
52804+ const int mode)
52805+{
52806+ __u32 reqmode = GR_WRITE | GR_CREATE;
52807+ if (unlikely(mode & (S_ISUID | S_ISGID)))
52808+ reqmode |= GR_SETID;
52809+
52810+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52811+ reqmode, GR_MKNOD_ACL_MSG);
52812+}
52813+
52814+__u32
52815+gr_acl_handle_mkdir(const struct dentry *new_dentry,
52816+ const struct dentry *parent_dentry,
52817+ const struct vfsmount *parent_mnt)
52818+{
52819+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52820+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
52821+}
52822+
52823+#define RENAME_CHECK_SUCCESS(old, new) \
52824+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
52825+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
52826+
52827+int
52828+gr_acl_handle_rename(struct dentry *new_dentry,
52829+ struct dentry *parent_dentry,
52830+ const struct vfsmount *parent_mnt,
52831+ struct dentry *old_dentry,
52832+ struct inode *old_parent_inode,
52833+ struct vfsmount *old_mnt, const char *newname)
52834+{
52835+ __u32 comp1, comp2;
52836+ int error = 0;
52837+
52838+ if (unlikely(!gr_acl_is_enabled()))
52839+ return 0;
52840+
52841+ if (!new_dentry->d_inode) {
52842+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
52843+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
52844+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
52845+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
52846+ GR_DELETE | GR_AUDIT_DELETE |
52847+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52848+ GR_SUPPRESS, old_mnt);
52849+ } else {
52850+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
52851+ GR_CREATE | GR_DELETE |
52852+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
52853+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52854+ GR_SUPPRESS, parent_mnt);
52855+ comp2 =
52856+ gr_search_file(old_dentry,
52857+ GR_READ | GR_WRITE | GR_AUDIT_READ |
52858+ GR_DELETE | GR_AUDIT_DELETE |
52859+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
52860+ }
52861+
52862+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
52863+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
52864+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52865+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52866+ && !(comp2 & GR_SUPPRESS)) {
52867+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52868+ error = -EACCES;
52869+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52870+ error = -EACCES;
52871+
52872+ return error;
52873+}
52874+
52875+void
52876+gr_acl_handle_exit(void)
52877+{
52878+ u16 id;
52879+ char *rolename;
52880+ struct file *exec_file;
52881+
52882+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52883+ !(current->role->roletype & GR_ROLE_PERSIST))) {
52884+ id = current->acl_role_id;
52885+ rolename = current->role->rolename;
52886+ gr_set_acls(1);
52887+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
52888+ }
52889+
52890+ write_lock(&grsec_exec_file_lock);
52891+ exec_file = current->exec_file;
52892+ current->exec_file = NULL;
52893+ write_unlock(&grsec_exec_file_lock);
52894+
52895+ if (exec_file)
52896+ fput(exec_file);
52897+}
52898+
52899+int
52900+gr_acl_handle_procpidmem(const struct task_struct *task)
52901+{
52902+ if (unlikely(!gr_acl_is_enabled()))
52903+ return 0;
52904+
52905+ if (task != current && task->acl->mode & GR_PROTPROCFD)
52906+ return -EACCES;
52907+
52908+ return 0;
52909+}
52910diff -urNp linux-3.1.4/grsecurity/gracl_ip.c linux-3.1.4/grsecurity/gracl_ip.c
52911--- linux-3.1.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
52912+++ linux-3.1.4/grsecurity/gracl_ip.c 2011-11-16 18:40:31.000000000 -0500
52913@@ -0,0 +1,381 @@
52914+#include <linux/kernel.h>
52915+#include <asm/uaccess.h>
52916+#include <asm/errno.h>
52917+#include <net/sock.h>
52918+#include <linux/file.h>
52919+#include <linux/fs.h>
52920+#include <linux/net.h>
52921+#include <linux/in.h>
52922+#include <linux/skbuff.h>
52923+#include <linux/ip.h>
52924+#include <linux/udp.h>
52925+#include <linux/types.h>
52926+#include <linux/sched.h>
52927+#include <linux/netdevice.h>
52928+#include <linux/inetdevice.h>
52929+#include <linux/gracl.h>
52930+#include <linux/grsecurity.h>
52931+#include <linux/grinternal.h>
52932+
52933+#define GR_BIND 0x01
52934+#define GR_CONNECT 0x02
52935+#define GR_INVERT 0x04
52936+#define GR_BINDOVERRIDE 0x08
52937+#define GR_CONNECTOVERRIDE 0x10
52938+#define GR_SOCK_FAMILY 0x20
52939+
52940+static const char * gr_protocols[IPPROTO_MAX] = {
52941+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
52942+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
52943+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
52944+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
52945+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
52946+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
52947+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
52948+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52949+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52950+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52951+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52952+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52953+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52954+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52955+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52956+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52957+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
52958+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52959+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52960+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52961+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52962+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52963+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52964+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52965+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52966+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52967+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52968+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52969+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52970+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52971+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52972+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52973+ };
52974+
52975+static const char * gr_socktypes[SOCK_MAX] = {
52976+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52977+ "unknown:7", "unknown:8", "unknown:9", "packet"
52978+ };
52979+
52980+static const char * gr_sockfamilies[AF_MAX+1] = {
52981+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52982+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52983+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52984+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
52985+ };
52986+
52987+const char *
52988+gr_proto_to_name(unsigned char proto)
52989+{
52990+ return gr_protocols[proto];
52991+}
52992+
52993+const char *
52994+gr_socktype_to_name(unsigned char type)
52995+{
52996+ return gr_socktypes[type];
52997+}
52998+
52999+const char *
53000+gr_sockfamily_to_name(unsigned char family)
53001+{
53002+ return gr_sockfamilies[family];
53003+}
53004+
53005+int
53006+gr_search_socket(const int domain, const int type, const int protocol)
53007+{
53008+ struct acl_subject_label *curr;
53009+ const struct cred *cred = current_cred();
53010+
53011+ if (unlikely(!gr_acl_is_enabled()))
53012+ goto exit;
53013+
53014+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
53015+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53016+ goto exit; // let the kernel handle it
53017+
53018+ curr = current->acl;
53019+
53020+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53021+		/* the family is allowed; if this is PF_INET, allow it only if
53022+ the extra sock type/protocol checks pass */
53023+ if (domain == PF_INET)
53024+ goto inet_check;
53025+ goto exit;
53026+ } else {
53027+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53028+ __u32 fakeip = 0;
53029+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53030+ current->role->roletype, cred->uid,
53031+ cred->gid, current->exec_file ?
53032+ gr_to_filename(current->exec_file->f_path.dentry,
53033+ current->exec_file->f_path.mnt) :
53034+ curr->filename, curr->filename,
53035+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53036+ &current->signal->saved_ip);
53037+ goto exit;
53038+ }
53039+ goto exit_fail;
53040+ }
53041+
53042+inet_check:
53043+ /* the rest of this checking is for IPv4 only */
53044+ if (!curr->ips)
53045+ goto exit;
53046+
53047+ if ((curr->ip_type & (1 << type)) &&
53048+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53049+ goto exit;
53050+
53051+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53052+		/* we don't place acls on raw sockets, and sometimes
53053+ dgram/ip sockets are opened for ioctl and not
53054+ bind/connect, so we'll fake a bind learn log */
53055+ if (type == SOCK_RAW || type == SOCK_PACKET) {
53056+ __u32 fakeip = 0;
53057+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53058+ current->role->roletype, cred->uid,
53059+ cred->gid, current->exec_file ?
53060+ gr_to_filename(current->exec_file->f_path.dentry,
53061+ current->exec_file->f_path.mnt) :
53062+ curr->filename, curr->filename,
53063+ &fakeip, 0, type,
53064+ protocol, GR_CONNECT, &current->signal->saved_ip);
53065+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53066+ __u32 fakeip = 0;
53067+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53068+ current->role->roletype, cred->uid,
53069+ cred->gid, current->exec_file ?
53070+ gr_to_filename(current->exec_file->f_path.dentry,
53071+ current->exec_file->f_path.mnt) :
53072+ curr->filename, curr->filename,
53073+ &fakeip, 0, type,
53074+ protocol, GR_BIND, &current->signal->saved_ip);
53075+ }
53076+ /* we'll log when they use connect or bind */
53077+ goto exit;
53078+ }
53079+
53080+exit_fail:
53081+ if (domain == PF_INET)
53082+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53083+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
53084+ else
53085+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53086+ gr_socktype_to_name(type), protocol);
53087+
53088+ return 0;
53089+exit:
53090+ return 1;
53091+}
53092+
53093+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53094+{
53095+ if ((ip->mode & mode) &&
53096+ (ip_port >= ip->low) &&
53097+ (ip_port <= ip->high) &&
53098+ ((ntohl(ip_addr) & our_netmask) ==
53099+ (ntohl(our_addr) & our_netmask))
53100+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53101+ && (ip->type & (1 << type))) {
53102+ if (ip->mode & GR_INVERT)
53103+ return 2; // specifically denied
53104+ else
53105+ return 1; // allowed
53106+ }
53107+
53108+ return 0; // not specifically allowed, may continue parsing
53109+}
53110+
53111+static int
53112+gr_search_connectbind(const int full_mode, struct sock *sk,
53113+ struct sockaddr_in *addr, const int type)
53114+{
53115+ char iface[IFNAMSIZ] = {0};
53116+ struct acl_subject_label *curr;
53117+ struct acl_ip_label *ip;
53118+ struct inet_sock *isk;
53119+ struct net_device *dev;
53120+ struct in_device *idev;
53121+ unsigned long i;
53122+ int ret;
53123+ int mode = full_mode & (GR_BIND | GR_CONNECT);
53124+ __u32 ip_addr = 0;
53125+ __u32 our_addr;
53126+ __u32 our_netmask;
53127+ char *p;
53128+ __u16 ip_port = 0;
53129+ const struct cred *cred = current_cred();
53130+
53131+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53132+ return 0;
53133+
53134+ curr = current->acl;
53135+ isk = inet_sk(sk);
53136+
53137+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53138+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53139+ addr->sin_addr.s_addr = curr->inaddr_any_override;
53140+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53141+ struct sockaddr_in saddr;
53142+ int err;
53143+
53144+ saddr.sin_family = AF_INET;
53145+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
53146+ saddr.sin_port = isk->inet_sport;
53147+
53148+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53149+ if (err)
53150+ return err;
53151+
53152+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53153+ if (err)
53154+ return err;
53155+ }
53156+
53157+ if (!curr->ips)
53158+ return 0;
53159+
53160+ ip_addr = addr->sin_addr.s_addr;
53161+ ip_port = ntohs(addr->sin_port);
53162+
53163+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53164+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53165+ current->role->roletype, cred->uid,
53166+ cred->gid, current->exec_file ?
53167+ gr_to_filename(current->exec_file->f_path.dentry,
53168+ current->exec_file->f_path.mnt) :
53169+ curr->filename, curr->filename,
53170+ &ip_addr, ip_port, type,
53171+ sk->sk_protocol, mode, &current->signal->saved_ip);
53172+ return 0;
53173+ }
53174+
53175+ for (i = 0; i < curr->ip_num; i++) {
53176+ ip = *(curr->ips + i);
53177+ if (ip->iface != NULL) {
53178+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
53179+ p = strchr(iface, ':');
53180+ if (p != NULL)
53181+ *p = '\0';
53182+ dev = dev_get_by_name(sock_net(sk), iface);
53183+ if (dev == NULL)
53184+ continue;
53185+ idev = in_dev_get(dev);
53186+ if (idev == NULL) {
53187+ dev_put(dev);
53188+ continue;
53189+ }
53190+ rcu_read_lock();
53191+ for_ifa(idev) {
53192+ if (!strcmp(ip->iface, ifa->ifa_label)) {
53193+ our_addr = ifa->ifa_address;
53194+ our_netmask = 0xffffffff;
53195+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53196+ if (ret == 1) {
53197+ rcu_read_unlock();
53198+ in_dev_put(idev);
53199+ dev_put(dev);
53200+ return 0;
53201+ } else if (ret == 2) {
53202+ rcu_read_unlock();
53203+ in_dev_put(idev);
53204+ dev_put(dev);
53205+ goto denied;
53206+ }
53207+ }
53208+ } endfor_ifa(idev);
53209+ rcu_read_unlock();
53210+ in_dev_put(idev);
53211+ dev_put(dev);
53212+ } else {
53213+ our_addr = ip->addr;
53214+ our_netmask = ip->netmask;
53215+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53216+ if (ret == 1)
53217+ return 0;
53218+ else if (ret == 2)
53219+ goto denied;
53220+ }
53221+ }
53222+
53223+denied:
53224+ if (mode == GR_BIND)
53225+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53226+ else if (mode == GR_CONNECT)
53227+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53228+
53229+ return -EACCES;
53230+}
53231+
53232+int
53233+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53234+{
53235+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53236+}
53237+
53238+int
53239+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53240+{
53241+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53242+}
53243+
53244+int gr_search_listen(struct socket *sock)
53245+{
53246+ struct sock *sk = sock->sk;
53247+ struct sockaddr_in addr;
53248+
53249+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53250+ addr.sin_port = inet_sk(sk)->inet_sport;
53251+
53252+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53253+}
53254+
53255+int gr_search_accept(struct socket *sock)
53256+{
53257+ struct sock *sk = sock->sk;
53258+ struct sockaddr_in addr;
53259+
53260+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53261+ addr.sin_port = inet_sk(sk)->inet_sport;
53262+
53263+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53264+}
53265+
53266+int
53267+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53268+{
53269+ if (addr)
53270+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53271+ else {
53272+ struct sockaddr_in sin;
53273+ const struct inet_sock *inet = inet_sk(sk);
53274+
53275+ sin.sin_addr.s_addr = inet->inet_daddr;
53276+ sin.sin_port = inet->inet_dport;
53277+
53278+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53279+ }
53280+}
53281+
53282+int
53283+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53284+{
53285+ struct sockaddr_in sin;
53286+
53287+ if (unlikely(skb->len < sizeof (struct udphdr)))
53288+ return 0; // skip this packet
53289+
53290+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53291+ sin.sin_port = udp_hdr(skb)->source;
53292+
53293+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53294+}
53295diff -urNp linux-3.1.4/grsecurity/gracl_learn.c linux-3.1.4/grsecurity/gracl_learn.c
53296--- linux-3.1.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
53297+++ linux-3.1.4/grsecurity/gracl_learn.c 2011-11-16 18:40:31.000000000 -0500
53298@@ -0,0 +1,207 @@
53299+#include <linux/kernel.h>
53300+#include <linux/mm.h>
53301+#include <linux/sched.h>
53302+#include <linux/poll.h>
53303+#include <linux/string.h>
53304+#include <linux/file.h>
53305+#include <linux/types.h>
53306+#include <linux/vmalloc.h>
53307+#include <linux/grinternal.h>
53308+
53309+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53310+ size_t count, loff_t *ppos);
53311+extern int gr_acl_is_enabled(void);
53312+
53313+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53314+static int gr_learn_attached;
53315+
53316+/* use a 512k buffer */
53317+#define LEARN_BUFFER_SIZE (512 * 1024)
53318+
53319+static DEFINE_SPINLOCK(gr_learn_lock);
53320+static DEFINE_MUTEX(gr_learn_user_mutex);
53321+
53322+/* we need to maintain two buffers, so that the kernel context of grlearn
53323+ uses a semaphore around the userspace copying, and the other kernel contexts
53324+ use a spinlock when copying into the buffer, since they cannot sleep
53325+*/
53326+static char *learn_buffer;
53327+static char *learn_buffer_user;
53328+static int learn_buffer_len;
53329+static int learn_buffer_user_len;
53330+
53331+static ssize_t
53332+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53333+{
53334+ DECLARE_WAITQUEUE(wait, current);
53335+ ssize_t retval = 0;
53336+
53337+ add_wait_queue(&learn_wait, &wait);
53338+ set_current_state(TASK_INTERRUPTIBLE);
53339+ do {
53340+ mutex_lock(&gr_learn_user_mutex);
53341+ spin_lock(&gr_learn_lock);
53342+ if (learn_buffer_len)
53343+ break;
53344+ spin_unlock(&gr_learn_lock);
53345+ mutex_unlock(&gr_learn_user_mutex);
53346+ if (file->f_flags & O_NONBLOCK) {
53347+ retval = -EAGAIN;
53348+ goto out;
53349+ }
53350+ if (signal_pending(current)) {
53351+ retval = -ERESTARTSYS;
53352+ goto out;
53353+ }
53354+
53355+ schedule();
53356+ } while (1);
53357+
53358+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53359+ learn_buffer_user_len = learn_buffer_len;
53360+ retval = learn_buffer_len;
53361+ learn_buffer_len = 0;
53362+
53363+ spin_unlock(&gr_learn_lock);
53364+
53365+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53366+ retval = -EFAULT;
53367+
53368+ mutex_unlock(&gr_learn_user_mutex);
53369+out:
53370+ set_current_state(TASK_RUNNING);
53371+ remove_wait_queue(&learn_wait, &wait);
53372+ return retval;
53373+}
53374+
53375+static unsigned int
53376+poll_learn(struct file * file, poll_table * wait)
53377+{
53378+ poll_wait(file, &learn_wait, wait);
53379+
53380+ if (learn_buffer_len)
53381+ return (POLLIN | POLLRDNORM);
53382+
53383+ return 0;
53384+}
53385+
53386+void
53387+gr_clear_learn_entries(void)
53388+{
53389+ char *tmp;
53390+
53391+ mutex_lock(&gr_learn_user_mutex);
53392+ spin_lock(&gr_learn_lock);
53393+ tmp = learn_buffer;
53394+ learn_buffer = NULL;
53395+ spin_unlock(&gr_learn_lock);
53396+ if (tmp)
53397+ vfree(tmp);
53398+ if (learn_buffer_user != NULL) {
53399+ vfree(learn_buffer_user);
53400+ learn_buffer_user = NULL;
53401+ }
53402+ learn_buffer_len = 0;
53403+ mutex_unlock(&gr_learn_user_mutex);
53404+
53405+ return;
53406+}
53407+
53408+void
53409+gr_add_learn_entry(const char *fmt, ...)
53410+{
53411+ va_list args;
53412+ unsigned int len;
53413+
53414+ if (!gr_learn_attached)
53415+ return;
53416+
53417+ spin_lock(&gr_learn_lock);
53418+
53419+ /* leave a gap at the end so we know when it's "full" but don't have to
53420+ compute the exact length of the string we're trying to append
53421+ */
53422+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53423+ spin_unlock(&gr_learn_lock);
53424+ wake_up_interruptible(&learn_wait);
53425+ return;
53426+ }
53427+ if (learn_buffer == NULL) {
53428+ spin_unlock(&gr_learn_lock);
53429+ return;
53430+ }
53431+
53432+ va_start(args, fmt);
53433+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53434+ va_end(args);
53435+
53436+ learn_buffer_len += len + 1;
53437+
53438+ spin_unlock(&gr_learn_lock);
53439+ wake_up_interruptible(&learn_wait);
53440+
53441+ return;
53442+}
53443+
53444+static int
53445+open_learn(struct inode *inode, struct file *file)
53446+{
53447+ if (file->f_mode & FMODE_READ && gr_learn_attached)
53448+ return -EBUSY;
53449+ if (file->f_mode & FMODE_READ) {
53450+ int retval = 0;
53451+ mutex_lock(&gr_learn_user_mutex);
53452+ if (learn_buffer == NULL)
53453+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53454+ if (learn_buffer_user == NULL)
53455+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53456+ if (learn_buffer == NULL) {
53457+ retval = -ENOMEM;
53458+ goto out_error;
53459+ }
53460+ if (learn_buffer_user == NULL) {
53461+ retval = -ENOMEM;
53462+ goto out_error;
53463+ }
53464+ learn_buffer_len = 0;
53465+ learn_buffer_user_len = 0;
53466+ gr_learn_attached = 1;
53467+out_error:
53468+ mutex_unlock(&gr_learn_user_mutex);
53469+ return retval;
53470+ }
53471+ return 0;
53472+}
53473+
53474+static int
53475+close_learn(struct inode *inode, struct file *file)
53476+{
53477+ if (file->f_mode & FMODE_READ) {
53478+ char *tmp = NULL;
53479+ mutex_lock(&gr_learn_user_mutex);
53480+ spin_lock(&gr_learn_lock);
53481+ tmp = learn_buffer;
53482+ learn_buffer = NULL;
53483+ spin_unlock(&gr_learn_lock);
53484+ if (tmp)
53485+ vfree(tmp);
53486+ if (learn_buffer_user != NULL) {
53487+ vfree(learn_buffer_user);
53488+ learn_buffer_user = NULL;
53489+ }
53490+ learn_buffer_len = 0;
53491+ learn_buffer_user_len = 0;
53492+ gr_learn_attached = 0;
53493+ mutex_unlock(&gr_learn_user_mutex);
53494+ }
53495+
53496+ return 0;
53497+}
53498+
53499+const struct file_operations grsec_fops = {
53500+ .read = read_learn,
53501+ .write = write_grsec_handler,
53502+ .open = open_learn,
53503+ .release = close_learn,
53504+ .poll = poll_learn,
53505+};
53506diff -urNp linux-3.1.4/grsecurity/gracl_res.c linux-3.1.4/grsecurity/gracl_res.c
53507--- linux-3.1.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
53508+++ linux-3.1.4/grsecurity/gracl_res.c 2011-11-16 18:40:31.000000000 -0500
53509@@ -0,0 +1,68 @@
53510+#include <linux/kernel.h>
53511+#include <linux/sched.h>
53512+#include <linux/gracl.h>
53513+#include <linux/grinternal.h>
53514+
53515+static const char *restab_log[] = {
53516+ [RLIMIT_CPU] = "RLIMIT_CPU",
53517+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53518+ [RLIMIT_DATA] = "RLIMIT_DATA",
53519+ [RLIMIT_STACK] = "RLIMIT_STACK",
53520+ [RLIMIT_CORE] = "RLIMIT_CORE",
53521+ [RLIMIT_RSS] = "RLIMIT_RSS",
53522+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
53523+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53524+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53525+ [RLIMIT_AS] = "RLIMIT_AS",
53526+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53527+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53528+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53529+ [RLIMIT_NICE] = "RLIMIT_NICE",
53530+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53531+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53532+ [GR_CRASH_RES] = "RLIMIT_CRASH"
53533+};
53534+
53535+void
53536+gr_log_resource(const struct task_struct *task,
53537+ const int res, const unsigned long wanted, const int gt)
53538+{
53539+ const struct cred *cred;
53540+ unsigned long rlim;
53541+
53542+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
53543+ return;
53544+
53545+ // not yet supported resource
53546+ if (unlikely(!restab_log[res]))
53547+ return;
53548+
53549+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53550+ rlim = task_rlimit_max(task, res);
53551+ else
53552+ rlim = task_rlimit(task, res);
53553+
53554+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53555+ return;
53556+
53557+ rcu_read_lock();
53558+ cred = __task_cred(task);
53559+
53560+ if (res == RLIMIT_NPROC &&
53561+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53562+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53563+ goto out_rcu_unlock;
53564+ else if (res == RLIMIT_MEMLOCK &&
53565+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53566+ goto out_rcu_unlock;
53567+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53568+ goto out_rcu_unlock;
53569+ rcu_read_unlock();
53570+
53571+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53572+
53573+ return;
53574+out_rcu_unlock:
53575+ rcu_read_unlock();
53576+ return;
53577+}
53578diff -urNp linux-3.1.4/grsecurity/gracl_segv.c linux-3.1.4/grsecurity/gracl_segv.c
53579--- linux-3.1.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
53580+++ linux-3.1.4/grsecurity/gracl_segv.c 2011-11-16 18:40:31.000000000 -0500
53581@@ -0,0 +1,299 @@
53582+#include <linux/kernel.h>
53583+#include <linux/mm.h>
53584+#include <asm/uaccess.h>
53585+#include <asm/errno.h>
53586+#include <asm/mman.h>
53587+#include <net/sock.h>
53588+#include <linux/file.h>
53589+#include <linux/fs.h>
53590+#include <linux/net.h>
53591+#include <linux/in.h>
53592+#include <linux/slab.h>
53593+#include <linux/types.h>
53594+#include <linux/sched.h>
53595+#include <linux/timer.h>
53596+#include <linux/gracl.h>
53597+#include <linux/grsecurity.h>
53598+#include <linux/grinternal.h>
53599+
53600+static struct crash_uid *uid_set;
53601+static unsigned short uid_used;
53602+static DEFINE_SPINLOCK(gr_uid_lock);
53603+extern rwlock_t gr_inode_lock;
53604+extern struct acl_subject_label *
53605+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53606+ struct acl_role_label *role);
53607+
53608+#ifdef CONFIG_BTRFS_FS
53609+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53610+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53611+#endif
53612+
53613+static inline dev_t __get_dev(const struct dentry *dentry)
53614+{
53615+#ifdef CONFIG_BTRFS_FS
53616+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53617+ return get_btrfs_dev_from_inode(dentry->d_inode);
53618+ else
53619+#endif
53620+ return dentry->d_inode->i_sb->s_dev;
53621+}
53622+
53623+int
53624+gr_init_uidset(void)
53625+{
53626+ uid_set =
53627+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53628+ uid_used = 0;
53629+
53630+ return uid_set ? 1 : 0;
53631+}
53632+
53633+void
53634+gr_free_uidset(void)
53635+{
53636+ if (uid_set)
53637+ kfree(uid_set);
53638+
53639+ return;
53640+}
53641+
53642+int
53643+gr_find_uid(const uid_t uid)
53644+{
53645+ struct crash_uid *tmp = uid_set;
53646+ uid_t buid;
53647+ int low = 0, high = uid_used - 1, mid;
53648+
53649+ while (high >= low) {
53650+ mid = (low + high) >> 1;
53651+ buid = tmp[mid].uid;
53652+ if (buid == uid)
53653+ return mid;
53654+ if (buid > uid)
53655+ high = mid - 1;
53656+ if (buid < uid)
53657+ low = mid + 1;
53658+ }
53659+
53660+ return -1;
53661+}
53662+
53663+static __inline__ void
53664+gr_insertsort(void)
53665+{
53666+ unsigned short i, j;
53667+ struct crash_uid index;
53668+
53669+ for (i = 1; i < uid_used; i++) {
53670+ index = uid_set[i];
53671+ j = i;
53672+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
53673+ uid_set[j] = uid_set[j - 1];
53674+ j--;
53675+ }
53676+ uid_set[j] = index;
53677+ }
53678+
53679+ return;
53680+}
53681+
53682+static __inline__ void
53683+gr_insert_uid(const uid_t uid, const unsigned long expires)
53684+{
53685+ int loc;
53686+
53687+ if (uid_used == GR_UIDTABLE_MAX)
53688+ return;
53689+
53690+ loc = gr_find_uid(uid);
53691+
53692+ if (loc >= 0) {
53693+ uid_set[loc].expires = expires;
53694+ return;
53695+ }
53696+
53697+ uid_set[uid_used].uid = uid;
53698+ uid_set[uid_used].expires = expires;
53699+ uid_used++;
53700+
53701+ gr_insertsort();
53702+
53703+ return;
53704+}
53705+
53706+void
53707+gr_remove_uid(const unsigned short loc)
53708+{
53709+ unsigned short i;
53710+
53711+ for (i = loc + 1; i < uid_used; i++)
53712+ uid_set[i - 1] = uid_set[i];
53713+
53714+ uid_used--;
53715+
53716+ return;
53717+}
53718+
53719+int
53720+gr_check_crash_uid(const uid_t uid)
53721+{
53722+ int loc;
53723+ int ret = 0;
53724+
53725+ if (unlikely(!gr_acl_is_enabled()))
53726+ return 0;
53727+
53728+ spin_lock(&gr_uid_lock);
53729+ loc = gr_find_uid(uid);
53730+
53731+ if (loc < 0)
53732+ goto out_unlock;
53733+
53734+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
53735+ gr_remove_uid(loc);
53736+ else
53737+ ret = 1;
53738+
53739+out_unlock:
53740+ spin_unlock(&gr_uid_lock);
53741+ return ret;
53742+}
53743+
53744+static __inline__ int
53745+proc_is_setxid(const struct cred *cred)
53746+{
53747+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
53748+ cred->uid != cred->fsuid)
53749+ return 1;
53750+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
53751+ cred->gid != cred->fsgid)
53752+ return 1;
53753+
53754+ return 0;
53755+}
53756+
53757+extern int gr_fake_force_sig(int sig, struct task_struct *t);
53758+
53759+void
53760+gr_handle_crash(struct task_struct *task, const int sig)
53761+{
53762+ struct acl_subject_label *curr;
53763+ struct task_struct *tsk, *tsk2;
53764+ const struct cred *cred;
53765+ const struct cred *cred2;
53766+
53767+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53768+ return;
53769+
53770+ if (unlikely(!gr_acl_is_enabled()))
53771+ return;
53772+
53773+ curr = task->acl;
53774+
53775+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
53776+ return;
53777+
53778+ if (time_before_eq(curr->expires, get_seconds())) {
53779+ curr->expires = 0;
53780+ curr->crashes = 0;
53781+ }
53782+
53783+ curr->crashes++;
53784+
53785+ if (!curr->expires)
53786+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
53787+
53788+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53789+ time_after(curr->expires, get_seconds())) {
53790+ rcu_read_lock();
53791+ cred = __task_cred(task);
53792+ if (cred->uid && proc_is_setxid(cred)) {
53793+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53794+ spin_lock(&gr_uid_lock);
53795+ gr_insert_uid(cred->uid, curr->expires);
53796+ spin_unlock(&gr_uid_lock);
53797+ curr->expires = 0;
53798+ curr->crashes = 0;
53799+ read_lock(&tasklist_lock);
53800+ do_each_thread(tsk2, tsk) {
53801+ cred2 = __task_cred(tsk);
53802+ if (tsk != task && cred2->uid == cred->uid)
53803+ gr_fake_force_sig(SIGKILL, tsk);
53804+ } while_each_thread(tsk2, tsk);
53805+ read_unlock(&tasklist_lock);
53806+ } else {
53807+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53808+ read_lock(&tasklist_lock);
53809+ read_lock(&grsec_exec_file_lock);
53810+ do_each_thread(tsk2, tsk) {
53811+ if (likely(tsk != task)) {
53812+ // if this thread has the same subject as the one that triggered
53813+ // RES_CRASH and it's the same binary, kill it
53814+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
53815+ gr_fake_force_sig(SIGKILL, tsk);
53816+ }
53817+ } while_each_thread(tsk2, tsk);
53818+ read_unlock(&grsec_exec_file_lock);
53819+ read_unlock(&tasklist_lock);
53820+ }
53821+ rcu_read_unlock();
53822+ }
53823+
53824+ return;
53825+}
53826+
53827+int
53828+gr_check_crash_exec(const struct file *filp)
53829+{
53830+ struct acl_subject_label *curr;
53831+
53832+ if (unlikely(!gr_acl_is_enabled()))
53833+ return 0;
53834+
53835+ read_lock(&gr_inode_lock);
53836+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
53837+ __get_dev(filp->f_path.dentry),
53838+ current->role);
53839+ read_unlock(&gr_inode_lock);
53840+
53841+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
53842+ (!curr->crashes && !curr->expires))
53843+ return 0;
53844+
53845+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53846+ time_after(curr->expires, get_seconds()))
53847+ return 1;
53848+ else if (time_before_eq(curr->expires, get_seconds())) {
53849+ curr->crashes = 0;
53850+ curr->expires = 0;
53851+ }
53852+
53853+ return 0;
53854+}
53855+
53856+void
53857+gr_handle_alertkill(struct task_struct *task)
53858+{
53859+ struct acl_subject_label *curracl;
53860+ __u32 curr_ip;
53861+ struct task_struct *p, *p2;
53862+
53863+ if (unlikely(!gr_acl_is_enabled()))
53864+ return;
53865+
53866+ curracl = task->acl;
53867+ curr_ip = task->signal->curr_ip;
53868+
53869+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53870+ read_lock(&tasklist_lock);
53871+ do_each_thread(p2, p) {
53872+ if (p->signal->curr_ip == curr_ip)
53873+ gr_fake_force_sig(SIGKILL, p);
53874+ } while_each_thread(p2, p);
53875+ read_unlock(&tasklist_lock);
53876+ } else if (curracl->mode & GR_KILLPROC)
53877+ gr_fake_force_sig(SIGKILL, task);
53878+
53879+ return;
53880+}
53881diff -urNp linux-3.1.4/grsecurity/gracl_shm.c linux-3.1.4/grsecurity/gracl_shm.c
53882--- linux-3.1.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
53883+++ linux-3.1.4/grsecurity/gracl_shm.c 2011-11-16 18:40:31.000000000 -0500
53884@@ -0,0 +1,40 @@
53885+#include <linux/kernel.h>
53886+#include <linux/mm.h>
53887+#include <linux/sched.h>
53888+#include <linux/file.h>
53889+#include <linux/ipc.h>
53890+#include <linux/gracl.h>
53891+#include <linux/grsecurity.h>
53892+#include <linux/grinternal.h>
53893+
53894+int
53895+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53896+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53897+{
53898+ struct task_struct *task;
53899+
53900+ if (!gr_acl_is_enabled())
53901+ return 1;
53902+
53903+ rcu_read_lock();
53904+ read_lock(&tasklist_lock);
53905+
53906+ task = find_task_by_vpid(shm_cprid);
53907+
53908+ if (unlikely(!task))
53909+ task = find_task_by_vpid(shm_lapid);
53910+
53911+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
53912+ (task->pid == shm_lapid)) &&
53913+ (task->acl->mode & GR_PROTSHM) &&
53914+ (task->acl != current->acl))) {
53915+ read_unlock(&tasklist_lock);
53916+ rcu_read_unlock();
53917+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
53918+ return 0;
53919+ }
53920+ read_unlock(&tasklist_lock);
53921+ rcu_read_unlock();
53922+
53923+ return 1;
53924+}
53925diff -urNp linux-3.1.4/grsecurity/grsec_chdir.c linux-3.1.4/grsecurity/grsec_chdir.c
53926--- linux-3.1.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
53927+++ linux-3.1.4/grsecurity/grsec_chdir.c 2011-11-16 18:40:31.000000000 -0500
53928@@ -0,0 +1,19 @@
53929+#include <linux/kernel.h>
53930+#include <linux/sched.h>
53931+#include <linux/fs.h>
53932+#include <linux/file.h>
53933+#include <linux/grsecurity.h>
53934+#include <linux/grinternal.h>
53935+
53936+void
53937+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
53938+{
53939+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53940+ if ((grsec_enable_chdir && grsec_enable_group &&
53941+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
53942+ !grsec_enable_group)) {
53943+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
53944+ }
53945+#endif
53946+ return;
53947+}
53948diff -urNp linux-3.1.4/grsecurity/grsec_chroot.c linux-3.1.4/grsecurity/grsec_chroot.c
53949--- linux-3.1.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
53950+++ linux-3.1.4/grsecurity/grsec_chroot.c 2011-11-16 18:40:31.000000000 -0500
53951@@ -0,0 +1,351 @@
53952+#include <linux/kernel.h>
53953+#include <linux/module.h>
53954+#include <linux/sched.h>
53955+#include <linux/file.h>
53956+#include <linux/fs.h>
53957+#include <linux/mount.h>
53958+#include <linux/types.h>
53959+#include <linux/pid_namespace.h>
53960+#include <linux/grsecurity.h>
53961+#include <linux/grinternal.h>
53962+
53963+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53964+{
53965+#ifdef CONFIG_GRKERNSEC
53966+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53967+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53968+ task->gr_is_chrooted = 1;
53969+ else
53970+ task->gr_is_chrooted = 0;
53971+
53972+ task->gr_chroot_dentry = path->dentry;
53973+#endif
53974+ return;
53975+}
53976+
53977+void gr_clear_chroot_entries(struct task_struct *task)
53978+{
53979+#ifdef CONFIG_GRKERNSEC
53980+ task->gr_is_chrooted = 0;
53981+ task->gr_chroot_dentry = NULL;
53982+#endif
53983+ return;
53984+}
53985+
53986+int
53987+gr_handle_chroot_unix(const pid_t pid)
53988+{
53989+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53990+ struct task_struct *p;
53991+
53992+ if (unlikely(!grsec_enable_chroot_unix))
53993+ return 1;
53994+
53995+ if (likely(!proc_is_chrooted(current)))
53996+ return 1;
53997+
53998+ rcu_read_lock();
53999+ read_lock(&tasklist_lock);
54000+ p = find_task_by_vpid_unrestricted(pid);
54001+ if (unlikely(p && !have_same_root(current, p))) {
54002+ read_unlock(&tasklist_lock);
54003+ rcu_read_unlock();
54004+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54005+ return 0;
54006+ }
54007+ read_unlock(&tasklist_lock);
54008+ rcu_read_unlock();
54009+#endif
54010+ return 1;
54011+}
54012+
54013+int
54014+gr_handle_chroot_nice(void)
54015+{
54016+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54017+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54018+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54019+ return -EPERM;
54020+ }
54021+#endif
54022+ return 0;
54023+}
54024+
54025+int
54026+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54027+{
54028+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54029+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54030+ && proc_is_chrooted(current)) {
54031+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54032+ return -EACCES;
54033+ }
54034+#endif
54035+ return 0;
54036+}
54037+
54038+int
54039+gr_handle_chroot_rawio(const struct inode *inode)
54040+{
54041+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54042+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54043+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54044+ return 1;
54045+#endif
54046+ return 0;
54047+}
54048+
54049+int
54050+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54051+{
54052+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54053+ struct task_struct *p;
54054+ int ret = 0;
54055+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54056+ return ret;
54057+
54058+ read_lock(&tasklist_lock);
54059+ do_each_pid_task(pid, type, p) {
54060+ if (!have_same_root(current, p)) {
54061+ ret = 1;
54062+ goto out;
54063+ }
54064+ } while_each_pid_task(pid, type, p);
54065+out:
54066+ read_unlock(&tasklist_lock);
54067+ return ret;
54068+#endif
54069+ return 0;
54070+}
54071+
54072+int
54073+gr_pid_is_chrooted(struct task_struct *p)
54074+{
54075+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54076+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54077+ return 0;
54078+
54079+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54080+ !have_same_root(current, p)) {
54081+ return 1;
54082+ }
54083+#endif
54084+ return 0;
54085+}
54086+
54087+EXPORT_SYMBOL(gr_pid_is_chrooted);
54088+
54089+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54090+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54091+{
54092+ struct path path, currentroot;
54093+ int ret = 0;
54094+
54095+ path.dentry = (struct dentry *)u_dentry;
54096+ path.mnt = (struct vfsmount *)u_mnt;
54097+ get_fs_root(current->fs, &currentroot);
54098+ if (path_is_under(&path, &currentroot))
54099+ ret = 1;
54100+ path_put(&currentroot);
54101+
54102+ return ret;
54103+}
54104+#endif
54105+
54106+int
54107+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54108+{
54109+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54110+ if (!grsec_enable_chroot_fchdir)
54111+ return 1;
54112+
54113+ if (!proc_is_chrooted(current))
54114+ return 1;
54115+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54116+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54117+ return 0;
54118+ }
54119+#endif
54120+ return 1;
54121+}
54122+
54123+int
54124+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54125+ const time_t shm_createtime)
54126+{
54127+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54128+ struct task_struct *p;
54129+ time_t starttime;
54130+
54131+ if (unlikely(!grsec_enable_chroot_shmat))
54132+ return 1;
54133+
54134+ if (likely(!proc_is_chrooted(current)))
54135+ return 1;
54136+
54137+ rcu_read_lock();
54138+ read_lock(&tasklist_lock);
54139+
54140+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54141+ starttime = p->start_time.tv_sec;
54142+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54143+ if (have_same_root(current, p)) {
54144+ goto allow;
54145+ } else {
54146+ read_unlock(&tasklist_lock);
54147+ rcu_read_unlock();
54148+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54149+ return 0;
54150+ }
54151+ }
54152+ /* creator exited, pid reuse, fall through to next check */
54153+ }
54154+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54155+ if (unlikely(!have_same_root(current, p))) {
54156+ read_unlock(&tasklist_lock);
54157+ rcu_read_unlock();
54158+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54159+ return 0;
54160+ }
54161+ }
54162+
54163+allow:
54164+ read_unlock(&tasklist_lock);
54165+ rcu_read_unlock();
54166+#endif
54167+ return 1;
54168+}
54169+
54170+void
54171+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54172+{
54173+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54174+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54175+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54176+#endif
54177+ return;
54178+}
54179+
54180+int
54181+gr_handle_chroot_mknod(const struct dentry *dentry,
54182+ const struct vfsmount *mnt, const int mode)
54183+{
54184+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54185+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54186+ proc_is_chrooted(current)) {
54187+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54188+ return -EPERM;
54189+ }
54190+#endif
54191+ return 0;
54192+}
54193+
54194+int
54195+gr_handle_chroot_mount(const struct dentry *dentry,
54196+ const struct vfsmount *mnt, const char *dev_name)
54197+{
54198+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54199+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54200+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54201+ return -EPERM;
54202+ }
54203+#endif
54204+ return 0;
54205+}
54206+
54207+int
54208+gr_handle_chroot_pivot(void)
54209+{
54210+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54211+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54212+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54213+ return -EPERM;
54214+ }
54215+#endif
54216+ return 0;
54217+}
54218+
54219+int
54220+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54221+{
54222+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54223+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54224+ !gr_is_outside_chroot(dentry, mnt)) {
54225+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54226+ return -EPERM;
54227+ }
54228+#endif
54229+ return 0;
54230+}
54231+
54232+extern const char *captab_log[];
54233+extern int captab_log_entries;
54234+
54235+int
54236+gr_chroot_is_capable(const int cap)
54237+{
54238+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54239+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54240+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54241+ if (cap_raised(chroot_caps, cap)) {
54242+ const struct cred *creds = current_cred();
54243+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54244+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54245+ }
54246+ return 0;
54247+ }
54248+ }
54249+#endif
54250+ return 1;
54251+}
54252+
54253+int
54254+gr_chroot_is_capable_nolog(const int cap)
54255+{
54256+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54257+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54258+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54259+ if (cap_raised(chroot_caps, cap)) {
54260+ return 0;
54261+ }
54262+ }
54263+#endif
54264+ return 1;
54265+}
54266+
54267+int
54268+gr_handle_chroot_sysctl(const int op)
54269+{
54270+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54271+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54272+ proc_is_chrooted(current))
54273+ return -EACCES;
54274+#endif
54275+ return 0;
54276+}
54277+
54278+void
54279+gr_handle_chroot_chdir(struct path *path)
54280+{
54281+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54282+ if (grsec_enable_chroot_chdir)
54283+ set_fs_pwd(current->fs, path);
54284+#endif
54285+ return;
54286+}
54287+
54288+int
54289+gr_handle_chroot_chmod(const struct dentry *dentry,
54290+ const struct vfsmount *mnt, const int mode)
54291+{
54292+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54293+ /* allow chmod +s on directories, but not files */
54294+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54295+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54296+ proc_is_chrooted(current)) {
54297+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54298+ return -EPERM;
54299+ }
54300+#endif
54301+ return 0;
54302+}
54303diff -urNp linux-3.1.4/grsecurity/grsec_disabled.c linux-3.1.4/grsecurity/grsec_disabled.c
54304--- linux-3.1.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
54305+++ linux-3.1.4/grsecurity/grsec_disabled.c 2011-11-17 00:16:25.000000000 -0500
54306@@ -0,0 +1,439 @@
54307+#include <linux/kernel.h>
54308+#include <linux/module.h>
54309+#include <linux/sched.h>
54310+#include <linux/file.h>
54311+#include <linux/fs.h>
54312+#include <linux/kdev_t.h>
54313+#include <linux/net.h>
54314+#include <linux/in.h>
54315+#include <linux/ip.h>
54316+#include <linux/skbuff.h>
54317+#include <linux/sysctl.h>
54318+
54319+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54320+void
54321+pax_set_initial_flags(struct linux_binprm *bprm)
54322+{
54323+ return;
54324+}
54325+#endif
54326+
54327+#ifdef CONFIG_SYSCTL
54328+__u32
54329+gr_handle_sysctl(const struct ctl_table * table, const int op)
54330+{
54331+ return 0;
54332+}
54333+#endif
54334+
54335+#ifdef CONFIG_TASKSTATS
54336+int gr_is_taskstats_denied(int pid)
54337+{
54338+ return 0;
54339+}
54340+#endif
54341+
54342+int
54343+gr_acl_is_enabled(void)
54344+{
54345+ return 0;
54346+}
54347+
54348+void
54349+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54350+{
54351+ return;
54352+}
54353+
54354+int
54355+gr_handle_rawio(const struct inode *inode)
54356+{
54357+ return 0;
54358+}
54359+
54360+void
54361+gr_acl_handle_psacct(struct task_struct *task, const long code)
54362+{
54363+ return;
54364+}
54365+
54366+int
54367+gr_handle_ptrace(struct task_struct *task, const long request)
54368+{
54369+ return 0;
54370+}
54371+
54372+int
54373+gr_handle_proc_ptrace(struct task_struct *task)
54374+{
54375+ return 0;
54376+}
54377+
54378+void
54379+gr_learn_resource(const struct task_struct *task,
54380+ const int res, const unsigned long wanted, const int gt)
54381+{
54382+ return;
54383+}
54384+
54385+int
54386+gr_set_acls(const int type)
54387+{
54388+ return 0;
54389+}
54390+
54391+int
54392+gr_check_hidden_task(const struct task_struct *tsk)
54393+{
54394+ return 0;
54395+}
54396+
54397+int
54398+gr_check_protected_task(const struct task_struct *task)
54399+{
54400+ return 0;
54401+}
54402+
54403+int
54404+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54405+{
54406+ return 0;
54407+}
54408+
54409+void
54410+gr_copy_label(struct task_struct *tsk)
54411+{
54412+ return;
54413+}
54414+
54415+void
54416+gr_set_pax_flags(struct task_struct *task)
54417+{
54418+ return;
54419+}
54420+
54421+int
54422+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54423+ const int unsafe_share)
54424+{
54425+ return 0;
54426+}
54427+
54428+void
54429+gr_handle_delete(const ino_t ino, const dev_t dev)
54430+{
54431+ return;
54432+}
54433+
54434+void
54435+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54436+{
54437+ return;
54438+}
54439+
54440+void
54441+gr_handle_crash(struct task_struct *task, const int sig)
54442+{
54443+ return;
54444+}
54445+
54446+int
54447+gr_check_crash_exec(const struct file *filp)
54448+{
54449+ return 0;
54450+}
54451+
54452+int
54453+gr_check_crash_uid(const uid_t uid)
54454+{
54455+ return 0;
54456+}
54457+
54458+void
54459+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54460+ struct dentry *old_dentry,
54461+ struct dentry *new_dentry,
54462+ struct vfsmount *mnt, const __u8 replace)
54463+{
54464+ return;
54465+}
54466+
54467+int
54468+gr_search_socket(const int family, const int type, const int protocol)
54469+{
54470+ return 1;
54471+}
54472+
54473+int
54474+gr_search_connectbind(const int mode, const struct socket *sock,
54475+ const struct sockaddr_in *addr)
54476+{
54477+ return 0;
54478+}
54479+
54480+void
54481+gr_handle_alertkill(struct task_struct *task)
54482+{
54483+ return;
54484+}
54485+
54486+__u32
54487+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54488+{
54489+ return 1;
54490+}
54491+
54492+__u32
54493+gr_acl_handle_hidden_file(const struct dentry * dentry,
54494+ const struct vfsmount * mnt)
54495+{
54496+ return 1;
54497+}
54498+
54499+__u32
54500+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54501+ int acc_mode)
54502+{
54503+ return 1;
54504+}
54505+
54506+__u32
54507+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54508+{
54509+ return 1;
54510+}
54511+
54512+__u32
54513+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54514+{
54515+ return 1;
54516+}
54517+
54518+int
54519+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54520+ unsigned int *vm_flags)
54521+{
54522+ return 1;
54523+}
54524+
54525+__u32
54526+gr_acl_handle_truncate(const struct dentry * dentry,
54527+ const struct vfsmount * mnt)
54528+{
54529+ return 1;
54530+}
54531+
54532+__u32
54533+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54534+{
54535+ return 1;
54536+}
54537+
54538+__u32
54539+gr_acl_handle_access(const struct dentry * dentry,
54540+ const struct vfsmount * mnt, const int fmode)
54541+{
54542+ return 1;
54543+}
54544+
54545+__u32
54546+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
54547+ mode_t mode)
54548+{
54549+ return 1;
54550+}
54551+
54552+__u32
54553+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54554+ mode_t mode)
54555+{
54556+ return 1;
54557+}
54558+
54559+__u32
54560+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54561+{
54562+ return 1;
54563+}
54564+
54565+__u32
54566+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54567+{
54568+ return 1;
54569+}
54570+
54571+void
54572+grsecurity_init(void)
54573+{
54574+ return;
54575+}
54576+
54577+__u32
54578+gr_acl_handle_mknod(const struct dentry * new_dentry,
54579+ const struct dentry * parent_dentry,
54580+ const struct vfsmount * parent_mnt,
54581+ const int mode)
54582+{
54583+ return 1;
54584+}
54585+
54586+__u32
54587+gr_acl_handle_mkdir(const struct dentry * new_dentry,
54588+ const struct dentry * parent_dentry,
54589+ const struct vfsmount * parent_mnt)
54590+{
54591+ return 1;
54592+}
54593+
54594+__u32
54595+gr_acl_handle_symlink(const struct dentry * new_dentry,
54596+ const struct dentry * parent_dentry,
54597+ const struct vfsmount * parent_mnt, const char *from)
54598+{
54599+ return 1;
54600+}
54601+
54602+__u32
54603+gr_acl_handle_link(const struct dentry * new_dentry,
54604+ const struct dentry * parent_dentry,
54605+ const struct vfsmount * parent_mnt,
54606+ const struct dentry * old_dentry,
54607+ const struct vfsmount * old_mnt, const char *to)
54608+{
54609+ return 1;
54610+}
54611+
54612+int
54613+gr_acl_handle_rename(const struct dentry *new_dentry,
54614+ const struct dentry *parent_dentry,
54615+ const struct vfsmount *parent_mnt,
54616+ const struct dentry *old_dentry,
54617+ const struct inode *old_parent_inode,
54618+ const struct vfsmount *old_mnt, const char *newname)
54619+{
54620+ return 0;
54621+}
54622+
54623+int
54624+gr_acl_handle_filldir(const struct file *file, const char *name,
54625+ const int namelen, const ino_t ino)
54626+{
54627+ return 1;
54628+}
54629+
54630+int
54631+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54632+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54633+{
54634+ return 1;
54635+}
54636+
54637+int
54638+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
54639+{
54640+ return 0;
54641+}
54642+
54643+int
54644+gr_search_accept(const struct socket *sock)
54645+{
54646+ return 0;
54647+}
54648+
54649+int
54650+gr_search_listen(const struct socket *sock)
54651+{
54652+ return 0;
54653+}
54654+
54655+int
54656+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
54657+{
54658+ return 0;
54659+}
54660+
54661+__u32
54662+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
54663+{
54664+ return 1;
54665+}
54666+
54667+__u32
54668+gr_acl_handle_creat(const struct dentry * dentry,
54669+ const struct dentry * p_dentry,
54670+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54671+ const int imode)
54672+{
54673+ return 1;
54674+}
54675+
54676+void
54677+gr_acl_handle_exit(void)
54678+{
54679+ return;
54680+}
54681+
54682+int
54683+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54684+{
54685+ return 1;
54686+}
54687+
54688+void
54689+gr_set_role_label(const uid_t uid, const gid_t gid)
54690+{
54691+ return;
54692+}
54693+
54694+int
54695+gr_acl_handle_procpidmem(const struct task_struct *task)
54696+{
54697+ return 0;
54698+}
54699+
54700+int
54701+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
54702+{
54703+ return 0;
54704+}
54705+
54706+int
54707+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
54708+{
54709+ return 0;
54710+}
54711+
54712+void
54713+gr_set_kernel_label(struct task_struct *task)
54714+{
54715+ return;
54716+}
54717+
54718+int
54719+gr_check_user_change(int real, int effective, int fs)
54720+{
54721+ return 0;
54722+}
54723+
54724+int
54725+gr_check_group_change(int real, int effective, int fs)
54726+{
54727+ return 0;
54728+}
54729+
54730+int gr_acl_enable_at_secure(void)
54731+{
54732+ return 0;
54733+}
54734+
54735+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54736+{
54737+ return dentry->d_inode->i_sb->s_dev;
54738+}
54739+
54740+EXPORT_SYMBOL(gr_learn_resource);
54741+EXPORT_SYMBOL(gr_set_kernel_label);
54742+#ifdef CONFIG_SECURITY
54743+EXPORT_SYMBOL(gr_check_user_change);
54744+EXPORT_SYMBOL(gr_check_group_change);
54745+#endif
54746diff -urNp linux-3.1.4/grsecurity/grsec_exec.c linux-3.1.4/grsecurity/grsec_exec.c
54747--- linux-3.1.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
54748+++ linux-3.1.4/grsecurity/grsec_exec.c 2011-11-16 18:40:31.000000000 -0500
54749@@ -0,0 +1,146 @@
54750+#include <linux/kernel.h>
54751+#include <linux/sched.h>
54752+#include <linux/file.h>
54753+#include <linux/binfmts.h>
54754+#include <linux/fs.h>
54755+#include <linux/types.h>
54756+#include <linux/grdefs.h>
54757+#include <linux/grsecurity.h>
54758+#include <linux/grinternal.h>
54759+#include <linux/capability.h>
54760+#include <linux/module.h>
54761+
54762+#include <asm/uaccess.h>
54763+
54764+#ifdef CONFIG_GRKERNSEC_EXECLOG
54765+static char gr_exec_arg_buf[132];
54766+static DEFINE_MUTEX(gr_exec_arg_mutex);
54767+#endif
54768+
54769+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
54770+
54771+void
54772+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
54773+{
54774+#ifdef CONFIG_GRKERNSEC_EXECLOG
54775+ char *grarg = gr_exec_arg_buf;
54776+ unsigned int i, x, execlen = 0;
54777+ char c;
54778+
54779+ if (!((grsec_enable_execlog && grsec_enable_group &&
54780+ in_group_p(grsec_audit_gid))
54781+ || (grsec_enable_execlog && !grsec_enable_group)))
54782+ return;
54783+
54784+ mutex_lock(&gr_exec_arg_mutex);
54785+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
54786+
54787+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
54788+ const char __user *p;
54789+ unsigned int len;
54790+
54791+ p = get_user_arg_ptr(argv, i);
54792+ if (IS_ERR(p))
54793+ goto log;
54794+
54795+ len = strnlen_user(p, 128 - execlen);
54796+ if (len > 128 - execlen)
54797+ len = 128 - execlen;
54798+ else if (len > 0)
54799+ len--;
54800+ if (copy_from_user(grarg + execlen, p, len))
54801+ goto log;
54802+
54803+ /* rewrite unprintable characters */
54804+ for (x = 0; x < len; x++) {
54805+ c = *(grarg + execlen + x);
54806+ if (c < 32 || c > 126)
54807+ *(grarg + execlen + x) = ' ';
54808+ }
54809+
54810+ execlen += len;
54811+ *(grarg + execlen) = ' ';
54812+ *(grarg + execlen + 1) = '\0';
54813+ execlen++;
54814+ }
54815+
54816+ log:
54817+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54818+ bprm->file->f_path.mnt, grarg);
54819+ mutex_unlock(&gr_exec_arg_mutex);
54820+#endif
54821+ return;
54822+}
54823+
54824+#ifdef CONFIG_GRKERNSEC
54825+extern int gr_acl_is_capable(const int cap);
54826+extern int gr_acl_is_capable_nolog(const int cap);
54827+extern int gr_chroot_is_capable(const int cap);
54828+extern int gr_chroot_is_capable_nolog(const int cap);
54829+#endif
54830+
54831+const char *captab_log[] = {
54832+ "CAP_CHOWN",
54833+ "CAP_DAC_OVERRIDE",
54834+ "CAP_DAC_READ_SEARCH",
54835+ "CAP_FOWNER",
54836+ "CAP_FSETID",
54837+ "CAP_KILL",
54838+ "CAP_SETGID",
54839+ "CAP_SETUID",
54840+ "CAP_SETPCAP",
54841+ "CAP_LINUX_IMMUTABLE",
54842+ "CAP_NET_BIND_SERVICE",
54843+ "CAP_NET_BROADCAST",
54844+ "CAP_NET_ADMIN",
54845+ "CAP_NET_RAW",
54846+ "CAP_IPC_LOCK",
54847+ "CAP_IPC_OWNER",
54848+ "CAP_SYS_MODULE",
54849+ "CAP_SYS_RAWIO",
54850+ "CAP_SYS_CHROOT",
54851+ "CAP_SYS_PTRACE",
54852+ "CAP_SYS_PACCT",
54853+ "CAP_SYS_ADMIN",
54854+ "CAP_SYS_BOOT",
54855+ "CAP_SYS_NICE",
54856+ "CAP_SYS_RESOURCE",
54857+ "CAP_SYS_TIME",
54858+ "CAP_SYS_TTY_CONFIG",
54859+ "CAP_MKNOD",
54860+ "CAP_LEASE",
54861+ "CAP_AUDIT_WRITE",
54862+ "CAP_AUDIT_CONTROL",
54863+ "CAP_SETFCAP",
54864+ "CAP_MAC_OVERRIDE",
54865+ "CAP_MAC_ADMIN",
54866+ "CAP_SYSLOG",
54867+ "CAP_WAKE_ALARM"
54868+};
54869+
54870+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
54871+
54872+int gr_is_capable(const int cap)
54873+{
54874+#ifdef CONFIG_GRKERNSEC
54875+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
54876+ return 1;
54877+ return 0;
54878+#else
54879+ return 1;
54880+#endif
54881+}
54882+
54883+int gr_is_capable_nolog(const int cap)
54884+{
54885+#ifdef CONFIG_GRKERNSEC
54886+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
54887+ return 1;
54888+ return 0;
54889+#else
54890+ return 1;
54891+#endif
54892+}
54893+
54894+EXPORT_SYMBOL(gr_is_capable);
54895+EXPORT_SYMBOL(gr_is_capable_nolog);
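
[Illustrative sketch, not part of the patch] To make the argv logging above easier to follow: gr_handle_exec_args() copies the exec arguments into a fixed 132-byte buffer, caps the payload at 128 bytes, replaces non-printable bytes with spaces, and joins arguments with spaces. Below is a minimal standalone userspace analogue; the helper name format_args() and the use of strlen() in place of strnlen_user()/copy_from_user() are assumptions made for the illustration.

#include <stdio.h>
#include <string.h>

static char arg_buf[132];

static const char *format_args(int argc, char **argv)
{
	unsigned int execlen = 0;
	int i;

	memset(arg_buf, 0, sizeof(arg_buf));

	for (i = 0; i < argc && execlen < 128; i++) {
		size_t len = strlen(argv[i]), x;

		if (len > 128 - execlen)
			len = 128 - execlen;
		memcpy(arg_buf + execlen, argv[i], len);

		/* rewrite unprintable characters, as the patch does */
		for (x = 0; x < len; x++) {
			char c = arg_buf[execlen + x];

			if (c < 32 || c > 126)
				arg_buf[execlen + x] = ' ';
		}

		/* join arguments with a single space */
		execlen += len;
		arg_buf[execlen] = ' ';
		arg_buf[execlen + 1] = '\0';
		execlen++;
	}
	return arg_buf;
}

int main(int argc, char **argv)
{
	printf("exec: %s\n", format_args(argc, argv));
	return 0;
}
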
54896diff -urNp linux-3.1.4/grsecurity/grsec_fifo.c linux-3.1.4/grsecurity/grsec_fifo.c
54897--- linux-3.1.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
54898+++ linux-3.1.4/grsecurity/grsec_fifo.c 2011-11-16 18:40:31.000000000 -0500
54899@@ -0,0 +1,24 @@
54900+#include <linux/kernel.h>
54901+#include <linux/sched.h>
54902+#include <linux/fs.h>
54903+#include <linux/file.h>
54904+#include <linux/grinternal.h>
54905+
54906+int
54907+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
54908+ const struct dentry *dir, const int flag, const int acc_mode)
54909+{
54910+#ifdef CONFIG_GRKERNSEC_FIFO
54911+ const struct cred *cred = current_cred();
54912+
54913+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
54914+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
54915+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
54916+ (cred->fsuid != dentry->d_inode->i_uid)) {
54917+ if (!inode_permission(dentry->d_inode, acc_mode))
54918+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
54919+ return -EACCES;
54920+ }
54921+#endif
54922+ return 0;
54923+}
54924diff -urNp linux-3.1.4/grsecurity/grsec_fork.c linux-3.1.4/grsecurity/grsec_fork.c
54925--- linux-3.1.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
54926+++ linux-3.1.4/grsecurity/grsec_fork.c 2011-11-16 18:40:31.000000000 -0500
54927@@ -0,0 +1,23 @@
54928+#include <linux/kernel.h>
54929+#include <linux/sched.h>
54930+#include <linux/grsecurity.h>
54931+#include <linux/grinternal.h>
54932+#include <linux/errno.h>
54933+
54934+void
54935+gr_log_forkfail(const int retval)
54936+{
54937+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54938+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
54939+ switch (retval) {
54940+ case -EAGAIN:
54941+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
54942+ break;
54943+ case -ENOMEM:
54944+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
54945+ break;
54946+ }
54947+ }
54948+#endif
54949+ return;
54950+}
54951diff -urNp linux-3.1.4/grsecurity/grsec_init.c linux-3.1.4/grsecurity/grsec_init.c
54952--- linux-3.1.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
54953+++ linux-3.1.4/grsecurity/grsec_init.c 2011-11-16 18:40:31.000000000 -0500
54954@@ -0,0 +1,269 @@
54955+#include <linux/kernel.h>
54956+#include <linux/sched.h>
54957+#include <linux/mm.h>
54958+#include <linux/gracl.h>
54959+#include <linux/slab.h>
54960+#include <linux/vmalloc.h>
54961+#include <linux/percpu.h>
54962+#include <linux/module.h>
54963+
54964+int grsec_enable_brute;
54965+int grsec_enable_link;
54966+int grsec_enable_dmesg;
54967+int grsec_enable_harden_ptrace;
54968+int grsec_enable_fifo;
54969+int grsec_enable_execlog;
54970+int grsec_enable_signal;
54971+int grsec_enable_forkfail;
54972+int grsec_enable_audit_ptrace;
54973+int grsec_enable_time;
54974+int grsec_enable_audit_textrel;
54975+int grsec_enable_group;
54976+int grsec_audit_gid;
54977+int grsec_enable_chdir;
54978+int grsec_enable_mount;
54979+int grsec_enable_rofs;
54980+int grsec_enable_chroot_findtask;
54981+int grsec_enable_chroot_mount;
54982+int grsec_enable_chroot_shmat;
54983+int grsec_enable_chroot_fchdir;
54984+int grsec_enable_chroot_double;
54985+int grsec_enable_chroot_pivot;
54986+int grsec_enable_chroot_chdir;
54987+int grsec_enable_chroot_chmod;
54988+int grsec_enable_chroot_mknod;
54989+int grsec_enable_chroot_nice;
54990+int grsec_enable_chroot_execlog;
54991+int grsec_enable_chroot_caps;
54992+int grsec_enable_chroot_sysctl;
54993+int grsec_enable_chroot_unix;
54994+int grsec_enable_tpe;
54995+int grsec_tpe_gid;
54996+int grsec_enable_blackhole;
54997+#ifdef CONFIG_IPV6_MODULE
54998+EXPORT_SYMBOL(grsec_enable_blackhole);
54999+#endif
55000+int grsec_lastack_retries;
55001+int grsec_enable_tpe_all;
55002+int grsec_enable_tpe_invert;
55003+int grsec_enable_socket_all;
55004+int grsec_socket_all_gid;
55005+int grsec_enable_socket_client;
55006+int grsec_socket_client_gid;
55007+int grsec_enable_socket_server;
55008+int grsec_socket_server_gid;
55009+int grsec_resource_logging;
55010+int grsec_disable_privio;
55011+int grsec_enable_log_rwxmaps;
55012+int grsec_lock;
55013+
55014+DEFINE_SPINLOCK(grsec_alert_lock);
55015+unsigned long grsec_alert_wtime = 0;
55016+unsigned long grsec_alert_fyet = 0;
55017+
55018+DEFINE_SPINLOCK(grsec_audit_lock);
55019+
55020+DEFINE_RWLOCK(grsec_exec_file_lock);
55021+
55022+char *gr_shared_page[4];
55023+
55024+char *gr_alert_log_fmt;
55025+char *gr_audit_log_fmt;
55026+char *gr_alert_log_buf;
55027+char *gr_audit_log_buf;
55028+
55029+extern struct gr_arg *gr_usermode;
55030+extern unsigned char *gr_system_salt;
55031+extern unsigned char *gr_system_sum;
55032+
55033+void __init
55034+grsecurity_init(void)
55035+{
55036+ int j;
55037+ /* create the per-cpu shared pages */
55038+
55039+#ifdef CONFIG_X86
55040+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55041+#endif
55042+
55043+ for (j = 0; j < 4; j++) {
55044+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55045+ if (gr_shared_page[j] == NULL) {
55046+ panic("Unable to allocate grsecurity shared page");
55047+ return;
55048+ }
55049+ }
55050+
55051+ /* allocate log buffers */
55052+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55053+ if (!gr_alert_log_fmt) {
55054+ panic("Unable to allocate grsecurity alert log format buffer");
55055+ return;
55056+ }
55057+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55058+ if (!gr_audit_log_fmt) {
55059+ panic("Unable to allocate grsecurity audit log format buffer");
55060+ return;
55061+ }
55062+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55063+ if (!gr_alert_log_buf) {
55064+ panic("Unable to allocate grsecurity alert log buffer");
55065+ return;
55066+ }
55067+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55068+ if (!gr_audit_log_buf) {
55069+ panic("Unable to allocate grsecurity audit log buffer");
55070+ return;
55071+ }
55072+
55073+ /* allocate memory for authentication structure */
55074+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55075+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55076+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55077+
55078+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55079+ panic("Unable to allocate grsecurity authentication structure");
55080+ return;
55081+ }
55082+
55083+
55084+#ifdef CONFIG_GRKERNSEC_IO
55085+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55086+ grsec_disable_privio = 1;
55087+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55088+ grsec_disable_privio = 1;
55089+#else
55090+ grsec_disable_privio = 0;
55091+#endif
55092+#endif
55093+
55094+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55095+ /* for backward compatibility, tpe_invert always defaults to on if
55096+ enabled in the kernel
55097+ */
55098+ grsec_enable_tpe_invert = 1;
55099+#endif
55100+
55101+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55102+#ifndef CONFIG_GRKERNSEC_SYSCTL
55103+ grsec_lock = 1;
55104+#endif
55105+
55106+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55107+ grsec_enable_audit_textrel = 1;
55108+#endif
55109+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55110+ grsec_enable_log_rwxmaps = 1;
55111+#endif
55112+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55113+ grsec_enable_group = 1;
55114+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55115+#endif
55116+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55117+ grsec_enable_chdir = 1;
55118+#endif
55119+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55120+ grsec_enable_harden_ptrace = 1;
55121+#endif
55122+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55123+ grsec_enable_mount = 1;
55124+#endif
55125+#ifdef CONFIG_GRKERNSEC_LINK
55126+ grsec_enable_link = 1;
55127+#endif
55128+#ifdef CONFIG_GRKERNSEC_BRUTE
55129+ grsec_enable_brute = 1;
55130+#endif
55131+#ifdef CONFIG_GRKERNSEC_DMESG
55132+ grsec_enable_dmesg = 1;
55133+#endif
55134+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55135+ grsec_enable_blackhole = 1;
55136+ grsec_lastack_retries = 4;
55137+#endif
55138+#ifdef CONFIG_GRKERNSEC_FIFO
55139+ grsec_enable_fifo = 1;
55140+#endif
55141+#ifdef CONFIG_GRKERNSEC_EXECLOG
55142+ grsec_enable_execlog = 1;
55143+#endif
55144+#ifdef CONFIG_GRKERNSEC_SIGNAL
55145+ grsec_enable_signal = 1;
55146+#endif
55147+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55148+ grsec_enable_forkfail = 1;
55149+#endif
55150+#ifdef CONFIG_GRKERNSEC_TIME
55151+ grsec_enable_time = 1;
55152+#endif
55153+#ifdef CONFIG_GRKERNSEC_RESLOG
55154+ grsec_resource_logging = 1;
55155+#endif
55156+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55157+ grsec_enable_chroot_findtask = 1;
55158+#endif
55159+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55160+ grsec_enable_chroot_unix = 1;
55161+#endif
55162+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55163+ grsec_enable_chroot_mount = 1;
55164+#endif
55165+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55166+ grsec_enable_chroot_fchdir = 1;
55167+#endif
55168+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55169+ grsec_enable_chroot_shmat = 1;
55170+#endif
55171+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55172+ grsec_enable_audit_ptrace = 1;
55173+#endif
55174+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55175+ grsec_enable_chroot_double = 1;
55176+#endif
55177+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55178+ grsec_enable_chroot_pivot = 1;
55179+#endif
55180+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55181+ grsec_enable_chroot_chdir = 1;
55182+#endif
55183+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55184+ grsec_enable_chroot_chmod = 1;
55185+#endif
55186+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55187+ grsec_enable_chroot_mknod = 1;
55188+#endif
55189+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55190+ grsec_enable_chroot_nice = 1;
55191+#endif
55192+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55193+ grsec_enable_chroot_execlog = 1;
55194+#endif
55195+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55196+ grsec_enable_chroot_caps = 1;
55197+#endif
55198+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55199+ grsec_enable_chroot_sysctl = 1;
55200+#endif
55201+#ifdef CONFIG_GRKERNSEC_TPE
55202+ grsec_enable_tpe = 1;
55203+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55204+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55205+ grsec_enable_tpe_all = 1;
55206+#endif
55207+#endif
55208+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55209+ grsec_enable_socket_all = 1;
55210+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55211+#endif
55212+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55213+ grsec_enable_socket_client = 1;
55214+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55215+#endif
55216+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55217+ grsec_enable_socket_server = 1;
55218+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55219+#endif
55220+#endif
55221+
55222+ return;
55223+}
55224diff -urNp linux-3.1.4/grsecurity/grsec_link.c linux-3.1.4/grsecurity/grsec_link.c
55225--- linux-3.1.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
55226+++ linux-3.1.4/grsecurity/grsec_link.c 2011-11-16 18:40:31.000000000 -0500
55227@@ -0,0 +1,43 @@
55228+#include <linux/kernel.h>
55229+#include <linux/sched.h>
55230+#include <linux/fs.h>
55231+#include <linux/file.h>
55232+#include <linux/grinternal.h>
55233+
55234+int
55235+gr_handle_follow_link(const struct inode *parent,
55236+ const struct inode *inode,
55237+ const struct dentry *dentry, const struct vfsmount *mnt)
55238+{
55239+#ifdef CONFIG_GRKERNSEC_LINK
55240+ const struct cred *cred = current_cred();
55241+
55242+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55243+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55244+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55245+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55246+ return -EACCES;
55247+ }
55248+#endif
55249+ return 0;
55250+}
55251+
55252+int
55253+gr_handle_hardlink(const struct dentry *dentry,
55254+ const struct vfsmount *mnt,
55255+ struct inode *inode, const int mode, const char *to)
55256+{
55257+#ifdef CONFIG_GRKERNSEC_LINK
55258+ const struct cred *cred = current_cred();
55259+
55260+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55261+ (!S_ISREG(mode) || (mode & S_ISUID) ||
55262+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55263+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55264+ !capable(CAP_FOWNER) && cred->uid) {
55265+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55266+ return -EPERM;
55267+ }
55268+#endif
55269+ return 0;
55270+}
55271diff -urNp linux-3.1.4/grsecurity/grsec_log.c linux-3.1.4/grsecurity/grsec_log.c
55272--- linux-3.1.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
55273+++ linux-3.1.4/grsecurity/grsec_log.c 2011-11-16 18:40:31.000000000 -0500
55274@@ -0,0 +1,322 @@
55275+#include <linux/kernel.h>
55276+#include <linux/sched.h>
55277+#include <linux/file.h>
55278+#include <linux/tty.h>
55279+#include <linux/fs.h>
55280+#include <linux/grinternal.h>
55281+
55282+#ifdef CONFIG_TREE_PREEMPT_RCU
55283+#define DISABLE_PREEMPT() preempt_disable()
55284+#define ENABLE_PREEMPT() preempt_enable()
55285+#else
55286+#define DISABLE_PREEMPT()
55287+#define ENABLE_PREEMPT()
55288+#endif
55289+
55290+#define BEGIN_LOCKS(x) \
55291+ DISABLE_PREEMPT(); \
55292+ rcu_read_lock(); \
55293+ read_lock(&tasklist_lock); \
55294+ read_lock(&grsec_exec_file_lock); \
55295+ if (x != GR_DO_AUDIT) \
55296+ spin_lock(&grsec_alert_lock); \
55297+ else \
55298+ spin_lock(&grsec_audit_lock)
55299+
55300+#define END_LOCKS(x) \
55301+ if (x != GR_DO_AUDIT) \
55302+ spin_unlock(&grsec_alert_lock); \
55303+ else \
55304+ spin_unlock(&grsec_audit_lock); \
55305+ read_unlock(&grsec_exec_file_lock); \
55306+ read_unlock(&tasklist_lock); \
55307+ rcu_read_unlock(); \
55308+ ENABLE_PREEMPT(); \
55309+ if (x == GR_DONT_AUDIT) \
55310+ gr_handle_alertkill(current)
55311+
55312+enum {
55313+ FLOODING,
55314+ NO_FLOODING
55315+};
55316+
55317+extern char *gr_alert_log_fmt;
55318+extern char *gr_audit_log_fmt;
55319+extern char *gr_alert_log_buf;
55320+extern char *gr_audit_log_buf;
55321+
55322+static int gr_log_start(int audit)
55323+{
55324+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55325+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55326+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55327+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55328+ unsigned long curr_secs = get_seconds();
55329+
55330+ if (audit == GR_DO_AUDIT)
55331+ goto set_fmt;
55332+
55333+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55334+ grsec_alert_wtime = curr_secs;
55335+ grsec_alert_fyet = 0;
55336+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55337+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55338+ grsec_alert_fyet++;
55339+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55340+ grsec_alert_wtime = curr_secs;
55341+ grsec_alert_fyet++;
55342+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55343+ return FLOODING;
55344+ }
55345+ else return FLOODING;
55346+
55347+set_fmt:
55348+#endif
55349+ memset(buf, 0, PAGE_SIZE);
55350+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
55351+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55352+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55353+ } else if (current->signal->curr_ip) {
55354+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55355+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55356+ } else if (gr_acl_is_enabled()) {
55357+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55358+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55359+ } else {
55360+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
55361+ strcpy(buf, fmt);
55362+ }
55363+
55364+ return NO_FLOODING;
55365+}
55366+
55367+static void gr_log_middle(int audit, const char *msg, va_list ap)
55368+ __attribute__ ((format (printf, 2, 0)));
55369+
55370+static void gr_log_middle(int audit, const char *msg, va_list ap)
55371+{
55372+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55373+ unsigned int len = strlen(buf);
55374+
55375+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55376+
55377+ return;
55378+}
55379+
55380+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55381+ __attribute__ ((format (printf, 2, 3)));
55382+
55383+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55384+{
55385+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55386+ unsigned int len = strlen(buf);
55387+ va_list ap;
55388+
55389+ va_start(ap, msg);
55390+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55391+ va_end(ap);
55392+
55393+ return;
55394+}
55395+
55396+static void gr_log_end(int audit, int append_default)
55397+{
55398+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55399+
55400+ if (append_default) {
55401+ unsigned int len = strlen(buf);
55402+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55403+ }
55404+
55405+ printk("%s\n", buf);
55406+
55407+ return;
55408+}
55409+
55410+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55411+{
55412+ int logtype;
55413+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55414+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55415+ void *voidptr = NULL;
55416+ int num1 = 0, num2 = 0;
55417+ unsigned long ulong1 = 0, ulong2 = 0;
55418+ struct dentry *dentry = NULL;
55419+ struct vfsmount *mnt = NULL;
55420+ struct file *file = NULL;
55421+ struct task_struct *task = NULL;
55422+ const struct cred *cred, *pcred;
55423+ va_list ap;
55424+
55425+ BEGIN_LOCKS(audit);
55426+ logtype = gr_log_start(audit);
55427+ if (logtype == FLOODING) {
55428+ END_LOCKS(audit);
55429+ return;
55430+ }
55431+ va_start(ap, argtypes);
55432+ switch (argtypes) {
55433+ case GR_TTYSNIFF:
55434+ task = va_arg(ap, struct task_struct *);
55435+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55436+ break;
55437+ case GR_SYSCTL_HIDDEN:
55438+ str1 = va_arg(ap, char *);
55439+ gr_log_middle_varargs(audit, msg, result, str1);
55440+ break;
55441+ case GR_RBAC:
55442+ dentry = va_arg(ap, struct dentry *);
55443+ mnt = va_arg(ap, struct vfsmount *);
55444+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55445+ break;
55446+ case GR_RBAC_STR:
55447+ dentry = va_arg(ap, struct dentry *);
55448+ mnt = va_arg(ap, struct vfsmount *);
55449+ str1 = va_arg(ap, char *);
55450+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55451+ break;
55452+ case GR_STR_RBAC:
55453+ str1 = va_arg(ap, char *);
55454+ dentry = va_arg(ap, struct dentry *);
55455+ mnt = va_arg(ap, struct vfsmount *);
55456+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55457+ break;
55458+ case GR_RBAC_MODE2:
55459+ dentry = va_arg(ap, struct dentry *);
55460+ mnt = va_arg(ap, struct vfsmount *);
55461+ str1 = va_arg(ap, char *);
55462+ str2 = va_arg(ap, char *);
55463+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55464+ break;
55465+ case GR_RBAC_MODE3:
55466+ dentry = va_arg(ap, struct dentry *);
55467+ mnt = va_arg(ap, struct vfsmount *);
55468+ str1 = va_arg(ap, char *);
55469+ str2 = va_arg(ap, char *);
55470+ str3 = va_arg(ap, char *);
55471+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55472+ break;
55473+ case GR_FILENAME:
55474+ dentry = va_arg(ap, struct dentry *);
55475+ mnt = va_arg(ap, struct vfsmount *);
55476+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55477+ break;
55478+ case GR_STR_FILENAME:
55479+ str1 = va_arg(ap, char *);
55480+ dentry = va_arg(ap, struct dentry *);
55481+ mnt = va_arg(ap, struct vfsmount *);
55482+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55483+ break;
55484+ case GR_FILENAME_STR:
55485+ dentry = va_arg(ap, struct dentry *);
55486+ mnt = va_arg(ap, struct vfsmount *);
55487+ str1 = va_arg(ap, char *);
55488+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55489+ break;
55490+ case GR_FILENAME_TWO_INT:
55491+ dentry = va_arg(ap, struct dentry *);
55492+ mnt = va_arg(ap, struct vfsmount *);
55493+ num1 = va_arg(ap, int);
55494+ num2 = va_arg(ap, int);
55495+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55496+ break;
55497+ case GR_FILENAME_TWO_INT_STR:
55498+ dentry = va_arg(ap, struct dentry *);
55499+ mnt = va_arg(ap, struct vfsmount *);
55500+ num1 = va_arg(ap, int);
55501+ num2 = va_arg(ap, int);
55502+ str1 = va_arg(ap, char *);
55503+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55504+ break;
55505+ case GR_TEXTREL:
55506+ file = va_arg(ap, struct file *);
55507+ ulong1 = va_arg(ap, unsigned long);
55508+ ulong2 = va_arg(ap, unsigned long);
55509+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55510+ break;
55511+ case GR_PTRACE:
55512+ task = va_arg(ap, struct task_struct *);
55513+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55514+ break;
55515+ case GR_RESOURCE:
55516+ task = va_arg(ap, struct task_struct *);
55517+ cred = __task_cred(task);
55518+ pcred = __task_cred(task->real_parent);
55519+ ulong1 = va_arg(ap, unsigned long);
55520+ str1 = va_arg(ap, char *);
55521+ ulong2 = va_arg(ap, unsigned long);
55522+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55523+ break;
55524+ case GR_CAP:
55525+ task = va_arg(ap, struct task_struct *);
55526+ cred = __task_cred(task);
55527+ pcred = __task_cred(task->real_parent);
55528+ str1 = va_arg(ap, char *);
55529+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55530+ break;
55531+ case GR_SIG:
55532+ str1 = va_arg(ap, char *);
55533+ voidptr = va_arg(ap, void *);
55534+ gr_log_middle_varargs(audit, msg, str1, voidptr);
55535+ break;
55536+ case GR_SIG2:
55537+ task = va_arg(ap, struct task_struct *);
55538+ cred = __task_cred(task);
55539+ pcred = __task_cred(task->real_parent);
55540+ num1 = va_arg(ap, int);
55541+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55542+ break;
55543+ case GR_CRASH1:
55544+ task = va_arg(ap, struct task_struct *);
55545+ cred = __task_cred(task);
55546+ pcred = __task_cred(task->real_parent);
55547+ ulong1 = va_arg(ap, unsigned long);
55548+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55549+ break;
55550+ case GR_CRASH2:
55551+ task = va_arg(ap, struct task_struct *);
55552+ cred = __task_cred(task);
55553+ pcred = __task_cred(task->real_parent);
55554+ ulong1 = va_arg(ap, unsigned long);
55555+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55556+ break;
55557+ case GR_RWXMAP:
55558+ file = va_arg(ap, struct file *);
55559+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55560+ break;
55561+ case GR_PSACCT:
55562+ {
55563+ unsigned int wday, cday;
55564+ __u8 whr, chr;
55565+ __u8 wmin, cmin;
55566+ __u8 wsec, csec;
55567+ char cur_tty[64] = { 0 };
55568+ char parent_tty[64] = { 0 };
55569+
55570+ task = va_arg(ap, struct task_struct *);
55571+ wday = va_arg(ap, unsigned int);
55572+ cday = va_arg(ap, unsigned int);
55573+ whr = va_arg(ap, int);
55574+ chr = va_arg(ap, int);
55575+ wmin = va_arg(ap, int);
55576+ cmin = va_arg(ap, int);
55577+ wsec = va_arg(ap, int);
55578+ csec = va_arg(ap, int);
55579+ ulong1 = va_arg(ap, unsigned long);
55580+ cred = __task_cred(task);
55581+ pcred = __task_cred(task->real_parent);
55582+
55583+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55584+ }
55585+ break;
55586+ default:
55587+ gr_log_middle(audit, msg, ap);
55588+ }
55589+ va_end(ap);
55590+ // these don't need DEFAULTSECARGS printed on the end
55591+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55592+ gr_log_end(audit, 0);
55593+ else
55594+ gr_log_end(audit, 1);
55595+ END_LOCKS(audit);
55596+}
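
[Illustrative sketch, not part of the patch] The alert path in gr_log_start() above rate-limits logging: once a burst of alerts (CONFIG_GRKERNSEC_FLOODBURST) arrives inside one time window (CONFIG_GRKERNSEC_FLOODTIME seconds), a single "logging disabled" message is printed and further alerts are dropped until the window measured from the last over-burst alert expires. The standalone program below mirrors that branch structure; the constants FLOODTIME/FLOODBURST and the helper names are placeholders, not values taken from the patch.

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* seconds per window (placeholder) */
#define FLOODBURST 6	/* burst allowance per window (placeholder) */

static unsigned long alert_wtime;	/* start of the current window */
static unsigned long alert_fyet;	/* alerts counted in the window */

static unsigned long now_secs(void)
{
	return (unsigned long)time(NULL);
}

/* Returns 1 if the alert may be logged, 0 if it is suppressed. */
static int alert_allowed(void)
{
	unsigned long curr = now_secs();

	if (!alert_wtime || curr > alert_wtime + FLOODTIME) {
		/* window expired: start a new one and log */
		alert_wtime = curr;
		alert_fyet = 0;
	} else if (alert_fyet < FLOODBURST) {
		/* still inside the burst allowance */
		alert_fyet++;
	} else if (alert_fyet == FLOODBURST) {
		/* burst exhausted: announce suppression once, then go quiet */
		alert_wtime = curr;
		alert_fyet++;
		printf("more alerts, logging disabled for %d seconds\n",
		       FLOODTIME);
		return 0;
	} else {
		return 0;
	}
	return 1;
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		printf("alert %d: %s\n", i,
		       alert_allowed() ? "logged" : "suppressed");
	return 0;
}
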
55597diff -urNp linux-3.1.4/grsecurity/grsec_mem.c linux-3.1.4/grsecurity/grsec_mem.c
55598--- linux-3.1.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
55599+++ linux-3.1.4/grsecurity/grsec_mem.c 2011-11-16 18:40:31.000000000 -0500
55600@@ -0,0 +1,33 @@
55601+#include <linux/kernel.h>
55602+#include <linux/sched.h>
55603+#include <linux/mm.h>
55604+#include <linux/mman.h>
55605+#include <linux/grinternal.h>
55606+
55607+void
55608+gr_handle_ioperm(void)
55609+{
55610+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
55611+ return;
55612+}
55613+
55614+void
55615+gr_handle_iopl(void)
55616+{
55617+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
55618+ return;
55619+}
55620+
55621+void
55622+gr_handle_mem_readwrite(u64 from, u64 to)
55623+{
55624+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
55625+ return;
55626+}
55627+
55628+void
55629+gr_handle_vm86(void)
55630+{
55631+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
55632+ return;
55633+}
55634diff -urNp linux-3.1.4/grsecurity/grsec_mount.c linux-3.1.4/grsecurity/grsec_mount.c
55635--- linux-3.1.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
55636+++ linux-3.1.4/grsecurity/grsec_mount.c 2011-11-16 18:40:31.000000000 -0500
55637@@ -0,0 +1,62 @@
55638+#include <linux/kernel.h>
55639+#include <linux/sched.h>
55640+#include <linux/mount.h>
55641+#include <linux/grsecurity.h>
55642+#include <linux/grinternal.h>
55643+
55644+void
55645+gr_log_remount(const char *devname, const int retval)
55646+{
55647+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55648+ if (grsec_enable_mount && (retval >= 0))
55649+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
55650+#endif
55651+ return;
55652+}
55653+
55654+void
55655+gr_log_unmount(const char *devname, const int retval)
55656+{
55657+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55658+ if (grsec_enable_mount && (retval >= 0))
55659+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
55660+#endif
55661+ return;
55662+}
55663+
55664+void
55665+gr_log_mount(const char *from, const char *to, const int retval)
55666+{
55667+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55668+ if (grsec_enable_mount && (retval >= 0))
55669+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
55670+#endif
55671+ return;
55672+}
55673+
55674+int
55675+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
55676+{
55677+#ifdef CONFIG_GRKERNSEC_ROFS
55678+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
55679+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
55680+ return -EPERM;
55681+ } else
55682+ return 0;
55683+#endif
55684+ return 0;
55685+}
55686+
55687+int
55688+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
55689+{
55690+#ifdef CONFIG_GRKERNSEC_ROFS
55691+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
55692+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
55693+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
55694+ return -EPERM;
55695+ } else
55696+ return 0;
55697+#endif
55698+ return 0;
55699+}
55700diff -urNp linux-3.1.4/grsecurity/grsec_pax.c linux-3.1.4/grsecurity/grsec_pax.c
55701--- linux-3.1.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
55702+++ linux-3.1.4/grsecurity/grsec_pax.c 2011-11-16 18:40:31.000000000 -0500
55703@@ -0,0 +1,36 @@
55704+#include <linux/kernel.h>
55705+#include <linux/sched.h>
55706+#include <linux/mm.h>
55707+#include <linux/file.h>
55708+#include <linux/grinternal.h>
55709+#include <linux/grsecurity.h>
55710+
55711+void
55712+gr_log_textrel(struct vm_area_struct * vma)
55713+{
55714+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55715+ if (grsec_enable_audit_textrel)
55716+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
55717+#endif
55718+ return;
55719+}
55720+
55721+void
55722+gr_log_rwxmmap(struct file *file)
55723+{
55724+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55725+ if (grsec_enable_log_rwxmaps)
55726+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
55727+#endif
55728+ return;
55729+}
55730+
55731+void
55732+gr_log_rwxmprotect(struct file *file)
55733+{
55734+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55735+ if (grsec_enable_log_rwxmaps)
55736+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
55737+#endif
55738+ return;
55739+}
55740diff -urNp linux-3.1.4/grsecurity/grsec_ptrace.c linux-3.1.4/grsecurity/grsec_ptrace.c
55741--- linux-3.1.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
55742+++ linux-3.1.4/grsecurity/grsec_ptrace.c 2011-11-16 18:40:31.000000000 -0500
55743@@ -0,0 +1,14 @@
55744+#include <linux/kernel.h>
55745+#include <linux/sched.h>
55746+#include <linux/grinternal.h>
55747+#include <linux/grsecurity.h>
55748+
55749+void
55750+gr_audit_ptrace(struct task_struct *task)
55751+{
55752+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55753+ if (grsec_enable_audit_ptrace)
55754+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
55755+#endif
55756+ return;
55757+}
55758diff -urNp linux-3.1.4/grsecurity/grsec_sig.c linux-3.1.4/grsecurity/grsec_sig.c
55759--- linux-3.1.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
55760+++ linux-3.1.4/grsecurity/grsec_sig.c 2011-11-16 18:40:31.000000000 -0500
55761@@ -0,0 +1,206 @@
55762+#include <linux/kernel.h>
55763+#include <linux/sched.h>
55764+#include <linux/delay.h>
55765+#include <linux/grsecurity.h>
55766+#include <linux/grinternal.h>
55767+#include <linux/hardirq.h>
55768+
55769+char *signames[] = {
55770+ [SIGSEGV] = "Segmentation fault",
55771+ [SIGILL] = "Illegal instruction",
55772+ [SIGABRT] = "Abort",
55773+ [SIGBUS] = "Invalid alignment/Bus error"
55774+};
55775+
55776+void
55777+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
55778+{
55779+#ifdef CONFIG_GRKERNSEC_SIGNAL
55780+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
55781+ (sig == SIGABRT) || (sig == SIGBUS))) {
55782+ if (t->pid == current->pid) {
55783+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
55784+ } else {
55785+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
55786+ }
55787+ }
55788+#endif
55789+ return;
55790+}
55791+
55792+int
55793+gr_handle_signal(const struct task_struct *p, const int sig)
55794+{
55795+#ifdef CONFIG_GRKERNSEC
55796+ if (current->pid > 1 && gr_check_protected_task(p)) {
55797+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
55798+ return -EPERM;
55799+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
55800+ return -EPERM;
55801+ }
55802+#endif
55803+ return 0;
55804+}
55805+
55806+#ifdef CONFIG_GRKERNSEC
55807+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
55808+
55809+int gr_fake_force_sig(int sig, struct task_struct *t)
55810+{
55811+ unsigned long int flags;
55812+ int ret, blocked, ignored;
55813+ struct k_sigaction *action;
55814+
55815+ spin_lock_irqsave(&t->sighand->siglock, flags);
55816+ action = &t->sighand->action[sig-1];
55817+ ignored = action->sa.sa_handler == SIG_IGN;
55818+ blocked = sigismember(&t->blocked, sig);
55819+ if (blocked || ignored) {
55820+ action->sa.sa_handler = SIG_DFL;
55821+ if (blocked) {
55822+ sigdelset(&t->blocked, sig);
55823+ recalc_sigpending_and_wake(t);
55824+ }
55825+ }
55826+ if (action->sa.sa_handler == SIG_DFL)
55827+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
55828+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
55829+
55830+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
55831+
55832+ return ret;
55833+}
55834+#endif
55835+
55836+#ifdef CONFIG_GRKERNSEC_BRUTE
55837+#define GR_USER_BAN_TIME (15 * 60)
55838+
55839+static int __get_dumpable(unsigned long mm_flags)
55840+{
55841+ int ret;
55842+
55843+ ret = mm_flags & MMF_DUMPABLE_MASK;
55844+ return (ret >= 2) ? 2 : ret;
55845+}
55846+#endif
55847+
55848+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
55849+{
55850+#ifdef CONFIG_GRKERNSEC_BRUTE
55851+ uid_t uid = 0;
55852+
55853+ if (!grsec_enable_brute)
55854+ return;
55855+
55856+ rcu_read_lock();
55857+ read_lock(&tasklist_lock);
55858+ read_lock(&grsec_exec_file_lock);
55859+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
55860+ p->real_parent->brute = 1;
55861+ else {
55862+ const struct cred *cred = __task_cred(p), *cred2;
55863+ struct task_struct *tsk, *tsk2;
55864+
55865+ if (!__get_dumpable(mm_flags) && cred->uid) {
55866+ struct user_struct *user;
55867+
55868+ uid = cred->uid;
55869+
55870+ /* this is put upon execution past expiration */
55871+ user = find_user(uid);
55872+ if (user == NULL)
55873+ goto unlock;
55874+ user->banned = 1;
55875+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
55876+ if (user->ban_expires == ~0UL)
55877+ user->ban_expires--;
55878+
55879+ do_each_thread(tsk2, tsk) {
55880+ cred2 = __task_cred(tsk);
55881+ if (tsk != p && cred2->uid == uid)
55882+ gr_fake_force_sig(SIGKILL, tsk);
55883+ } while_each_thread(tsk2, tsk);
55884+ }
55885+ }
55886+unlock:
55887+ read_unlock(&grsec_exec_file_lock);
55888+ read_unlock(&tasklist_lock);
55889+ rcu_read_unlock();
55890+
55891+ if (uid)
55892+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
55893+
55894+#endif
55895+ return;
55896+}
55897+
55898+void gr_handle_brute_check(void)
55899+{
55900+#ifdef CONFIG_GRKERNSEC_BRUTE
55901+ if (current->brute)
55902+ msleep(30 * 1000);
55903+#endif
55904+ return;
55905+}
55906+
55907+void gr_handle_kernel_exploit(void)
55908+{
55909+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
55910+ const struct cred *cred;
55911+ struct task_struct *tsk, *tsk2;
55912+ struct user_struct *user;
55913+ uid_t uid;
55914+
55915+ if (in_irq() || in_serving_softirq() || in_nmi())
55916+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
55917+
55918+ uid = current_uid();
55919+
55920+ if (uid == 0)
55921+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
55922+ else {
55923+ /* kill all the processes of this user, hold a reference
55924+ to their creds struct, and prevent them from creating
55925+ another process until system reset
55926+ */
55927+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
55928+ /* we intentionally leak this ref */
55929+ user = get_uid(current->cred->user);
55930+ if (user) {
55931+ user->banned = 1;
55932+ user->ban_expires = ~0UL;
55933+ }
55934+
55935+ read_lock(&tasklist_lock);
55936+ do_each_thread(tsk2, tsk) {
55937+ cred = __task_cred(tsk);
55938+ if (cred->uid == uid)
55939+ gr_fake_force_sig(SIGKILL, tsk);
55940+ } while_each_thread(tsk2, tsk);
55941+ read_unlock(&tasklist_lock);
55942+ }
55943+#endif
55944+}
55945+
55946+int __gr_process_user_ban(struct user_struct *user)
55947+{
55948+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55949+ if (unlikely(user->banned)) {
55950+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
55951+ user->banned = 0;
55952+ user->ban_expires = 0;
55953+ free_uid(user);
55954+ } else
55955+ return -EPERM;
55956+ }
55957+#endif
55958+ return 0;
55959+}
55960+
55961+int gr_process_user_ban(void)
55962+{
55963+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55964+ return __gr_process_user_ban(current->cred->user);
55965+#endif
55966+ return 0;
55967+}
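
[Illustrative sketch, not part of the patch] The brute-force and kernel-exploit handlers above share one ban scheme: a per-user banned flag plus an expiry timestamp, where ~0UL means "banned until reboot" and __gr_process_user_ban() clears an expired timed ban lazily at the next check. The sketch below restates that bookkeeping in standalone form; struct user, ban_user() and process_user_ban() are invented names for the illustration.

#include <stdio.h>
#include <time.h>

#define BAN_TIME	(15 * 60)	/* mirrors GR_USER_BAN_TIME */
#define BAN_FOREVER	(~0UL)		/* permanent ban marker */

struct user {
	int banned;
	unsigned long ban_expires;
};

static unsigned long now_secs(void)
{
	return (unsigned long)time(NULL);
}

static void ban_user(struct user *u, int permanent)
{
	u->banned = 1;
	u->ban_expires = permanent ? BAN_FOREVER : now_secs() + BAN_TIME;
	/* keep a timed ban distinguishable from the permanent marker */
	if (!permanent && u->ban_expires == BAN_FOREVER)
		u->ban_expires--;
}

/* Returns -1 (EPERM-like) while the ban is active, 0 otherwise. */
static int process_user_ban(struct user *u)
{
	if (u->banned) {
		if (u->ban_expires != BAN_FOREVER &&
		    now_secs() >= u->ban_expires) {
			u->banned = 0;
			u->ban_expires = 0;
		} else {
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct user u = { 0, 0 };

	ban_user(&u, 0);
	printf("exec allowed: %s\n", process_user_ban(&u) ? "no" : "yes");
	return 0;
}
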
55968diff -urNp linux-3.1.4/grsecurity/grsec_sock.c linux-3.1.4/grsecurity/grsec_sock.c
55969--- linux-3.1.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
55970+++ linux-3.1.4/grsecurity/grsec_sock.c 2011-11-16 18:40:31.000000000 -0500
55971@@ -0,0 +1,244 @@
55972+#include <linux/kernel.h>
55973+#include <linux/module.h>
55974+#include <linux/sched.h>
55975+#include <linux/file.h>
55976+#include <linux/net.h>
55977+#include <linux/in.h>
55978+#include <linux/ip.h>
55979+#include <net/sock.h>
55980+#include <net/inet_sock.h>
55981+#include <linux/grsecurity.h>
55982+#include <linux/grinternal.h>
55983+#include <linux/gracl.h>
55984+
55985+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
55986+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
55987+
55988+EXPORT_SYMBOL(gr_search_udp_recvmsg);
55989+EXPORT_SYMBOL(gr_search_udp_sendmsg);
55990+
55991+#ifdef CONFIG_UNIX_MODULE
55992+EXPORT_SYMBOL(gr_acl_handle_unix);
55993+EXPORT_SYMBOL(gr_acl_handle_mknod);
55994+EXPORT_SYMBOL(gr_handle_chroot_unix);
55995+EXPORT_SYMBOL(gr_handle_create);
55996+#endif
55997+
55998+#ifdef CONFIG_GRKERNSEC
55999+#define gr_conn_table_size 32749
56000+struct conn_table_entry {
56001+ struct conn_table_entry *next;
56002+ struct signal_struct *sig;
56003+};
56004+
56005+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56006+DEFINE_SPINLOCK(gr_conn_table_lock);
56007+
56008+extern const char * gr_socktype_to_name(unsigned char type);
56009+extern const char * gr_proto_to_name(unsigned char proto);
56010+extern const char * gr_sockfamily_to_name(unsigned char family);
56011+
56012+static __inline__ int
56013+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56014+{
56015+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56016+}
56017+
56018+static __inline__ int
56019+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56020+ __u16 sport, __u16 dport)
56021+{
56022+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56023+ sig->gr_sport == sport && sig->gr_dport == dport))
56024+ return 1;
56025+ else
56026+ return 0;
56027+}
56028+
56029+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56030+{
56031+ struct conn_table_entry **match;
56032+ unsigned int index;
56033+
56034+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56035+ sig->gr_sport, sig->gr_dport,
56036+ gr_conn_table_size);
56037+
56038+ newent->sig = sig;
56039+
56040+ match = &gr_conn_table[index];
56041+ newent->next = *match;
56042+ *match = newent;
56043+
56044+ return;
56045+}
56046+
56047+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56048+{
56049+ struct conn_table_entry *match, *last = NULL;
56050+ unsigned int index;
56051+
56052+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56053+ sig->gr_sport, sig->gr_dport,
56054+ gr_conn_table_size);
56055+
56056+ match = gr_conn_table[index];
56057+ while (match && !conn_match(match->sig,
56058+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56059+ sig->gr_dport)) {
56060+ last = match;
56061+ match = match->next;
56062+ }
56063+
56064+ if (match) {
56065+ if (last)
56066+ last->next = match->next;
56067+ else
56068+ gr_conn_table[index] = NULL;
56069+ kfree(match);
56070+ }
56071+
56072+ return;
56073+}
56074+
56075+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56076+ __u16 sport, __u16 dport)
56077+{
56078+ struct conn_table_entry *match;
56079+ unsigned int index;
56080+
56081+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56082+
56083+ match = gr_conn_table[index];
56084+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56085+ match = match->next;
56086+
56087+ if (match)
56088+ return match->sig;
56089+ else
56090+ return NULL;
56091+}
56092+
56093+#endif
56094+
56095+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56096+{
56097+#ifdef CONFIG_GRKERNSEC
56098+ struct signal_struct *sig = task->signal;
56099+ struct conn_table_entry *newent;
56100+
56101+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56102+ if (newent == NULL)
56103+ return;
56104+ /* no bh lock needed since we are called with bh disabled */
56105+ spin_lock(&gr_conn_table_lock);
56106+ gr_del_task_from_ip_table_nolock(sig);
56107+ sig->gr_saddr = inet->inet_rcv_saddr;
56108+ sig->gr_daddr = inet->inet_daddr;
56109+ sig->gr_sport = inet->inet_sport;
56110+ sig->gr_dport = inet->inet_dport;
56111+ gr_add_to_task_ip_table_nolock(sig, newent);
56112+ spin_unlock(&gr_conn_table_lock);
56113+#endif
56114+ return;
56115+}
56116+
56117+void gr_del_task_from_ip_table(struct task_struct *task)
56118+{
56119+#ifdef CONFIG_GRKERNSEC
56120+ spin_lock_bh(&gr_conn_table_lock);
56121+ gr_del_task_from_ip_table_nolock(task->signal);
56122+ spin_unlock_bh(&gr_conn_table_lock);
56123+#endif
56124+ return;
56125+}
56126+
56127+void
56128+gr_attach_curr_ip(const struct sock *sk)
56129+{
56130+#ifdef CONFIG_GRKERNSEC
56131+ struct signal_struct *p, *set;
56132+ const struct inet_sock *inet = inet_sk(sk);
56133+
56134+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56135+ return;
56136+
56137+ set = current->signal;
56138+
56139+ spin_lock_bh(&gr_conn_table_lock);
56140+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56141+ inet->inet_dport, inet->inet_sport);
56142+ if (unlikely(p != NULL)) {
56143+ set->curr_ip = p->curr_ip;
56144+ set->used_accept = 1;
56145+ gr_del_task_from_ip_table_nolock(p);
56146+ spin_unlock_bh(&gr_conn_table_lock);
56147+ return;
56148+ }
56149+ spin_unlock_bh(&gr_conn_table_lock);
56150+
56151+ set->curr_ip = inet->inet_daddr;
56152+ set->used_accept = 1;
56153+#endif
56154+ return;
56155+}
56156+
56157+int
56158+gr_handle_sock_all(const int family, const int type, const int protocol)
56159+{
56160+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56161+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56162+ (family != AF_UNIX)) {
56163+ if (family == AF_INET)
56164+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56165+ else
56166+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56167+ return -EACCES;
56168+ }
56169+#endif
56170+ return 0;
56171+}
56172+
56173+int
56174+gr_handle_sock_server(const struct sockaddr *sck)
56175+{
56176+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56177+ if (grsec_enable_socket_server &&
56178+ in_group_p(grsec_socket_server_gid) &&
56179+ sck && (sck->sa_family != AF_UNIX) &&
56180+ (sck->sa_family != AF_LOCAL)) {
56181+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56182+ return -EACCES;
56183+ }
56184+#endif
56185+ return 0;
56186+}
56187+
56188+int
56189+gr_handle_sock_server_other(const struct sock *sck)
56190+{
56191+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56192+ if (grsec_enable_socket_server &&
56193+ in_group_p(grsec_socket_server_gid) &&
56194+ sck && (sck->sk_family != AF_UNIX) &&
56195+ (sck->sk_family != AF_LOCAL)) {
56196+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56197+ return -EACCES;
56198+ }
56199+#endif
56200+ return 0;
56201+}
56202+
56203+int
56204+gr_handle_sock_client(const struct sockaddr *sck)
56205+{
56206+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56207+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56208+ sck && (sck->sa_family != AF_UNIX) &&
56209+ (sck->sa_family != AF_LOCAL)) {
56210+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56211+ return -EACCES;
56212+ }
56213+#endif
56214+ return 0;
56215+}
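
[Illustrative sketch, not part of the patch] grsec_sock.c above keeps a chained hash table keyed by the TCP 4-tuple so that an accepted connection can be traced back to the task that owned the listening socket. The standalone sketch below uses the same hash arithmetic as conn_hash() and the same chain insert/lookup pattern; the table size is the same prime, while struct owner and the helper names are made up for the illustration (the patch stores a signal_struct pointer instead).

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 32749	/* same prime as gr_conn_table_size */

struct owner { int pid; };

struct entry {
	struct entry *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	struct owner *owner;
};

static struct entry *table[TABLE_SIZE];

/* Same arithmetic as conn_hash() in the patch. */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void insert(uint32_t saddr, uint32_t daddr, uint16_t sport,
		   uint16_t dport, struct owner *o)
{
	unsigned int i = conn_hash(saddr, daddr, sport, dport);
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->saddr = saddr; e->daddr = daddr;
	e->sport = sport; e->dport = dport;
	e->owner = o;
	e->next = table[i];	/* push onto the head of the chain */
	table[i] = e;
}

static struct owner *lookup(uint32_t saddr, uint32_t daddr,
			    uint16_t sport, uint16_t dport)
{
	struct entry *e = table[conn_hash(saddr, daddr, sport, dport)];

	while (e && !(e->saddr == saddr && e->daddr == daddr &&
		      e->sport == sport && e->dport == dport))
		e = e->next;
	return e ? e->owner : NULL;
}

int main(void)
{
	struct owner task = { .pid = 1234 };
	struct owner *o;

	insert(0x0a000001, 0x0a000002, 40000, 80, &task);
	o = lookup(0x0a000001, 0x0a000002, 40000, 80);
	printf("owner pid: %d\n", o ? o->pid : -1);
	return 0;
}
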
56216diff -urNp linux-3.1.4/grsecurity/grsec_sysctl.c linux-3.1.4/grsecurity/grsec_sysctl.c
56217--- linux-3.1.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
56218+++ linux-3.1.4/grsecurity/grsec_sysctl.c 2011-11-16 18:40:31.000000000 -0500
56219@@ -0,0 +1,433 @@
56220+#include <linux/kernel.h>
56221+#include <linux/sched.h>
56222+#include <linux/sysctl.h>
56223+#include <linux/grsecurity.h>
56224+#include <linux/grinternal.h>
56225+
56226+int
56227+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56228+{
56229+#ifdef CONFIG_GRKERNSEC_SYSCTL
56230+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56231+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56232+ return -EACCES;
56233+ }
56234+#endif
56235+ return 0;
56236+}
56237+
56238+#ifdef CONFIG_GRKERNSEC_ROFS
56239+static int __maybe_unused one = 1;
56240+#endif
56241+
56242+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56243+struct ctl_table grsecurity_table[] = {
56244+#ifdef CONFIG_GRKERNSEC_SYSCTL
56245+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56246+#ifdef CONFIG_GRKERNSEC_IO
56247+ {
56248+ .procname = "disable_priv_io",
56249+ .data = &grsec_disable_privio,
56250+ .maxlen = sizeof(int),
56251+ .mode = 0600,
56252+ .proc_handler = &proc_dointvec,
56253+ },
56254+#endif
56255+#endif
56256+#ifdef CONFIG_GRKERNSEC_LINK
56257+ {
56258+ .procname = "linking_restrictions",
56259+ .data = &grsec_enable_link,
56260+ .maxlen = sizeof(int),
56261+ .mode = 0600,
56262+ .proc_handler = &proc_dointvec,
56263+ },
56264+#endif
56265+#ifdef CONFIG_GRKERNSEC_BRUTE
56266+ {
56267+ .procname = "deter_bruteforce",
56268+ .data = &grsec_enable_brute,
56269+ .maxlen = sizeof(int),
56270+ .mode = 0600,
56271+ .proc_handler = &proc_dointvec,
56272+ },
56273+#endif
56274+#ifdef CONFIG_GRKERNSEC_FIFO
56275+ {
56276+ .procname = "fifo_restrictions",
56277+ .data = &grsec_enable_fifo,
56278+ .maxlen = sizeof(int),
56279+ .mode = 0600,
56280+ .proc_handler = &proc_dointvec,
56281+ },
56282+#endif
56283+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56284+ {
56285+ .procname = "ip_blackhole",
56286+ .data = &grsec_enable_blackhole,
56287+ .maxlen = sizeof(int),
56288+ .mode = 0600,
56289+ .proc_handler = &proc_dointvec,
56290+ },
56291+ {
56292+ .procname = "lastack_retries",
56293+ .data = &grsec_lastack_retries,
56294+ .maxlen = sizeof(int),
56295+ .mode = 0600,
56296+ .proc_handler = &proc_dointvec,
56297+ },
56298+#endif
56299+#ifdef CONFIG_GRKERNSEC_EXECLOG
56300+ {
56301+ .procname = "exec_logging",
56302+ .data = &grsec_enable_execlog,
56303+ .maxlen = sizeof(int),
56304+ .mode = 0600,
56305+ .proc_handler = &proc_dointvec,
56306+ },
56307+#endif
56308+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56309+ {
56310+ .procname = "rwxmap_logging",
56311+ .data = &grsec_enable_log_rwxmaps,
56312+ .maxlen = sizeof(int),
56313+ .mode = 0600,
56314+ .proc_handler = &proc_dointvec,
56315+ },
56316+#endif
56317+#ifdef CONFIG_GRKERNSEC_SIGNAL
56318+ {
56319+ .procname = "signal_logging",
56320+ .data = &grsec_enable_signal,
56321+ .maxlen = sizeof(int),
56322+ .mode = 0600,
56323+ .proc_handler = &proc_dointvec,
56324+ },
56325+#endif
56326+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56327+ {
56328+ .procname = "forkfail_logging",
56329+ .data = &grsec_enable_forkfail,
56330+ .maxlen = sizeof(int),
56331+ .mode = 0600,
56332+ .proc_handler = &proc_dointvec,
56333+ },
56334+#endif
56335+#ifdef CONFIG_GRKERNSEC_TIME
56336+ {
56337+ .procname = "timechange_logging",
56338+ .data = &grsec_enable_time,
56339+ .maxlen = sizeof(int),
56340+ .mode = 0600,
56341+ .proc_handler = &proc_dointvec,
56342+ },
56343+#endif
56344+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56345+ {
56346+ .procname = "chroot_deny_shmat",
56347+ .data = &grsec_enable_chroot_shmat,
56348+ .maxlen = sizeof(int),
56349+ .mode = 0600,
56350+ .proc_handler = &proc_dointvec,
56351+ },
56352+#endif
56353+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56354+ {
56355+ .procname = "chroot_deny_unix",
56356+ .data = &grsec_enable_chroot_unix,
56357+ .maxlen = sizeof(int),
56358+ .mode = 0600,
56359+ .proc_handler = &proc_dointvec,
56360+ },
56361+#endif
56362+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56363+ {
56364+ .procname = "chroot_deny_mount",
56365+ .data = &grsec_enable_chroot_mount,
56366+ .maxlen = sizeof(int),
56367+ .mode = 0600,
56368+ .proc_handler = &proc_dointvec,
56369+ },
56370+#endif
56371+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56372+ {
56373+ .procname = "chroot_deny_fchdir",
56374+ .data = &grsec_enable_chroot_fchdir,
56375+ .maxlen = sizeof(int),
56376+ .mode = 0600,
56377+ .proc_handler = &proc_dointvec,
56378+ },
56379+#endif
56380+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56381+ {
56382+ .procname = "chroot_deny_chroot",
56383+ .data = &grsec_enable_chroot_double,
56384+ .maxlen = sizeof(int),
56385+ .mode = 0600,
56386+ .proc_handler = &proc_dointvec,
56387+ },
56388+#endif
56389+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56390+ {
56391+ .procname = "chroot_deny_pivot",
56392+ .data = &grsec_enable_chroot_pivot,
56393+ .maxlen = sizeof(int),
56394+ .mode = 0600,
56395+ .proc_handler = &proc_dointvec,
56396+ },
56397+#endif
56398+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56399+ {
56400+ .procname = "chroot_enforce_chdir",
56401+ .data = &grsec_enable_chroot_chdir,
56402+ .maxlen = sizeof(int),
56403+ .mode = 0600,
56404+ .proc_handler = &proc_dointvec,
56405+ },
56406+#endif
56407+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56408+ {
56409+ .procname = "chroot_deny_chmod",
56410+ .data = &grsec_enable_chroot_chmod,
56411+ .maxlen = sizeof(int),
56412+ .mode = 0600,
56413+ .proc_handler = &proc_dointvec,
56414+ },
56415+#endif
56416+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56417+ {
56418+ .procname = "chroot_deny_mknod",
56419+ .data = &grsec_enable_chroot_mknod,
56420+ .maxlen = sizeof(int),
56421+ .mode = 0600,
56422+ .proc_handler = &proc_dointvec,
56423+ },
56424+#endif
56425+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56426+ {
56427+ .procname = "chroot_restrict_nice",
56428+ .data = &grsec_enable_chroot_nice,
56429+ .maxlen = sizeof(int),
56430+ .mode = 0600,
56431+ .proc_handler = &proc_dointvec,
56432+ },
56433+#endif
56434+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56435+ {
56436+ .procname = "chroot_execlog",
56437+ .data = &grsec_enable_chroot_execlog,
56438+ .maxlen = sizeof(int),
56439+ .mode = 0600,
56440+ .proc_handler = &proc_dointvec,
56441+ },
56442+#endif
56443+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56444+ {
56445+ .procname = "chroot_caps",
56446+ .data = &grsec_enable_chroot_caps,
56447+ .maxlen = sizeof(int),
56448+ .mode = 0600,
56449+ .proc_handler = &proc_dointvec,
56450+ },
56451+#endif
56452+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56453+ {
56454+ .procname = "chroot_deny_sysctl",
56455+ .data = &grsec_enable_chroot_sysctl,
56456+ .maxlen = sizeof(int),
56457+ .mode = 0600,
56458+ .proc_handler = &proc_dointvec,
56459+ },
56460+#endif
56461+#ifdef CONFIG_GRKERNSEC_TPE
56462+ {
56463+ .procname = "tpe",
56464+ .data = &grsec_enable_tpe,
56465+ .maxlen = sizeof(int),
56466+ .mode = 0600,
56467+ .proc_handler = &proc_dointvec,
56468+ },
56469+ {
56470+ .procname = "tpe_gid",
56471+ .data = &grsec_tpe_gid,
56472+ .maxlen = sizeof(int),
56473+ .mode = 0600,
56474+ .proc_handler = &proc_dointvec,
56475+ },
56476+#endif
56477+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56478+ {
56479+ .procname = "tpe_invert",
56480+ .data = &grsec_enable_tpe_invert,
56481+ .maxlen = sizeof(int),
56482+ .mode = 0600,
56483+ .proc_handler = &proc_dointvec,
56484+ },
56485+#endif
56486+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56487+ {
56488+ .procname = "tpe_restrict_all",
56489+ .data = &grsec_enable_tpe_all,
56490+ .maxlen = sizeof(int),
56491+ .mode = 0600,
56492+ .proc_handler = &proc_dointvec,
56493+ },
56494+#endif
56495+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56496+ {
56497+ .procname = "socket_all",
56498+ .data = &grsec_enable_socket_all,
56499+ .maxlen = sizeof(int),
56500+ .mode = 0600,
56501+ .proc_handler = &proc_dointvec,
56502+ },
56503+ {
56504+ .procname = "socket_all_gid",
56505+ .data = &grsec_socket_all_gid,
56506+ .maxlen = sizeof(int),
56507+ .mode = 0600,
56508+ .proc_handler = &proc_dointvec,
56509+ },
56510+#endif
56511+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56512+ {
56513+ .procname = "socket_client",
56514+ .data = &grsec_enable_socket_client,
56515+ .maxlen = sizeof(int),
56516+ .mode = 0600,
56517+ .proc_handler = &proc_dointvec,
56518+ },
56519+ {
56520+ .procname = "socket_client_gid",
56521+ .data = &grsec_socket_client_gid,
56522+ .maxlen = sizeof(int),
56523+ .mode = 0600,
56524+ .proc_handler = &proc_dointvec,
56525+ },
56526+#endif
56527+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56528+ {
56529+ .procname = "socket_server",
56530+ .data = &grsec_enable_socket_server,
56531+ .maxlen = sizeof(int),
56532+ .mode = 0600,
56533+ .proc_handler = &proc_dointvec,
56534+ },
56535+ {
56536+ .procname = "socket_server_gid",
56537+ .data = &grsec_socket_server_gid,
56538+ .maxlen = sizeof(int),
56539+ .mode = 0600,
56540+ .proc_handler = &proc_dointvec,
56541+ },
56542+#endif
56543+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56544+ {
56545+ .procname = "audit_group",
56546+ .data = &grsec_enable_group,
56547+ .maxlen = sizeof(int),
56548+ .mode = 0600,
56549+ .proc_handler = &proc_dointvec,
56550+ },
56551+ {
56552+ .procname = "audit_gid",
56553+ .data = &grsec_audit_gid,
56554+ .maxlen = sizeof(int),
56555+ .mode = 0600,
56556+ .proc_handler = &proc_dointvec,
56557+ },
56558+#endif
56559+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56560+ {
56561+ .procname = "audit_chdir",
56562+ .data = &grsec_enable_chdir,
56563+ .maxlen = sizeof(int),
56564+ .mode = 0600,
56565+ .proc_handler = &proc_dointvec,
56566+ },
56567+#endif
56568+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56569+ {
56570+ .procname = "audit_mount",
56571+ .data = &grsec_enable_mount,
56572+ .maxlen = sizeof(int),
56573+ .mode = 0600,
56574+ .proc_handler = &proc_dointvec,
56575+ },
56576+#endif
56577+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56578+ {
56579+ .procname = "audit_textrel",
56580+ .data = &grsec_enable_audit_textrel,
56581+ .maxlen = sizeof(int),
56582+ .mode = 0600,
56583+ .proc_handler = &proc_dointvec,
56584+ },
56585+#endif
56586+#ifdef CONFIG_GRKERNSEC_DMESG
56587+ {
56588+ .procname = "dmesg",
56589+ .data = &grsec_enable_dmesg,
56590+ .maxlen = sizeof(int),
56591+ .mode = 0600,
56592+ .proc_handler = &proc_dointvec,
56593+ },
56594+#endif
56595+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56596+ {
56597+ .procname = "chroot_findtask",
56598+ .data = &grsec_enable_chroot_findtask,
56599+ .maxlen = sizeof(int),
56600+ .mode = 0600,
56601+ .proc_handler = &proc_dointvec,
56602+ },
56603+#endif
56604+#ifdef CONFIG_GRKERNSEC_RESLOG
56605+ {
56606+ .procname = "resource_logging",
56607+ .data = &grsec_resource_logging,
56608+ .maxlen = sizeof(int),
56609+ .mode = 0600,
56610+ .proc_handler = &proc_dointvec,
56611+ },
56612+#endif
56613+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56614+ {
56615+ .procname = "audit_ptrace",
56616+ .data = &grsec_enable_audit_ptrace,
56617+ .maxlen = sizeof(int),
56618+ .mode = 0600,
56619+ .proc_handler = &proc_dointvec,
56620+ },
56621+#endif
56622+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56623+ {
56624+ .procname = "harden_ptrace",
56625+ .data = &grsec_enable_harden_ptrace,
56626+ .maxlen = sizeof(int),
56627+ .mode = 0600,
56628+ .proc_handler = &proc_dointvec,
56629+ },
56630+#endif
56631+ {
56632+ .procname = "grsec_lock",
56633+ .data = &grsec_lock,
56634+ .maxlen = sizeof(int),
56635+ .mode = 0600,
56636+ .proc_handler = &proc_dointvec,
56637+ },
56638+#endif
56639+#ifdef CONFIG_GRKERNSEC_ROFS
56640+ {
56641+ .procname = "romount_protect",
56642+ .data = &grsec_enable_rofs,
56643+ .maxlen = sizeof(int),
56644+ .mode = 0600,
56645+ .proc_handler = &proc_dointvec_minmax,
56646+ .extra1 = &one,
56647+ .extra2 = &one,
56648+ },
56649+#endif
56650+ { }
56651+};
56652+#endif
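The sysctl entries above all follow the same ctl_table pattern: an int backing variable, mode 0600, and proc_dointvec (proc_dointvec_minmax with extra1/extra2 for romount_protect). Below is a minimal, hypothetical sketch of how such a table is typically wired up on a 3.1-era kernel; demo_value, demo_table, demo_path and demo_toggle are illustrative names only and do not appear in this patch.

#include <linux/module.h>
#include <linux/sysctl.h>

static int demo_value;

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_toggle",
		.data		= &demo_value,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec,
	},
	{ }
};

static const struct ctl_path demo_path[] = {
	{ .procname = "kernel" },
	{ .procname = "demo" },
	{ }
};

static struct ctl_table_header *demo_header;

static int __init demo_sysctl_init(void)
{
	/* exposes /proc/sys/kernel/demo/demo_toggle */
	demo_header = register_sysctl_paths(demo_path, demo_table);
	return demo_header ? 0 : -ENOMEM;
}

static void __exit demo_sysctl_exit(void)
{
	unregister_sysctl_table(demo_header);
}

module_init(demo_sysctl_init);
module_exit(demo_sysctl_exit);
MODULE_LICENSE("GPL");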
56653diff -urNp linux-3.1.4/grsecurity/grsec_time.c linux-3.1.4/grsecurity/grsec_time.c
56654--- linux-3.1.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
56655+++ linux-3.1.4/grsecurity/grsec_time.c 2011-11-16 18:40:31.000000000 -0500
56656@@ -0,0 +1,16 @@
56657+#include <linux/kernel.h>
56658+#include <linux/sched.h>
56659+#include <linux/grinternal.h>
56660+#include <linux/module.h>
56661+
56662+void
56663+gr_log_timechange(void)
56664+{
56665+#ifdef CONFIG_GRKERNSEC_TIME
56666+ if (grsec_enable_time)
56667+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
56668+#endif
56669+ return;
56670+}
56671+
56672+EXPORT_SYMBOL(gr_log_timechange);
56673diff -urNp linux-3.1.4/grsecurity/grsec_tpe.c linux-3.1.4/grsecurity/grsec_tpe.c
56674--- linux-3.1.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
56675+++ linux-3.1.4/grsecurity/grsec_tpe.c 2011-11-16 18:40:31.000000000 -0500
56676@@ -0,0 +1,39 @@
56677+#include <linux/kernel.h>
56678+#include <linux/sched.h>
56679+#include <linux/file.h>
56680+#include <linux/fs.h>
56681+#include <linux/grinternal.h>
56682+
56683+extern int gr_acl_tpe_check(void);
56684+
56685+int
56686+gr_tpe_allow(const struct file *file)
56687+{
56688+#ifdef CONFIG_GRKERNSEC
56689+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
56690+ const struct cred *cred = current_cred();
56691+
56692+ if (cred->uid && ((grsec_enable_tpe &&
56693+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56694+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
56695+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
56696+#else
56697+ in_group_p(grsec_tpe_gid)
56698+#endif
56699+ ) || gr_acl_tpe_check()) &&
56700+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
56701+ (inode->i_mode & S_IWOTH))))) {
56702+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
56703+ return 0;
56704+ }
56705+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56706+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
56707+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
56708+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
56709+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
56710+ return 0;
56711+ }
56712+#endif
56713+#endif
56714+ return 1;
56715+}
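The check in gr_tpe_allow() above reduces to a test on the parent directory of the file being executed: in the main TPE case, execution is refused when that directory is owned by a non-root user, or is root-owned but group- or world-writable. A hypothetical userspace restatement of that directory test, for illustration only, might look like this:

#include <stdio.h>
#include <sys/stat.h>

/* Returns 1 when the directory would count as "trusted" under the main
 * TPE rule above: owned by root and not group- or world-writable. */
static int dir_is_trusted(const char *dir)
{
	struct stat st;

	if (stat(dir, &st) != 0)
		return 0;
	if (st.st_uid != 0)
		return 0;
	if (st.st_mode & (S_IWGRP | S_IWOTH))
		return 0;
	return 1;
}

int main(void)
{
	printf("/tmp trusted: %d\n", dir_is_trusted("/tmp"));
	printf("/usr/bin trusted: %d\n", dir_is_trusted("/usr/bin"));
	return 0;
}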
56716diff -urNp linux-3.1.4/grsecurity/grsum.c linux-3.1.4/grsecurity/grsum.c
56717--- linux-3.1.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
56718+++ linux-3.1.4/grsecurity/grsum.c 2011-11-16 18:40:31.000000000 -0500
56719@@ -0,0 +1,61 @@
56720+#include <linux/err.h>
56721+#include <linux/kernel.h>
56722+#include <linux/sched.h>
56723+#include <linux/mm.h>
56724+#include <linux/scatterlist.h>
56725+#include <linux/crypto.h>
56726+#include <linux/gracl.h>
56727+
56728+
56729+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
56730+#error "crypto and sha256 must be built into the kernel"
56731+#endif
56732+
56733+int
56734+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
56735+{
56736+ char *p;
56737+ struct crypto_hash *tfm;
56738+ struct hash_desc desc;
56739+ struct scatterlist sg;
56740+ unsigned char temp_sum[GR_SHA_LEN];
56741+ volatile int retval = 0;
56742+ volatile int dummy = 0;
56743+ unsigned int i;
56744+
56745+ sg_init_table(&sg, 1);
56746+
56747+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
56748+ if (IS_ERR(tfm)) {
56749+ /* should never happen, since sha256 should be built in */
56750+ return 1;
56751+ }
56752+
56753+ desc.tfm = tfm;
56754+ desc.flags = 0;
56755+
56756+ crypto_hash_init(&desc);
56757+
56758+ p = salt;
56759+ sg_set_buf(&sg, p, GR_SALT_LEN);
56760+ crypto_hash_update(&desc, &sg, sg.length);
56761+
56762+ p = entry->pw;
56763+ sg_set_buf(&sg, p, strlen(p));
56764+
56765+ crypto_hash_update(&desc, &sg, sg.length);
56766+
56767+ crypto_hash_final(&desc, temp_sum);
56768+
56769+ memset(entry->pw, 0, GR_PW_LEN);
56770+
56771+ for (i = 0; i < GR_SHA_LEN; i++)
56772+ if (sum[i] != temp_sum[i])
56773+ retval = 1;
56774+ else
56775+ dummy = 1; // waste a cycle
56776+
56777+ crypto_free_hash(tfm);
56778+
56779+ return retval;
56780+}
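Note that the comparison loop in chkpw() above deliberately walks all GR_SHA_LEN bytes and sets a dummy variable on matching bytes rather than returning at the first mismatch, so the number of iterations does not depend on where the digests diverge. The same idea, expressed as a hypothetical standalone helper that is not part of the patch:

#include <stddef.h>

/* Compare two digests of equal length without bailing out at the first
 * mismatching byte, mirroring the fixed-iteration loop in chkpw().
 * Returns non-zero when the digests differ. */
static int digests_differ(const unsigned char *a, const unsigned char *b,
                          size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];

	return diff != 0;
}

int main(void)
{
	unsigned char x[4] = { 1, 2, 3, 4 };
	unsigned char y[4] = { 1, 2, 9, 4 };

	return digests_differ(x, y, sizeof(x)); /* exits 1: digests differ */
}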
56781diff -urNp linux-3.1.4/grsecurity/Kconfig linux-3.1.4/grsecurity/Kconfig
56782--- linux-3.1.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
56783+++ linux-3.1.4/grsecurity/Kconfig 2011-11-16 18:40:31.000000000 -0500
56784@@ -0,0 +1,1037 @@
56785+#
56786+# grsecurity configuration
56787+#
56788+
56789+menu "Grsecurity"
56790+
56791+config GRKERNSEC
56792+ bool "Grsecurity"
56793+ select CRYPTO
56794+ select CRYPTO_SHA256
56795+ help
56796+ If you say Y here, you will be able to configure many features
56797+ that will enhance the security of your system. It is highly
56798+ recommended that you say Y here and read through the help
56799+ for each option so that you fully understand the features and
56800+ can evaluate their usefulness for your machine.
56801+
56802+choice
56803+ prompt "Security Level"
56804+ depends on GRKERNSEC
56805+ default GRKERNSEC_CUSTOM
56806+
56807+config GRKERNSEC_LOW
56808+ bool "Low"
56809+ select GRKERNSEC_LINK
56810+ select GRKERNSEC_FIFO
56811+ select GRKERNSEC_RANDNET
56812+ select GRKERNSEC_DMESG
56813+ select GRKERNSEC_CHROOT
56814+ select GRKERNSEC_CHROOT_CHDIR
56815+
56816+ help
56817+ If you choose this option, several of the grsecurity options will
56818+ be enabled that will give you greater protection against a number
56819+ of attacks, while assuring that none of your software will have any
56820+ conflicts with the additional security measures. If you run a lot
56821+ of unusual software, or you are having problems with the higher
56822+ security levels, you should say Y here. With this option, the
56823+ following features are enabled:
56824+
56825+ - Linking restrictions
56826+ - FIFO restrictions
56827+ - Restricted dmesg
56828+ - Enforced chdir("/") on chroot
56829+ - Runtime module disabling
56830+
56831+config GRKERNSEC_MEDIUM
56832+ bool "Medium"
56833+ select PAX
56834+ select PAX_EI_PAX
56835+ select PAX_PT_PAX_FLAGS
56836+ select PAX_HAVE_ACL_FLAGS
56837+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56838+ select GRKERNSEC_CHROOT
56839+ select GRKERNSEC_CHROOT_SYSCTL
56840+ select GRKERNSEC_LINK
56841+ select GRKERNSEC_FIFO
56842+ select GRKERNSEC_DMESG
56843+ select GRKERNSEC_RANDNET
56844+ select GRKERNSEC_FORKFAIL
56845+ select GRKERNSEC_TIME
56846+ select GRKERNSEC_SIGNAL
56847+ select GRKERNSEC_CHROOT
56848+ select GRKERNSEC_CHROOT_UNIX
56849+ select GRKERNSEC_CHROOT_MOUNT
56850+ select GRKERNSEC_CHROOT_PIVOT
56851+ select GRKERNSEC_CHROOT_DOUBLE
56852+ select GRKERNSEC_CHROOT_CHDIR
56853+ select GRKERNSEC_CHROOT_MKNOD
56854+ select GRKERNSEC_PROC
56855+ select GRKERNSEC_PROC_USERGROUP
56856+ select PAX_RANDUSTACK
56857+ select PAX_ASLR
56858+ select PAX_RANDMMAP
56859+ select PAX_REFCOUNT if (X86 || SPARC64)
56860+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
56861+
56862+ help
56863+ If you say Y here, several features in addition to those included
56864+ in the low additional security level will be enabled. These
56865+ features provide even more security to your system, though in rare
56866+ cases they may be incompatible with very old or poorly written
56867+ software. If you enable this option, make sure that your auth
56868+ service (identd) is running as gid 1001. With this option,
56869+ the following features (in addition to those provided in the
56870+ low additional security level) will be enabled:
56871+
56872+ - Failed fork logging
56873+ - Time change logging
56874+ - Signal logging
56875+ - Deny mounts in chroot
56876+ - Deny double chrooting
56877+ - Deny sysctl writes in chroot
56878+ - Deny mknod in chroot
56879+ - Deny access to abstract AF_UNIX sockets out of chroot
56880+ - Deny pivot_root in chroot
56881+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
56882+ - /proc restrictions with special GID set to 10 (usually wheel)
56883+ - Address Space Layout Randomization (ASLR)
56884+ - Prevent exploitation of most refcount overflows
56885+ - Bounds checking of copying between the kernel and userland
56886+
56887+config GRKERNSEC_HIGH
56888+ bool "High"
56889+ select GRKERNSEC_LINK
56890+ select GRKERNSEC_FIFO
56891+ select GRKERNSEC_DMESG
56892+ select GRKERNSEC_FORKFAIL
56893+ select GRKERNSEC_TIME
56894+ select GRKERNSEC_SIGNAL
56895+ select GRKERNSEC_CHROOT
56896+ select GRKERNSEC_CHROOT_SHMAT
56897+ select GRKERNSEC_CHROOT_UNIX
56898+ select GRKERNSEC_CHROOT_MOUNT
56899+ select GRKERNSEC_CHROOT_FCHDIR
56900+ select GRKERNSEC_CHROOT_PIVOT
56901+ select GRKERNSEC_CHROOT_DOUBLE
56902+ select GRKERNSEC_CHROOT_CHDIR
56903+ select GRKERNSEC_CHROOT_MKNOD
56904+ select GRKERNSEC_CHROOT_CAPS
56905+ select GRKERNSEC_CHROOT_SYSCTL
56906+ select GRKERNSEC_CHROOT_FINDTASK
56907+ select GRKERNSEC_SYSFS_RESTRICT
56908+ select GRKERNSEC_PROC
56909+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56910+ select GRKERNSEC_HIDESYM
56911+ select GRKERNSEC_BRUTE
56912+ select GRKERNSEC_PROC_USERGROUP
56913+ select GRKERNSEC_KMEM
56914+ select GRKERNSEC_RESLOG
56915+ select GRKERNSEC_RANDNET
56916+ select GRKERNSEC_PROC_ADD
56917+ select GRKERNSEC_CHROOT_CHMOD
56918+ select GRKERNSEC_CHROOT_NICE
56919+ select GRKERNSEC_AUDIT_MOUNT
56920+ select GRKERNSEC_MODHARDEN if (MODULES)
56921+ select GRKERNSEC_HARDEN_PTRACE
56922+ select GRKERNSEC_VM86 if (X86_32)
56923+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
56924+ select PAX
56925+ select PAX_RANDUSTACK
56926+ select PAX_ASLR
56927+ select PAX_RANDMMAP
56928+ select PAX_NOEXEC
56929+ select PAX_MPROTECT
56930+ select PAX_EI_PAX
56931+ select PAX_PT_PAX_FLAGS
56932+ select PAX_HAVE_ACL_FLAGS
56933+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
56934+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
56935+ select PAX_RANDKSTACK if (X86_TSC && X86)
56936+ select PAX_SEGMEXEC if (X86_32)
56937+ select PAX_PAGEEXEC
56938+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
56939+ select PAX_EMUTRAMP if (PARISC)
56940+ select PAX_EMUSIGRT if (PARISC)
56941+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
56942+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
56943+ select PAX_REFCOUNT if (X86 || SPARC64)
56944+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
56945+ help
56946+ If you say Y here, many of the features of grsecurity will be
56947+ enabled, which will protect you against many kinds of attacks
56948+ against your system. The heightened security comes at a cost
56949+ of an increased chance of incompatibilities with rare software
56950+ on your machine. Since this security level enables PaX, you should
56951+ view <http://pax.grsecurity.net> and read about the PaX
56952+ project. While you are there, download chpax and run it on
56953+ binaries that cause problems with PaX. Also remember that
56954+ since the /proc restrictions are enabled, you must run your
56955+ identd as gid 1001. This security level enables the following
56956+ features in addition to those listed in the low and medium
56957+ security levels:
56958+
56959+ - Additional /proc restrictions
56960+ - Chmod restrictions in chroot
56961+ - No signals, ptrace, or viewing of processes outside of chroot
56962+ - Capability restrictions in chroot
56963+ - Deny fchdir out of chroot
56964+ - Priority restrictions in chroot
56965+ - Segmentation-based implementation of PaX
56966+ - Mprotect restrictions
56967+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
56968+ - Kernel stack randomization
56969+ - Mount/unmount/remount logging
56970+ - Kernel symbol hiding
56971+ - Hardening of module auto-loading
56972+ - Ptrace restrictions
56973+ - Restricted vm86 mode
56974+ - Restricted sysfs/debugfs
56975+ - Active kernel exploit response
56976+
56977+config GRKERNSEC_CUSTOM
56978+ bool "Custom"
56979+ help
56980+ If you say Y here, you will be able to configure every grsecurity
56981+ option, which allows you to enable many more features that aren't
56982+ covered in the basic security levels. These additional features
56983+ include TPE, socket restrictions, and the sysctl system for
56984+ grsecurity. It is advised that you read through the help for
56985+ each option to determine its usefulness in your situation.
56986+
56987+endchoice
56988+
56989+menu "Address Space Protection"
56990+depends on GRKERNSEC
56991+
56992+config GRKERNSEC_KMEM
56993+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
56994+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56995+ help
56996+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56997+ be written to or read from to modify or leak the contents of the running
56998+ kernel. /dev/port will also not be allowed to be opened. If you have module
56999+ support disabled, enabling this will close up four ways that are
57000+ currently used to insert malicious code into the running kernel.
57001+ Even with all these features enabled, we still highly recommend that
57002+ you use the RBAC system, as it is still possible for an attacker to
57003+ modify the running kernel through privileged I/O granted by ioperm/iopl.
57004+ If you are not using XFree86, you may be able to stop this additional
57005+ case by enabling the 'Disable privileged I/O' option. Though nothing
57006+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
57007+ but only to video memory, which is the only writing we allow in this
57008+ case. If /dev/kmem or /dev/mem are mmapped without PROT_WRITE, they will
57009+ not be allowed to be mprotected with PROT_WRITE later.
57010+ It is highly recommended that you say Y here if you meet all the
57011+ conditions above.
57012+
57013+config GRKERNSEC_VM86
57014+ bool "Restrict VM86 mode"
57015+ depends on X86_32
57016+
57017+ help
57018+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
57019+ make use of a special execution mode on 32bit x86 processors called
57020+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
57021+ video cards and will still work with this option enabled. The purpose
57022+ of the option is to prevent exploitation of emulation errors in
57023+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
57024+ Nearly all users should be able to enable this option.
57025+
57026+config GRKERNSEC_IO
57027+ bool "Disable privileged I/O"
57028+ depends on X86
57029+ select RTC_CLASS
57030+ select RTC_INTF_DEV
57031+ select RTC_DRV_CMOS
57032+
57033+ help
57034+ If you say Y here, all ioperm and iopl calls will return an error.
57035+ Ioperm and iopl can be used to modify the running kernel.
57036+ Unfortunately, some programs need this access to operate properly,
57037+ the most notable of which are XFree86 and hwclock. hwclock can be
57038+ remedied by having RTC support in the kernel, so real-time
57039+ clock support is enabled if this option is enabled, to ensure
57040+ that hwclock operates correctly. XFree86 still will not
57041+ operate correctly with this option enabled, so DO NOT CHOOSE Y
57042+ IF YOU USE XFree86. If you use XFree86 and you still want to
57043+ protect your kernel against modification, use the RBAC system.
57044+
57045+config GRKERNSEC_PROC_MEMMAP
57046+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
57047+ default y if (PAX_NOEXEC || PAX_ASLR)
57048+ depends on PAX_NOEXEC || PAX_ASLR
57049+ help
57050+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
57051+ give no information about the addresses of the task's mappings if
57052+ PaX features that rely on random addresses are enabled on the task.
57053+ If you use PaX it is greatly recommended that you say Y here as it
57054+ closes up a hole that makes the full ASLR useless for suid
57055+ binaries.
57056+
57057+config GRKERNSEC_BRUTE
57058+ bool "Deter exploit bruteforcing"
57059+ help
57060+ If you say Y here, attempts to bruteforce exploits against forking
57061+ daemons such as apache or sshd, as well as against suid/sgid binaries
57062+ will be deterred. When a child of a forking daemon is killed by PaX
57063+ or crashes due to an illegal instruction or other suspicious signal,
57064+ the parent process will be delayed 30 seconds upon every subsequent
57065+ fork until the administrator is able to assess the situation and
57066+ restart the daemon.
57067+ In the suid/sgid case, the attempt is logged, the user has all their
57068+ processes terminated, and they are prevented from executing any further
57069+ processes for 15 minutes.
57070+ It is recommended that you also enable signal logging in the auditing
57071+ section so that logs are generated when a process triggers a suspicious
57072+ signal.
57073+ If the sysctl option is enabled, a sysctl option with name
57074+ "deter_bruteforce" is created.
57075+
57076+
57077+config GRKERNSEC_MODHARDEN
57078+ bool "Harden module auto-loading"
57079+ depends on MODULES
57080+ help
57081+ If you say Y here, module auto-loading in response to use of some
57082+ feature implemented by an unloaded module will be restricted to
57083+ root users. Enabling this option helps defend against attacks
57084+ by unprivileged users who abuse the auto-loading behavior to
57085+ cause a vulnerable module to load that is then exploited.
57086+
57087+ If this option prevents a legitimate use of auto-loading for a
57088+ non-root user, the administrator can execute modprobe manually
57089+ with the exact name of the module mentioned in the alert log.
57090+ Alternatively, the administrator can add the module to the list
57091+ of modules loaded at boot by modifying init scripts.
57092+
57093+ Modification of init scripts will most likely be needed on
57094+ Ubuntu servers with encrypted home directory support enabled,
57095+ as the first non-root user logging in will cause the ecb(aes),
57096+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
57097+
57098+config GRKERNSEC_HIDESYM
57099+ bool "Hide kernel symbols"
57100+ help
57101+ If you say Y here, getting information on loaded modules, and
57102+ displaying all kernel symbols through a syscall will be restricted
57103+ to users with CAP_SYS_MODULE. For software compatibility reasons,
57104+ /proc/kallsyms will be restricted to the root user. The RBAC
57105+ system can hide that entry even from root.
57106+
57107+ This option also prevents leaking of kernel addresses through
57108+ several /proc entries.
57109+
57110+ Note that this option is only effective provided the following
57111+ conditions are met:
57112+ 1) The kernel using grsecurity is not precompiled by some distribution
57113+ 2) You have also enabled GRKERNSEC_DMESG
57114+ 3) You are using the RBAC system and hiding other files such as your
57115+ kernel image and System.map. Alternatively, enabling this option
57116+ causes the permissions on /boot, /lib/modules, and the kernel
57117+ source directory to change at compile time to prevent
57118+ reading by non-root users.
57119+ If the above conditions are met, this option will aid in providing a
57120+ useful protection against local kernel exploitation of overflows
57121+ and arbitrary read/write vulnerabilities.
57122+
57123+config GRKERNSEC_KERN_LOCKOUT
57124+ bool "Active kernel exploit response"
57125+ depends on X86 || ARM || PPC || SPARC
57126+ help
57127+ If you say Y here, when a PaX alert is triggered due to suspicious
57128+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
57129+ or an OOPs occurs due to bad memory accesses, instead of just
57130+ terminating the offending process (and potentially allowing
57131+ a subsequent exploit from the same user), we will take one of two
57132+ actions:
57133+ If the user was root, we will panic the system
57134+ If the user was non-root, we will log the attempt, terminate
57135+ all processes owned by the user, then prevent them from creating
57136+ any new processes until the system is restarted
57137+ This deters repeated kernel exploitation/bruteforcing attempts
57138+ and is useful for later forensics.
57139+
57140+endmenu
57141+menu "Role Based Access Control Options"
57142+depends on GRKERNSEC
57143+
57144+config GRKERNSEC_RBAC_DEBUG
57145+ bool
57146+
57147+config GRKERNSEC_NO_RBAC
57148+ bool "Disable RBAC system"
57149+ help
57150+ If you say Y here, the /dev/grsec device will be removed from the kernel,
57151+ preventing the RBAC system from being enabled. You should only say Y
57152+ here if you have no intention of using the RBAC system, so as to prevent
57153+ an attacker with root access from misusing the RBAC system to hide files
57154+ and processes when loadable module support and /dev/[k]mem have been
57155+ locked down.
57156+
57157+config GRKERNSEC_ACL_HIDEKERN
57158+ bool "Hide kernel processes"
57159+ help
57160+ If you say Y here, all kernel threads will be hidden to all
57161+ processes but those whose subject has the "view hidden processes"
57162+ flag.
57163+
57164+config GRKERNSEC_ACL_MAXTRIES
57165+ int "Maximum tries before password lockout"
57166+ default 3
57167+ help
57168+ This option enforces the maximum number of times a user can attempt
57169+ to authorize themselves with the grsecurity RBAC system before being
57170+ denied the ability to attempt authorization again for a specified time.
57171+ The lower the number, the harder it will be to brute-force a password.
57172+
57173+config GRKERNSEC_ACL_TIMEOUT
57174+ int "Time to wait after max password tries, in seconds"
57175+ default 30
57176+ help
57177+ This option specifies the time the user must wait after attempting to
57178+ authorize to the RBAC system with the maximum number of invalid
57179+ passwords. The higher the number, the harder it will be to brute-force
57180+ a password.
57181+
57182+endmenu
57183+menu "Filesystem Protections"
57184+depends on GRKERNSEC
57185+
57186+config GRKERNSEC_PROC
57187+ bool "Proc restrictions"
57188+ help
57189+ If you say Y here, the permissions of the /proc filesystem
57190+ will be altered to enhance system security and privacy. You MUST
57191+ choose either a user only restriction or a user and group restriction.
57192+ Depending upon the option you choose, you can either restrict users to
57193+ see only the processes they themselves run, or choose a group that can
57194+ view all processes and files normally restricted to root if you choose
57195+ the "restrict to user only" option. NOTE: If you're running identd as
57196+ a non-root user, you will have to run it as the group you specify here.
57197+
57198+config GRKERNSEC_PROC_USER
57199+ bool "Restrict /proc to user only"
57200+ depends on GRKERNSEC_PROC
57201+ help
57202+ If you say Y here, non-root users will only be able to view their own
57203+ processes, and will be restricted from viewing network-related information
57204+ as well as kernel symbol and module information.
57205+
57206+config GRKERNSEC_PROC_USERGROUP
57207+ bool "Allow special group"
57208+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
57209+ help
57210+ If you say Y here, you will be able to select a group that will be
57211+ able to view all processes and network-related information. If you've
57212+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
57213+ remain hidden. This option is useful if you want to run identd as
57214+ a non-root user.
57215+
57216+config GRKERNSEC_PROC_GID
57217+ int "GID for special group"
57218+ depends on GRKERNSEC_PROC_USERGROUP
57219+ default 1001
57220+
57221+config GRKERNSEC_PROC_ADD
57222+ bool "Additional restrictions"
57223+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
57224+ help
57225+ If you say Y here, additional restrictions will be placed on
57226+ /proc that keep normal users from viewing device information and
57227+ slabinfo information that could be useful for exploits.
57228+
57229+config GRKERNSEC_LINK
57230+ bool "Linking restrictions"
57231+ help
57232+ If you say Y here, /tmp race exploits will be prevented, since users
57233+ will no longer be able to follow symlinks owned by other users in
57234+ world-writable +t directories (e.g. /tmp), unless the owner of the
57235+ symlink is the owner of the directory. Users will also not be
57236+ able to hardlink to files they do not own. If the sysctl option is
57237+ enabled, a sysctl option with name "linking_restrictions" is created.
57238+
57239+config GRKERNSEC_FIFO
57240+ bool "FIFO restrictions"
57241+ help
57242+ If you say Y here, users will not be able to write to FIFOs they don't
57243+ own in world-writable +t directories (e.g. /tmp), unless the owner of
57244+ the FIFO is also the owner of the directory it's held in. If the sysctl
57245+ option is enabled, a sysctl option with name "fifo_restrictions" is
57246+ created.
57247+
57248+config GRKERNSEC_SYSFS_RESTRICT
57249+ bool "Sysfs/debugfs restriction"
57250+ depends on SYSFS
57251+ help
57252+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
57253+ any filesystem normally mounted under it (e.g. debugfs) will only
57254+ be accessible by root. These filesystems generally provide access
57255+ to hardware and debug information that isn't appropriate for unprivileged
57256+ users of the system. Sysfs and debugfs have also become a large source
57257+ of new vulnerabilities, ranging from infoleaks to local compromise.
57258+ There has been very little oversight with an eye toward security involved
57259+ in adding new exporters of information to these filesystems, so their
57260+ use is discouraged.
57261+ This option is equivalent to a chmod 0700 of the mount paths.
57262+
57263+config GRKERNSEC_ROFS
57264+ bool "Runtime read-only mount protection"
57265+ help
57266+ If you say Y here, a sysctl option with name "romount_protect" will
57267+ be created. By setting this option to 1 at runtime, filesystems
57268+ will be protected in the following ways:
57269+ * No new writable mounts will be allowed
57270+ * Existing read-only mounts won't be able to be remounted read/write
57271+ * Write operations will be denied on all block devices
57272+ This option acts independently of grsec_lock: once it is set to 1,
57273+ it cannot be turned off. Therefore, please be mindful of the resulting
57274+ behavior if this option is enabled in an init script on a read-only
57275+ filesystem. This feature is mainly intended for secure embedded systems.
57276+
57277+config GRKERNSEC_CHROOT
57278+ bool "Chroot jail restrictions"
57279+ help
57280+ If you say Y here, you will be able to choose several options that will
57281+ make breaking out of a chrooted jail much more difficult. If you
57282+ encounter no software incompatibilities with the following options, it
57283+ is recommended that you enable each one.
57284+
57285+config GRKERNSEC_CHROOT_MOUNT
57286+ bool "Deny mounts"
57287+ depends on GRKERNSEC_CHROOT
57288+ help
57289+ If you say Y here, processes inside a chroot will not be able to
57290+ mount or remount filesystems. If the sysctl option is enabled, a
57291+ sysctl option with name "chroot_deny_mount" is created.
57292+
57293+config GRKERNSEC_CHROOT_DOUBLE
57294+ bool "Deny double-chroots"
57295+ depends on GRKERNSEC_CHROOT
57296+ help
57297+ If you say Y here, processes inside a chroot will not be able to chroot
57298+ again outside the chroot. This is a widely used method of breaking
57299+ out of a chroot jail and should not be allowed. If the sysctl
57300+ option is enabled, a sysctl option with name
57301+ "chroot_deny_chroot" is created.
57302+
57303+config GRKERNSEC_CHROOT_PIVOT
57304+ bool "Deny pivot_root in chroot"
57305+ depends on GRKERNSEC_CHROOT
57306+ help
57307+ If you say Y here, processes inside a chroot will not be able to use
57308+ a function called pivot_root() that was introduced in Linux 2.3.41. It
57309+ works similarly to chroot in that it changes the root filesystem. This
57310+ function could be misused in a chrooted process to attempt to break out
57311+ of the chroot, and therefore should not be allowed. If the sysctl
57312+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
57313+ created.
57314+
57315+config GRKERNSEC_CHROOT_CHDIR
57316+ bool "Enforce chdir(\"/\") on all chroots"
57317+ depends on GRKERNSEC_CHROOT
57318+ help
57319+ If you say Y here, the current working directory of all newly-chrooted
57320+ applications will be set to the root directory of the chroot.
57321+ The man page on chroot(2) states:
57322+ Note that this call does not change the current working
57323+ directory, so that `.' can be outside the tree rooted at
57324+ `/'. In particular, the super-user can escape from a
57325+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
57326+
57327+ It is recommended that you say Y here, since it's not known to break
57328+ any software. If the sysctl option is enabled, a sysctl option with
57329+ name "chroot_enforce_chdir" is created.
57330+
57331+config GRKERNSEC_CHROOT_CHMOD
57332+ bool "Deny (f)chmod +s"
57333+ depends on GRKERNSEC_CHROOT
57334+ help
57335+ If you say Y here, processes inside a chroot will not be able to chmod
57336+ or fchmod files to make them have suid or sgid bits. This protects
57337+ against another published method of breaking a chroot. If the sysctl
57338+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
57339+ created.
57340+
57341+config GRKERNSEC_CHROOT_FCHDIR
57342+ bool "Deny fchdir out of chroot"
57343+ depends on GRKERNSEC_CHROOT
57344+ help
57345+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
57346+ to a file descriptor of the chrooting process that points to a directory
57347+ outside the filesystem will be stopped. If the sysctl option
57348+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
57349+
57350+config GRKERNSEC_CHROOT_MKNOD
57351+ bool "Deny mknod"
57352+ depends on GRKERNSEC_CHROOT
57353+ help
57354+ If you say Y here, processes inside a chroot will not be allowed to
57355+ mknod. The problem with using mknod inside a chroot is that it
57356+ would allow an attacker to create a device entry that is the same
57357+ as one on the physical root of your system, which could be anything
57358+ from the console device to a device for your hard drive (which
57359+ they could then use to wipe the drive or steal data). It is recommended
57360+ that you say Y here, unless you run into software incompatibilities.
57361+ If the sysctl option is enabled, a sysctl option with name
57362+ "chroot_deny_mknod" is created.
57363+
57364+config GRKERNSEC_CHROOT_SHMAT
57365+ bool "Deny shmat() out of chroot"
57366+ depends on GRKERNSEC_CHROOT
57367+ help
57368+ If you say Y here, processes inside a chroot will not be able to attach
57369+ to shared memory segments that were created outside of the chroot jail.
57370+ It is recommended that you say Y here. If the sysctl option is enabled,
57371+ a sysctl option with name "chroot_deny_shmat" is created.
57372+
57373+config GRKERNSEC_CHROOT_UNIX
57374+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
57375+ depends on GRKERNSEC_CHROOT
57376+ help
57377+ If you say Y here, processes inside a chroot will not be able to
57378+ connect to abstract (meaning not belonging to a filesystem) Unix
57379+ domain sockets that were bound outside of a chroot. It is recommended
57380+ that you say Y here. If the sysctl option is enabled, a sysctl option
57381+ with name "chroot_deny_unix" is created.
57382+
57383+config GRKERNSEC_CHROOT_FINDTASK
57384+ bool "Protect outside processes"
57385+ depends on GRKERNSEC_CHROOT
57386+ help
57387+ If you say Y here, processes inside a chroot will not be able to
57388+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
57389+ getsid, or view any process outside of the chroot. If the sysctl
57390+ option is enabled, a sysctl option with name "chroot_findtask" is
57391+ created.
57392+
57393+config GRKERNSEC_CHROOT_NICE
57394+ bool "Restrict priority changes"
57395+ depends on GRKERNSEC_CHROOT
57396+ help
57397+ If you say Y here, processes inside a chroot will not be able to raise
57398+ the priority of processes in the chroot, or alter the priority of
57399+ processes outside the chroot. This provides more security than simply
57400+ removing CAP_SYS_NICE from the process' capability set. If the
57401+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
57402+ is created.
57403+
57404+config GRKERNSEC_CHROOT_SYSCTL
57405+ bool "Deny sysctl writes"
57406+ depends on GRKERNSEC_CHROOT
57407+ help
57408+ If you say Y here, an attacker in a chroot will not be able to
57409+ write to sysctl entries, either by sysctl(2) or through a /proc
57410+ interface. It is strongly recommended that you say Y here. If the
57411+ sysctl option is enabled, a sysctl option with name
57412+ "chroot_deny_sysctl" is created.
57413+
57414+config GRKERNSEC_CHROOT_CAPS
57415+ bool "Capability restrictions"
57416+ depends on GRKERNSEC_CHROOT
57417+ help
57418+ If you say Y here, the capabilities on all processes within a
57419+ chroot jail will be lowered to stop module insertion, raw i/o,
57420+ system and net admin tasks, rebooting the system, modifying immutable
57421+ files, modifying IPC owned by another, and changing the system time.
57422+ This is left an option because it can break some apps. Disable this
57423+ if your chrooted apps are having problems performing those kinds of
57424+ tasks. If the sysctl option is enabled, a sysctl option with
57425+ name "chroot_caps" is created.
57426+
57427+endmenu
57428+menu "Kernel Auditing"
57429+depends on GRKERNSEC
57430+
57431+config GRKERNSEC_AUDIT_GROUP
57432+ bool "Single group for auditing"
57433+ help
57434+ If you say Y here, the exec, chdir, and (un)mount logging features
57435+ will only operate on a group you specify. This option is recommended
57436+ if you only want to watch certain users instead of having a large
57437+ amount of logs from the entire system. If the sysctl option is enabled,
57438+ a sysctl option with name "audit_group" is created.
57439+
57440+config GRKERNSEC_AUDIT_GID
57441+ int "GID for auditing"
57442+ depends on GRKERNSEC_AUDIT_GROUP
57443+ default 1007
57444+
57445+config GRKERNSEC_EXECLOG
57446+ bool "Exec logging"
57447+ help
57448+ If you say Y here, all execve() calls will be logged (since the
57449+ other exec*() calls are frontends to execve(), all execution
57450+ will be logged). Useful for shell-servers that like to keep track
57451+ of their users. If the sysctl option is enabled, a sysctl option with
57452+ name "exec_logging" is created.
57453+ WARNING: This option when enabled will produce a LOT of logs, especially
57454+ on an active system.
57455+
57456+config GRKERNSEC_RESLOG
57457+ bool "Resource logging"
57458+ help
57459+ If you say Y here, all attempts to overstep resource limits will
57460+ be logged with the resource name, the requested size, and the current
57461+ limit. It is highly recommended that you say Y here. If the sysctl
57462+ option is enabled, a sysctl option with name "resource_logging" is
57463+ created. If the RBAC system is enabled, the sysctl value is ignored.
57464+
57465+config GRKERNSEC_CHROOT_EXECLOG
57466+ bool "Log execs within chroot"
57467+ help
57468+ If you say Y here, all executions inside a chroot jail will be logged
57469+ to syslog. This can cause a large amount of logs if certain
57470+ applications (eg. djb's daemontools) are installed on the system, and
57471+ is therefore left as an option. If the sysctl option is enabled, a
57472+ sysctl option with name "chroot_execlog" is created.
57473+
57474+config GRKERNSEC_AUDIT_PTRACE
57475+ bool "Ptrace logging"
57476+ help
57477+ If you say Y here, all attempts to attach to a process via ptrace
57478+ will be logged. If the sysctl option is enabled, a sysctl option
57479+ with name "audit_ptrace" is created.
57480+
57481+config GRKERNSEC_AUDIT_CHDIR
57482+ bool "Chdir logging"
57483+ help
57484+ If you say Y here, all chdir() calls will be logged. If the sysctl
57485+ option is enabled, a sysctl option with name "audit_chdir" is created.
57486+
57487+config GRKERNSEC_AUDIT_MOUNT
57488+ bool "(Un)Mount logging"
57489+ help
57490+ If you say Y here, all mounts and unmounts will be logged. If the
57491+ sysctl option is enabled, a sysctl option with name "audit_mount" is
57492+ created.
57493+
57494+config GRKERNSEC_SIGNAL
57495+ bool "Signal logging"
57496+ help
57497+ If you say Y here, certain important signals will be logged, such as
57498+ SIGSEGV, which will as a result inform you when an error in a program
57499+ occurred, which in some cases could mean a possible exploit attempt.
57500+ If the sysctl option is enabled, a sysctl option with name
57501+ "signal_logging" is created.
57502+
57503+config GRKERNSEC_FORKFAIL
57504+ bool "Fork failure logging"
57505+ help
57506+ If you say Y here, all failed fork() attempts will be logged.
57507+ This could suggest a fork bomb, or someone attempting to overstep
57508+ their process limit. If the sysctl option is enabled, a sysctl option
57509+ with name "forkfail_logging" is created.
57510+
57511+config GRKERNSEC_TIME
57512+ bool "Time change logging"
57513+ help
57514+ If you say Y here, any changes of the system clock will be logged.
57515+ If the sysctl option is enabled, a sysctl option with name
57516+ "timechange_logging" is created.
57517+
57518+config GRKERNSEC_PROC_IPADDR
57519+ bool "/proc/<pid>/ipaddr support"
57520+ help
57521+ If you say Y here, a new entry will be added to each /proc/<pid>
57522+ directory that contains the IP address of the person using the task.
57523+ The IP is carried across local TCP and AF_UNIX stream sockets.
57524+ This information can be useful for IDS/IPSes to perform remote response
57525+ to a local attack. The entry is readable by only the owner of the
57526+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
57527+ the RBAC system), and thus does not create privacy concerns.
57528+
57529+config GRKERNSEC_RWXMAP_LOG
57530+ bool 'Denied RWX mmap/mprotect logging'
57531+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
57532+ help
57533+ If you say Y here, calls to mmap() and mprotect() with explicit
57534+ usage of PROT_WRITE and PROT_EXEC together will be logged when
57535+ denied by the PAX_MPROTECT feature. If the sysctl option is
57536+ enabled, a sysctl option with name "rwxmap_logging" is created.
57537+
57538+config GRKERNSEC_AUDIT_TEXTREL
57539+ bool 'ELF text relocations logging (READ HELP)'
57540+ depends on PAX_MPROTECT
57541+ help
57542+ If you say Y here, text relocations will be logged with the filename
57543+ of the offending library or binary. The purpose of the feature is
57544+ to help Linux distribution developers get rid of libraries and
57545+ binaries that need text relocations which hinder the future progress
57546+ of PaX. Only Linux distribution developers should say Y here, and
57547+ never on a production machine, as this option creates an information
57548+ leak that could aid an attacker in defeating the randomization of
57549+ a single memory region. If the sysctl option is enabled, a sysctl
57550+ option with name "audit_textrel" is created.
57551+
57552+endmenu
57553+
57554+menu "Executable Protections"
57555+depends on GRKERNSEC
57556+
57557+config GRKERNSEC_DMESG
57558+ bool "Dmesg(8) restriction"
57559+ help
57560+ If you say Y here, non-root users will not be able to use dmesg(8)
57561+ to view up to the last 4kb of messages in the kernel's log buffer.
57562+ The kernel's log buffer often contains kernel addresses and other
57563+ identifying information useful to an attacker in fingerprinting a
57564+ system for a targeted exploit.
57565+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
57566+ created.
57567+
57568+config GRKERNSEC_HARDEN_PTRACE
57569+ bool "Deter ptrace-based process snooping"
57570+ help
57571+ If you say Y here, TTY sniffers and other malicious monitoring
57572+ programs implemented through ptrace will be defeated. If you
57573+ have been using the RBAC system, this option has already been
57574+ enabled for several years for all users, with the ability to make
57575+ fine-grained exceptions.
57576+
57577+ This option only affects the ability of non-root users to ptrace
57578+ processes that are not a descendent of the ptracing process.
57579+ This means that strace ./binary and gdb ./binary will still work,
57580+ but attaching to arbitrary processes will not. If the sysctl
57581+ option is enabled, a sysctl option with name "harden_ptrace" is
57582+ created.
57583+
57584+config GRKERNSEC_TPE
57585+ bool "Trusted Path Execution (TPE)"
57586+ help
57587+ If you say Y here, you will be able to choose a gid to add to the
57588+ supplementary groups of users you want to mark as "untrusted."
57589+ These users will not be able to execute any files that are not in
57590+ root-owned directories writable only by root. If the sysctl option
57591+ is enabled, a sysctl option with name "tpe" is created.
57592+
57593+config GRKERNSEC_TPE_ALL
57594+ bool "Partially restrict all non-root users"
57595+ depends on GRKERNSEC_TPE
57596+ help
57597+ If you say Y here, all non-root users will be covered under
57598+ a weaker TPE restriction. This is separate from, and in addition to,
57599+ the main TPE options that you have selected elsewhere. Thus, if a
57600+ "trusted" GID is chosen, this restriction applies to even that GID.
57601+ Under this restriction, all non-root users will only be allowed to
57602+ execute files in directories they own that are not group or
57603+ world-writable, or in directories owned by root and writable only by
57604+ root. If the sysctl option is enabled, a sysctl option with name
57605+ "tpe_restrict_all" is created.
57606+
57607+config GRKERNSEC_TPE_INVERT
57608+ bool "Invert GID option"
57609+ depends on GRKERNSEC_TPE
57610+ help
57611+ If you say Y here, the group you specify in the TPE configuration will
57612+ decide what group TPE restrictions will be *disabled* for. This
57613+ option is useful if you want TPE restrictions to be applied to most
57614+ users on the system. If the sysctl option is enabled, a sysctl option
57615+ with name "tpe_invert" is created. Unlike other sysctl options, this
57616+ entry will default to on for backward-compatibility.
57617+
57618+config GRKERNSEC_TPE_GID
57619+ int "GID for untrusted users"
57620+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
57621+ default 1005
57622+ help
57623+ Setting this GID determines what group TPE restrictions will be
57624+ *enabled* for. If the sysctl option is enabled, a sysctl option
57625+ with name "tpe_gid" is created.
57626+
57627+config GRKERNSEC_TPE_GID
57628+ int "GID for trusted users"
57629+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
57630+ default 1005
57631+ help
57632+ Setting this GID determines what group TPE restrictions will be
57633+ *disabled* for. If the sysctl option is enabled, a sysctl option
57634+ with name "tpe_gid" is created.
57635+
57636+endmenu
57637+menu "Network Protections"
57638+depends on GRKERNSEC
57639+
57640+config GRKERNSEC_RANDNET
57641+ bool "Larger entropy pools"
57642+ help
57643+ If you say Y here, the entropy pools used for many features of Linux
57644+ and grsecurity will be doubled in size. Since several grsecurity
57645+ features use additional randomness, it is recommended that you say Y
57646+ here. Saying Y here has a similar effect as modifying
57647+ /proc/sys/kernel/random/poolsize.
57648+
57649+config GRKERNSEC_BLACKHOLE
57650+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
57651+ depends on NET
57652+ help
57653+ If you say Y here, neither TCP resets nor ICMP
57654+ destination-unreachable packets will be sent in response to packets
57655+ sent to ports for which no associated listening process exists.
57656+ This feature supports both IPV4 and IPV6 and exempts the
57657+ loopback interface from blackholing. Enabling this feature
57658+ makes a host more resilient to DoS attacks and reduces network
57659+ visibility against scanners.
57660+
57661+ The blackhole feature as-implemented is equivalent to the FreeBSD
57662+ blackhole feature, as it prevents RST responses to all packets, not
57663+ just SYNs. Under most application behavior this causes no
57664+ problems, but applications (like haproxy) may not close certain
57665+ connections in a way that cleanly terminates them on the remote
57666+ end, leaving the remote host in LAST_ACK state. Because of this
57667+ side-effect and to prevent intentional LAST_ACK DoSes, this
57668+ feature also adds automatic mitigation against such attacks.
57669+ The mitigation drastically reduces the amount of time a socket
57670+ can spend in LAST_ACK state. If you're using haproxy and not
57671+ all servers it connects to have this option enabled, consider
57672+ disabling this feature on the haproxy host.
57673+
57674+ If the sysctl option is enabled, two sysctl options with names
57675+ "ip_blackhole" and "lastack_retries" will be created.
57676+ While "ip_blackhole" takes the standard zero/non-zero on/off
57677+ toggle, "lastack_retries" uses the same kinds of values as
57678+ "tcp_retries1" and "tcp_retries2". The default value of 4
57679+ prevents a socket from lasting more than 45 seconds in LAST_ACK
57680+ state.
57681+
57682+config GRKERNSEC_SOCKET
57683+ bool "Socket restrictions"
57684+ depends on NET
57685+ help
57686+ If you say Y here, you will be able to choose from several options.
57687+ If you assign a GID on your system and add it to the supplementary
57688+ groups of users you want to restrict socket access to, this patch
57689+ will perform up to three things, based on the option(s) you choose.
57690+
57691+config GRKERNSEC_SOCKET_ALL
57692+ bool "Deny any sockets to group"
57693+ depends on GRKERNSEC_SOCKET
57694+ help
57695+ If you say Y here, you will be able to choose a GID whose users will
57696+ be unable to connect to other hosts from your machine or run server
57697+ applications from your machine. If the sysctl option is enabled, a
57698+ sysctl option with name "socket_all" is created.
57699+
57700+config GRKERNSEC_SOCKET_ALL_GID
57701+ int "GID to deny all sockets for"
57702+ depends on GRKERNSEC_SOCKET_ALL
57703+ default 1004
57704+ help
57705+ Here you can choose the GID to disable socket access for. Remember to
57706+ add the users you want socket access disabled for to the GID
57707+ specified here. If the sysctl option is enabled, a sysctl option
57708+ with name "socket_all_gid" is created.
57709+
57710+config GRKERNSEC_SOCKET_CLIENT
57711+ bool "Deny client sockets to group"
57712+ depends on GRKERNSEC_SOCKET
57713+ help
57714+ If you say Y here, you will be able to choose a GID whose users will
57715+ be unable to connect to other hosts from your machine, but will be
57716+ able to run servers. If this option is enabled, all users in the group
57717+ you specify will have to use passive mode when initiating ftp transfers
57718+ from the shell on your machine. If the sysctl option is enabled, a
57719+ sysctl option with name "socket_client" is created.
57720+
57721+config GRKERNSEC_SOCKET_CLIENT_GID
57722+ int "GID to deny client sockets for"
57723+ depends on GRKERNSEC_SOCKET_CLIENT
57724+ default 1003
57725+ help
57726+ Here you can choose the GID to disable client socket access for.
57727+ Remember to add the users you want client socket access disabled for to
57728+ the GID specified here. If the sysctl option is enabled, a sysctl
57729+ option with name "socket_client_gid" is created.
57730+
57731+config GRKERNSEC_SOCKET_SERVER
57732+ bool "Deny server sockets to group"
57733+ depends on GRKERNSEC_SOCKET
57734+ help
57735+ If you say Y here, you will be able to choose a GID whose users will
57736+ be unable to run server applications from your machine. If the sysctl
57737+ option is enabled, a sysctl option with name "socket_server" is created.
57738+
57739+config GRKERNSEC_SOCKET_SERVER_GID
57740+ int "GID to deny server sockets for"
57741+ depends on GRKERNSEC_SOCKET_SERVER
57742+ default 1002
57743+ help
57744+ Here you can choose the GID to disable server socket access for.
57745+ Remember to add the users you want server socket access disabled for to
57746+ the GID specified here. If the sysctl option is enabled, a sysctl
57747+ option with name "socket_server_gid" is created.
57748+
57749+endmenu
57750+menu "Sysctl support"
57751+depends on GRKERNSEC && SYSCTL
57752+
57753+config GRKERNSEC_SYSCTL
57754+ bool "Sysctl support"
57755+ help
57756+ If you say Y here, you will be able to change the options that
57757+ grsecurity runs with at bootup, without having to recompile your
57758+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
57759+ to enable (1) or disable (0) various features. All the sysctl entries
57760+ are mutable until the "grsec_lock" entry is set to a non-zero value.
57761+ All features enabled in the kernel configuration are disabled at boot
57762+ if you do not say Y to the "Turn on features by default" option.
57763+ All options should be set at startup, and the grsec_lock entry should
57764+ be set to a non-zero value after all the options are set.
57765+ *THIS IS EXTREMELY IMPORTANT*
57766+
57767+config GRKERNSEC_SYSCTL_DISTRO
57768+ bool "Extra sysctl support for distro makers (READ HELP)"
57769+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
57770+ help
57771+ If you say Y here, additional sysctl options will be created
57772+ for features that affect processes running as root. Therefore,
57773+ it is critical when using this option that the grsec_lock entry be
57774+ enabled after boot. Only distros with prebuilt kernel packages
57775+ with this option enabled that can ensure grsec_lock is enabled
57776+ after boot should use this option.
57777+ *Failure to set grsec_lock after boot makes all grsec features
57778+ this option covers useless*
57779+
57780+ Currently this option creates the following sysctl entries:
57781+ "Disable Privileged I/O": "disable_priv_io"
57782+
57783+config GRKERNSEC_SYSCTL_ON
57784+ bool "Turn on features by default"
57785+ depends on GRKERNSEC_SYSCTL
57786+ help
57787+ If you say Y here, instead of having all features enabled in the
57788+ kernel configuration disabled at boot time, the features will be
57789+ enabled at boot time. It is recommended you say Y here unless
57790+ there is some reason you would want all sysctl-tunable features to
57791+ be disabled by default. As mentioned elsewhere, it is important
57792+ to enable the grsec_lock entry once you have finished modifying
57793+ the sysctl entries.
57794+
57795+endmenu
57796+menu "Logging Options"
57797+depends on GRKERNSEC
57798+
57799+config GRKERNSEC_FLOODTIME
57800+ int "Seconds in between log messages (minimum)"
57801+ default 10
57802+ help
57803+	  This option allows you to enforce a minimum number of seconds between
57804+	  grsecurity log messages.  The default should be suitable for most
57805+	  people; if you choose to change it, pick a value small enough
57806+	  to allow informative logs to be produced, but large enough to
57807+	  prevent flooding.
57808+
57809+config GRKERNSEC_FLOODBURST
57810+ int "Number of messages in a burst (maximum)"
57811+ default 6
57812+ help
57813+ This option allows you to choose the maximum number of messages allowed
57814+ within the flood time interval you chose in a separate option. The
57815+	  default should be suitable for most people; however, if you find that
57816+	  many of your logs are being interpreted as flooding, you may want to
57817+	  raise this value.
57818+
57819+endmenu
57820+
57821+endmenu
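The sysctl help text above describes the intended workflow: write 0/1 to the files under /proc/sys/kernel/grsecurity to toggle features, then write a non-zero value to grsec_lock so the settings can no longer be changed. As a minimal illustration only (not part of the patch), the following userspace C sketch does exactly that; it assumes a kernel built with GRKERNSEC_SYSCTL, must run as root, and "exec_logging" is just an illustrative entry name.

    #include <stdio.h>

    /* Write a single value to a grsecurity sysctl entry.
     * The path follows the Kconfig help text above and only exists on a
     * kernel built with CONFIG_GRKERNSEC_SYSCTL. */
    static int grsec_sysctl_write(const char *entry, const char *value)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", entry);
            f = fopen(path, "w");
            if (!f) {
                    perror(path);
                    return -1;
            }
            fputs(value, f);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            /* Enable one feature ("exec_logging" is only an example name),
             * then lock the whole interface as the help text recommends. */
            grsec_sysctl_write("exec_logging", "1");
            return grsec_sysctl_write("grsec_lock", "1");
    }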
57822diff -urNp linux-3.1.4/grsecurity/Makefile linux-3.1.4/grsecurity/Makefile
57823--- linux-3.1.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
57824+++ linux-3.1.4/grsecurity/Makefile 2011-11-16 18:40:31.000000000 -0500
57825@@ -0,0 +1,36 @@
57826+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
57827+# during 2001-2009 it was completely redesigned by Brad Spengler
57828+# into an RBAC system
57829+#
57830+# All code in this directory and various hooks inserted throughout the kernel
57831+# are copyright Brad Spengler - Open Source Security, Inc., and released
57832+# under the GPL v2 or higher
57833+
57834+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
57835+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
57836+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
57837+
57838+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
57839+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
57840+ gracl_learn.o grsec_log.o
57841+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
57842+
57843+ifdef CONFIG_NET
57844+obj-y += grsec_sock.o
57845+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
57846+endif
57847+
57848+ifndef CONFIG_GRKERNSEC
57849+obj-y += grsec_disabled.o
57850+endif
57851+
57852+ifdef CONFIG_GRKERNSEC_HIDESYM
57853+extra-y := grsec_hidesym.o
57854+$(obj)/grsec_hidesym.o:
57855+ @-chmod -f 500 /boot
57856+ @-chmod -f 500 /lib/modules
57857+ @-chmod -f 500 /lib64/modules
57858+ @-chmod -f 500 /lib32/modules
57859+ @-chmod -f 700 .
57860+ @echo ' grsec: protected kernel image paths'
57861+endif
57862diff -urNp linux-3.1.4/include/acpi/acpi_bus.h linux-3.1.4/include/acpi/acpi_bus.h
57863--- linux-3.1.4/include/acpi/acpi_bus.h 2011-11-11 15:19:27.000000000 -0500
57864+++ linux-3.1.4/include/acpi/acpi_bus.h 2011-11-16 18:39:08.000000000 -0500
57865@@ -107,7 +107,7 @@ struct acpi_device_ops {
57866 acpi_op_bind bind;
57867 acpi_op_unbind unbind;
57868 acpi_op_notify notify;
57869-};
57870+} __no_const;
57871
57872 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57873
57874diff -urNp linux-3.1.4/include/asm-generic/atomic-long.h linux-3.1.4/include/asm-generic/atomic-long.h
57875--- linux-3.1.4/include/asm-generic/atomic-long.h 2011-11-11 15:19:27.000000000 -0500
57876+++ linux-3.1.4/include/asm-generic/atomic-long.h 2011-11-16 18:39:08.000000000 -0500
57877@@ -22,6 +22,12 @@
57878
57879 typedef atomic64_t atomic_long_t;
57880
57881+#ifdef CONFIG_PAX_REFCOUNT
57882+typedef atomic64_unchecked_t atomic_long_unchecked_t;
57883+#else
57884+typedef atomic64_t atomic_long_unchecked_t;
57885+#endif
57886+
57887 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57888
57889 static inline long atomic_long_read(atomic_long_t *l)
57890@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
57891 return (long)atomic64_read(v);
57892 }
57893
57894+#ifdef CONFIG_PAX_REFCOUNT
57895+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57896+{
57897+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57898+
57899+ return (long)atomic64_read_unchecked(v);
57900+}
57901+#endif
57902+
57903 static inline void atomic_long_set(atomic_long_t *l, long i)
57904 {
57905 atomic64_t *v = (atomic64_t *)l;
57906@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
57907 atomic64_set(v, i);
57908 }
57909
57910+#ifdef CONFIG_PAX_REFCOUNT
57911+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57912+{
57913+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57914+
57915+ atomic64_set_unchecked(v, i);
57916+}
57917+#endif
57918+
57919 static inline void atomic_long_inc(atomic_long_t *l)
57920 {
57921 atomic64_t *v = (atomic64_t *)l;
57922@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
57923 atomic64_inc(v);
57924 }
57925
57926+#ifdef CONFIG_PAX_REFCOUNT
57927+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57928+{
57929+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57930+
57931+ atomic64_inc_unchecked(v);
57932+}
57933+#endif
57934+
57935 static inline void atomic_long_dec(atomic_long_t *l)
57936 {
57937 atomic64_t *v = (atomic64_t *)l;
57938@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
57939 atomic64_dec(v);
57940 }
57941
57942+#ifdef CONFIG_PAX_REFCOUNT
57943+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57944+{
57945+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57946+
57947+ atomic64_dec_unchecked(v);
57948+}
57949+#endif
57950+
57951 static inline void atomic_long_add(long i, atomic_long_t *l)
57952 {
57953 atomic64_t *v = (atomic64_t *)l;
57954@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
57955 atomic64_add(i, v);
57956 }
57957
57958+#ifdef CONFIG_PAX_REFCOUNT
57959+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57960+{
57961+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57962+
57963+ atomic64_add_unchecked(i, v);
57964+}
57965+#endif
57966+
57967 static inline void atomic_long_sub(long i, atomic_long_t *l)
57968 {
57969 atomic64_t *v = (atomic64_t *)l;
57970@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
57971 atomic64_sub(i, v);
57972 }
57973
57974+#ifdef CONFIG_PAX_REFCOUNT
57975+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57976+{
57977+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57978+
57979+ atomic64_sub_unchecked(i, v);
57980+}
57981+#endif
57982+
57983 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57984 {
57985 atomic64_t *v = (atomic64_t *)l;
57986@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
57987 return (long)atomic64_inc_return(v);
57988 }
57989
57990+#ifdef CONFIG_PAX_REFCOUNT
57991+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57992+{
57993+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57994+
57995+ return (long)atomic64_inc_return_unchecked(v);
57996+}
57997+#endif
57998+
57999 static inline long atomic_long_dec_return(atomic_long_t *l)
58000 {
58001 atomic64_t *v = (atomic64_t *)l;
58002@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
58003
58004 typedef atomic_t atomic_long_t;
58005
58006+#ifdef CONFIG_PAX_REFCOUNT
58007+typedef atomic_unchecked_t atomic_long_unchecked_t;
58008+#else
58009+typedef atomic_t atomic_long_unchecked_t;
58010+#endif
58011+
58012 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
58013 static inline long atomic_long_read(atomic_long_t *l)
58014 {
58015@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
58016 return (long)atomic_read(v);
58017 }
58018
58019+#ifdef CONFIG_PAX_REFCOUNT
58020+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58021+{
58022+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58023+
58024+ return (long)atomic_read_unchecked(v);
58025+}
58026+#endif
58027+
58028 static inline void atomic_long_set(atomic_long_t *l, long i)
58029 {
58030 atomic_t *v = (atomic_t *)l;
58031@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
58032 atomic_set(v, i);
58033 }
58034
58035+#ifdef CONFIG_PAX_REFCOUNT
58036+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58037+{
58038+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58039+
58040+ atomic_set_unchecked(v, i);
58041+}
58042+#endif
58043+
58044 static inline void atomic_long_inc(atomic_long_t *l)
58045 {
58046 atomic_t *v = (atomic_t *)l;
58047@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
58048 atomic_inc(v);
58049 }
58050
58051+#ifdef CONFIG_PAX_REFCOUNT
58052+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58053+{
58054+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58055+
58056+ atomic_inc_unchecked(v);
58057+}
58058+#endif
58059+
58060 static inline void atomic_long_dec(atomic_long_t *l)
58061 {
58062 atomic_t *v = (atomic_t *)l;
58063@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
58064 atomic_dec(v);
58065 }
58066
58067+#ifdef CONFIG_PAX_REFCOUNT
58068+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58069+{
58070+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58071+
58072+ atomic_dec_unchecked(v);
58073+}
58074+#endif
58075+
58076 static inline void atomic_long_add(long i, atomic_long_t *l)
58077 {
58078 atomic_t *v = (atomic_t *)l;
58079@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
58080 atomic_add(i, v);
58081 }
58082
58083+#ifdef CONFIG_PAX_REFCOUNT
58084+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58085+{
58086+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58087+
58088+ atomic_add_unchecked(i, v);
58089+}
58090+#endif
58091+
58092 static inline void atomic_long_sub(long i, atomic_long_t *l)
58093 {
58094 atomic_t *v = (atomic_t *)l;
58095@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
58096 atomic_sub(i, v);
58097 }
58098
58099+#ifdef CONFIG_PAX_REFCOUNT
58100+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58101+{
58102+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58103+
58104+ atomic_sub_unchecked(i, v);
58105+}
58106+#endif
58107+
58108 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58109 {
58110 atomic_t *v = (atomic_t *)l;
58111@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
58112 return (long)atomic_inc_return(v);
58113 }
58114
58115+#ifdef CONFIG_PAX_REFCOUNT
58116+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58117+{
58118+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58119+
58120+ return (long)atomic_inc_return_unchecked(v);
58121+}
58122+#endif
58123+
58124 static inline long atomic_long_dec_return(atomic_long_t *l)
58125 {
58126 atomic_t *v = (atomic_t *)l;
58127@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
58128
58129 #endif /* BITS_PER_LONG == 64 */
58130
58131+#ifdef CONFIG_PAX_REFCOUNT
58132+static inline void pax_refcount_needs_these_functions(void)
58133+{
58134+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
58135+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
58136+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
58137+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
58138+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
58139+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
58140+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
58141+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
58142+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
58143+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
58144+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
58145+
58146+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
58147+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
58148+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
58149+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
58150+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
58151+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
58152+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
58153+}
58154+#else
58155+#define atomic_read_unchecked(v) atomic_read(v)
58156+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
58157+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
58158+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
58159+#define atomic_inc_unchecked(v) atomic_inc(v)
58160+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
58161+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
58162+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
58163+#define atomic_dec_unchecked(v) atomic_dec(v)
58164+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
58165+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
58166+
58167+#define atomic_long_read_unchecked(v) atomic_long_read(v)
58168+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
58169+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
58170+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
58171+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
58172+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
58173+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
58174+#endif
58175+
58176 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
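The atomic-long.h hunks above add an *_unchecked twin for every atomic_long operation: under CONFIG_PAX_REFCOUNT each wrapper casts atomic_long_unchecked_t to the underlying atomic{,64}_unchecked_t and calls the unchecked primitive, while without PAX_REFCOUNT the _unchecked names simply alias the regular operations. The regular ops are meant to gain reference-count overflow detection in arch-specific hunks elsewhere in the patch; the _unchecked twins opt out for counters where wrapping is harmless. A standalone userspace sketch of the same wrapping pattern, using GCC's __atomic builtins in place of the kernel primitives (purely illustrative, not kernel code):

    #include <stdio.h>

    /* Userspace stand-ins for the kernel types used in the hunks above. */
    typedef struct { volatile long counter; } atomic_long_t;
    typedef struct { volatile long counter; } atomic_long_unchecked_t;

    /* "Checked" op: in the patch this is the variant that is meant to
     * detect overflow; here it is just a plain atomic increment. */
    static inline void atomic_long_inc(atomic_long_t *l)
    {
            __atomic_add_fetch(&l->counter, 1, __ATOMIC_SEQ_CST);
    }

    /* "Unchecked" op: same layout, so the wrapper may cast between the
     * two types, mirroring atomic_long_inc_unchecked() in the patch. */
    static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
    {
            atomic_long_t *v = (atomic_long_t *)l;

            atomic_long_inc(v);
    }

    int main(void)
    {
            atomic_long_unchecked_t stats = { 0 };

            atomic_long_inc_unchecked(&stats);
            printf("counter = %ld\n", (long)stats.counter);
            return 0;
    }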
58177diff -urNp linux-3.1.4/include/asm-generic/cache.h linux-3.1.4/include/asm-generic/cache.h
58178--- linux-3.1.4/include/asm-generic/cache.h 2011-11-11 15:19:27.000000000 -0500
58179+++ linux-3.1.4/include/asm-generic/cache.h 2011-11-16 18:39:08.000000000 -0500
58180@@ -6,7 +6,7 @@
58181 * cache lines need to provide their own cache.h.
58182 */
58183
58184-#define L1_CACHE_SHIFT 5
58185-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
58186+#define L1_CACHE_SHIFT 5UL
58187+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
58188
58189 #endif /* __ASM_GENERIC_CACHE_H */
58190diff -urNp linux-3.1.4/include/asm-generic/int-l64.h linux-3.1.4/include/asm-generic/int-l64.h
58191--- linux-3.1.4/include/asm-generic/int-l64.h 2011-11-11 15:19:27.000000000 -0500
58192+++ linux-3.1.4/include/asm-generic/int-l64.h 2011-11-16 18:39:08.000000000 -0500
58193@@ -46,6 +46,8 @@ typedef unsigned int u32;
58194 typedef signed long s64;
58195 typedef unsigned long u64;
58196
58197+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
58198+
58199 #define S8_C(x) x
58200 #define U8_C(x) x ## U
58201 #define S16_C(x) x
58202diff -urNp linux-3.1.4/include/asm-generic/int-ll64.h linux-3.1.4/include/asm-generic/int-ll64.h
58203--- linux-3.1.4/include/asm-generic/int-ll64.h 2011-11-11 15:19:27.000000000 -0500
58204+++ linux-3.1.4/include/asm-generic/int-ll64.h 2011-11-16 18:39:08.000000000 -0500
58205@@ -51,6 +51,8 @@ typedef unsigned int u32;
58206 typedef signed long long s64;
58207 typedef unsigned long long u64;
58208
58209+typedef unsigned long long intoverflow_t;
58210+
58211 #define S8_C(x) x
58212 #define U8_C(x) x ## U
58213 #define S16_C(x) x
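Both int-l64.h and int-ll64.h gain an intoverflow_t type that is wider than unsigned long (128-bit via mode(TI) on 64-bit, unsigned long long on 32-bit), presumably so that size calculations elsewhere in the patch can be done in the wider type and checked against ULONG_MAX. A minimal standalone sketch of that idea, under the assumption that this is the intended use and with GCC's __int128 standing in for mode(TI):

    #include <limits.h>
    #include <stdio.h>

    typedef unsigned __int128 intoverflow_t;   /* stand-in for mode(TI), 64-bit hosts */

    /* Multiply two size values in the wider type and reject results that
     * no longer fit in an unsigned long -- such an overflow would
     * otherwise wrap silently. */
    static int mul_fits_ulong(unsigned long a, unsigned long b, unsigned long *out)
    {
            intoverflow_t prod = (intoverflow_t)a * b;

            if (prod > ULONG_MAX)
                    return 0;
            *out = (unsigned long)prod;
            return 1;
    }

    int main(void)
    {
            unsigned long r;

            printf("1000 * 1000:   %s\n", mul_fits_ulong(1000, 1000, &r) ? "ok" : "overflow");
            printf("ULONG_MAX * 2: %s\n", mul_fits_ulong(ULONG_MAX, 2, &r) ? "ok" : "overflow");
            return 0;
    }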
58214diff -urNp linux-3.1.4/include/asm-generic/kmap_types.h linux-3.1.4/include/asm-generic/kmap_types.h
58215--- linux-3.1.4/include/asm-generic/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
58216+++ linux-3.1.4/include/asm-generic/kmap_types.h 2011-11-16 18:39:08.000000000 -0500
58217@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
58218 KMAP_D(17) KM_NMI,
58219 KMAP_D(18) KM_NMI_PTE,
58220 KMAP_D(19) KM_KDB,
58221+KMAP_D(20) KM_CLEARPAGE,
58222 /*
58223 * Remember to update debug_kmap_atomic() when adding new kmap types!
58224 */
58225-KMAP_D(20) KM_TYPE_NR
58226+KMAP_D(21) KM_TYPE_NR
58227 };
58228
58229 #undef KMAP_D
58230diff -urNp linux-3.1.4/include/asm-generic/pgtable.h linux-3.1.4/include/asm-generic/pgtable.h
58231--- linux-3.1.4/include/asm-generic/pgtable.h 2011-11-11 15:19:27.000000000 -0500
58232+++ linux-3.1.4/include/asm-generic/pgtable.h 2011-11-16 18:39:08.000000000 -0500
58233@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
58234 #endif /* __HAVE_ARCH_PMD_WRITE */
58235 #endif
58236
58237+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
58238+static inline unsigned long pax_open_kernel(void) { return 0; }
58239+#endif
58240+
58241+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
58242+static inline unsigned long pax_close_kernel(void) { return 0; }
58243+#endif
58244+
58245 #endif /* !__ASSEMBLY__ */
58246
58247 #endif /* _ASM_GENERIC_PGTABLE_H */
58248diff -urNp linux-3.1.4/include/asm-generic/pgtable-nopmd.h linux-3.1.4/include/asm-generic/pgtable-nopmd.h
58249--- linux-3.1.4/include/asm-generic/pgtable-nopmd.h 2011-11-11 15:19:27.000000000 -0500
58250+++ linux-3.1.4/include/asm-generic/pgtable-nopmd.h 2011-11-16 18:39:08.000000000 -0500
58251@@ -1,14 +1,19 @@
58252 #ifndef _PGTABLE_NOPMD_H
58253 #define _PGTABLE_NOPMD_H
58254
58255-#ifndef __ASSEMBLY__
58256-
58257 #include <asm-generic/pgtable-nopud.h>
58258
58259-struct mm_struct;
58260-
58261 #define __PAGETABLE_PMD_FOLDED
58262
58263+#define PMD_SHIFT PUD_SHIFT
58264+#define PTRS_PER_PMD 1
58265+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
58266+#define PMD_MASK (~(PMD_SIZE-1))
58267+
58268+#ifndef __ASSEMBLY__
58269+
58270+struct mm_struct;
58271+
58272 /*
58273 * Having the pmd type consist of a pud gets the size right, and allows
58274 * us to conceptually access the pud entry that this pmd is folded into
58275@@ -16,11 +21,6 @@ struct mm_struct;
58276 */
58277 typedef struct { pud_t pud; } pmd_t;
58278
58279-#define PMD_SHIFT PUD_SHIFT
58280-#define PTRS_PER_PMD 1
58281-#define PMD_SIZE (1UL << PMD_SHIFT)
58282-#define PMD_MASK (~(PMD_SIZE-1))
58283-
58284 /*
58285 * The "pud_xxx()" functions here are trivial for a folded two-level
58286 * setup: the pmd is never bad, and a pmd always exists (as it's folded
58287diff -urNp linux-3.1.4/include/asm-generic/pgtable-nopud.h linux-3.1.4/include/asm-generic/pgtable-nopud.h
58288--- linux-3.1.4/include/asm-generic/pgtable-nopud.h 2011-11-11 15:19:27.000000000 -0500
58289+++ linux-3.1.4/include/asm-generic/pgtable-nopud.h 2011-11-16 18:39:08.000000000 -0500
58290@@ -1,10 +1,15 @@
58291 #ifndef _PGTABLE_NOPUD_H
58292 #define _PGTABLE_NOPUD_H
58293
58294-#ifndef __ASSEMBLY__
58295-
58296 #define __PAGETABLE_PUD_FOLDED
58297
58298+#define PUD_SHIFT PGDIR_SHIFT
58299+#define PTRS_PER_PUD 1
58300+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
58301+#define PUD_MASK (~(PUD_SIZE-1))
58302+
58303+#ifndef __ASSEMBLY__
58304+
58305 /*
58306 * Having the pud type consist of a pgd gets the size right, and allows
58307 * us to conceptually access the pgd entry that this pud is folded into
58308@@ -12,11 +17,6 @@
58309 */
58310 typedef struct { pgd_t pgd; } pud_t;
58311
58312-#define PUD_SHIFT PGDIR_SHIFT
58313-#define PTRS_PER_PUD 1
58314-#define PUD_SIZE (1UL << PUD_SHIFT)
58315-#define PUD_MASK (~(PUD_SIZE-1))
58316-
58317 /*
58318 * The "pgd_xxx()" functions here are trivial for a folded two-level
58319 * setup: the pud is never bad, and a pud always exists (as it's folded
58320diff -urNp linux-3.1.4/include/asm-generic/vmlinux.lds.h linux-3.1.4/include/asm-generic/vmlinux.lds.h
58321--- linux-3.1.4/include/asm-generic/vmlinux.lds.h 2011-11-11 15:19:27.000000000 -0500
58322+++ linux-3.1.4/include/asm-generic/vmlinux.lds.h 2011-11-16 18:39:08.000000000 -0500
58323@@ -217,6 +217,7 @@
58324 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
58325 VMLINUX_SYMBOL(__start_rodata) = .; \
58326 *(.rodata) *(.rodata.*) \
58327+ *(.data..read_only) \
58328 *(__vermagic) /* Kernel version magic */ \
58329 . = ALIGN(8); \
58330 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
58331@@ -723,17 +724,18 @@
58332 * section in the linker script will go there too. @phdr should have
58333 * a leading colon.
58334 *
58335- * Note that this macros defines __per_cpu_load as an absolute symbol.
58336+ * Note that this macro defines per_cpu_load as an absolute symbol.
58337 * If there is no need to put the percpu section at a predetermined
58338 * address, use PERCPU_SECTION.
58339 */
58340 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
58341- VMLINUX_SYMBOL(__per_cpu_load) = .; \
58342- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
58343+ per_cpu_load = .; \
58344+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
58345 - LOAD_OFFSET) { \
58346+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
58347 PERCPU_INPUT(cacheline) \
58348 } phdr \
58349- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
58350+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
58351
58352 /**
58353 * PERCPU_SECTION - define output section for percpu area, simple version
58354diff -urNp linux-3.1.4/include/drm/drm_crtc_helper.h linux-3.1.4/include/drm/drm_crtc_helper.h
58355--- linux-3.1.4/include/drm/drm_crtc_helper.h 2011-11-11 15:19:27.000000000 -0500
58356+++ linux-3.1.4/include/drm/drm_crtc_helper.h 2011-11-16 18:39:08.000000000 -0500
58357@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
58358
58359 /* disable crtc when not in use - more explicit than dpms off */
58360 void (*disable)(struct drm_crtc *crtc);
58361-};
58362+} __no_const;
58363
58364 struct drm_encoder_helper_funcs {
58365 void (*dpms)(struct drm_encoder *encoder, int mode);
58366@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
58367 struct drm_connector *connector);
58368 /* disable encoder when not in use - more explicit than dpms off */
58369 void (*disable)(struct drm_encoder *encoder);
58370-};
58371+} __no_const;
58372
58373 struct drm_connector_helper_funcs {
58374 int (*get_modes)(struct drm_connector *connector);
58375diff -urNp linux-3.1.4/include/drm/drmP.h linux-3.1.4/include/drm/drmP.h
58376--- linux-3.1.4/include/drm/drmP.h 2011-11-11 15:19:27.000000000 -0500
58377+++ linux-3.1.4/include/drm/drmP.h 2011-11-16 18:39:08.000000000 -0500
58378@@ -73,6 +73,7 @@
58379 #include <linux/workqueue.h>
58380 #include <linux/poll.h>
58381 #include <asm/pgalloc.h>
58382+#include <asm/local.h>
58383 #include "drm.h"
58384
58385 #include <linux/idr.h>
58386@@ -1035,7 +1036,7 @@ struct drm_device {
58387
58388 /** \name Usage Counters */
58389 /*@{ */
58390- int open_count; /**< Outstanding files open */
58391+ local_t open_count; /**< Outstanding files open */
58392 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
58393 atomic_t vma_count; /**< Outstanding vma areas open */
58394 int buf_use; /**< Buffers in use -- cannot alloc */
58395@@ -1046,7 +1047,7 @@ struct drm_device {
58396 /*@{ */
58397 unsigned long counters;
58398 enum drm_stat_type types[15];
58399- atomic_t counts[15];
58400+ atomic_unchecked_t counts[15];
58401 /*@} */
58402
58403 struct list_head filelist;
58404diff -urNp linux-3.1.4/include/drm/ttm/ttm_memory.h linux-3.1.4/include/drm/ttm/ttm_memory.h
58405--- linux-3.1.4/include/drm/ttm/ttm_memory.h 2011-11-11 15:19:27.000000000 -0500
58406+++ linux-3.1.4/include/drm/ttm/ttm_memory.h 2011-11-16 18:39:08.000000000 -0500
58407@@ -47,7 +47,7 @@
58408
58409 struct ttm_mem_shrink {
58410 int (*do_shrink) (struct ttm_mem_shrink *);
58411-};
58412+} __no_const;
58413
58414 /**
58415 * struct ttm_mem_global - Global memory accounting structure.
58416diff -urNp linux-3.1.4/include/linux/a.out.h linux-3.1.4/include/linux/a.out.h
58417--- linux-3.1.4/include/linux/a.out.h 2011-11-11 15:19:27.000000000 -0500
58418+++ linux-3.1.4/include/linux/a.out.h 2011-11-16 18:39:08.000000000 -0500
58419@@ -39,6 +39,14 @@ enum machine_type {
58420 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
58421 };
58422
58423+/* Constants for the N_FLAGS field */
58424+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58425+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
58426+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
58427+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
58428+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58429+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58430+
58431 #if !defined (N_MAGIC)
58432 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
58433 #endif
58434diff -urNp linux-3.1.4/include/linux/atmdev.h linux-3.1.4/include/linux/atmdev.h
58435--- linux-3.1.4/include/linux/atmdev.h 2011-11-11 15:19:27.000000000 -0500
58436+++ linux-3.1.4/include/linux/atmdev.h 2011-11-16 18:39:08.000000000 -0500
58437@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
58438 #endif
58439
58440 struct k_atm_aal_stats {
58441-#define __HANDLE_ITEM(i) atomic_t i
58442+#define __HANDLE_ITEM(i) atomic_unchecked_t i
58443 __AAL_STAT_ITEMS
58444 #undef __HANDLE_ITEM
58445 };
58446diff -urNp linux-3.1.4/include/linux/binfmts.h linux-3.1.4/include/linux/binfmts.h
58447--- linux-3.1.4/include/linux/binfmts.h 2011-11-11 15:19:27.000000000 -0500
58448+++ linux-3.1.4/include/linux/binfmts.h 2011-11-16 18:39:08.000000000 -0500
58449@@ -88,6 +88,7 @@ struct linux_binfmt {
58450 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
58451 int (*load_shlib)(struct file *);
58452 int (*core_dump)(struct coredump_params *cprm);
58453+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
58454 unsigned long min_coredump; /* minimal dump size */
58455 };
58456
58457diff -urNp linux-3.1.4/include/linux/blkdev.h linux-3.1.4/include/linux/blkdev.h
58458--- linux-3.1.4/include/linux/blkdev.h 2011-11-11 15:19:27.000000000 -0500
58459+++ linux-3.1.4/include/linux/blkdev.h 2011-11-16 18:39:08.000000000 -0500
58460@@ -1321,7 +1321,7 @@ struct block_device_operations {
58461 /* this callback is with swap_lock and sometimes page table lock held */
58462 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
58463 struct module *owner;
58464-};
58465+} __do_const;
58466
58467 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
58468 unsigned long);
58469diff -urNp linux-3.1.4/include/linux/blktrace_api.h linux-3.1.4/include/linux/blktrace_api.h
58470--- linux-3.1.4/include/linux/blktrace_api.h 2011-11-11 15:19:27.000000000 -0500
58471+++ linux-3.1.4/include/linux/blktrace_api.h 2011-11-16 18:39:08.000000000 -0500
58472@@ -162,7 +162,7 @@ struct blk_trace {
58473 struct dentry *dir;
58474 struct dentry *dropped_file;
58475 struct dentry *msg_file;
58476- atomic_t dropped;
58477+ atomic_unchecked_t dropped;
58478 };
58479
58480 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
58481diff -urNp linux-3.1.4/include/linux/byteorder/little_endian.h linux-3.1.4/include/linux/byteorder/little_endian.h
58482--- linux-3.1.4/include/linux/byteorder/little_endian.h 2011-11-11 15:19:27.000000000 -0500
58483+++ linux-3.1.4/include/linux/byteorder/little_endian.h 2011-11-16 18:39:08.000000000 -0500
58484@@ -42,51 +42,51 @@
58485
58486 static inline __le64 __cpu_to_le64p(const __u64 *p)
58487 {
58488- return (__force __le64)*p;
58489+ return (__force const __le64)*p;
58490 }
58491 static inline __u64 __le64_to_cpup(const __le64 *p)
58492 {
58493- return (__force __u64)*p;
58494+ return (__force const __u64)*p;
58495 }
58496 static inline __le32 __cpu_to_le32p(const __u32 *p)
58497 {
58498- return (__force __le32)*p;
58499+ return (__force const __le32)*p;
58500 }
58501 static inline __u32 __le32_to_cpup(const __le32 *p)
58502 {
58503- return (__force __u32)*p;
58504+ return (__force const __u32)*p;
58505 }
58506 static inline __le16 __cpu_to_le16p(const __u16 *p)
58507 {
58508- return (__force __le16)*p;
58509+ return (__force const __le16)*p;
58510 }
58511 static inline __u16 __le16_to_cpup(const __le16 *p)
58512 {
58513- return (__force __u16)*p;
58514+ return (__force const __u16)*p;
58515 }
58516 static inline __be64 __cpu_to_be64p(const __u64 *p)
58517 {
58518- return (__force __be64)__swab64p(p);
58519+ return (__force const __be64)__swab64p(p);
58520 }
58521 static inline __u64 __be64_to_cpup(const __be64 *p)
58522 {
58523- return __swab64p((__u64 *)p);
58524+ return __swab64p((const __u64 *)p);
58525 }
58526 static inline __be32 __cpu_to_be32p(const __u32 *p)
58527 {
58528- return (__force __be32)__swab32p(p);
58529+ return (__force const __be32)__swab32p(p);
58530 }
58531 static inline __u32 __be32_to_cpup(const __be32 *p)
58532 {
58533- return __swab32p((__u32 *)p);
58534+ return __swab32p((const __u32 *)p);
58535 }
58536 static inline __be16 __cpu_to_be16p(const __u16 *p)
58537 {
58538- return (__force __be16)__swab16p(p);
58539+ return (__force const __be16)__swab16p(p);
58540 }
58541 static inline __u16 __be16_to_cpup(const __be16 *p)
58542 {
58543- return __swab16p((__u16 *)p);
58544+ return __swab16p((const __u16 *)p);
58545 }
58546 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
58547 #define __le64_to_cpus(x) do { (void)(x); } while (0)
58548diff -urNp linux-3.1.4/include/linux/cache.h linux-3.1.4/include/linux/cache.h
58549--- linux-3.1.4/include/linux/cache.h 2011-11-11 15:19:27.000000000 -0500
58550+++ linux-3.1.4/include/linux/cache.h 2011-11-16 18:39:08.000000000 -0500
58551@@ -16,6 +16,10 @@
58552 #define __read_mostly
58553 #endif
58554
58555+#ifndef __read_only
58556+#define __read_only __read_mostly
58557+#endif
58558+
58559 #ifndef ____cacheline_aligned
58560 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
58561 #endif
58562diff -urNp linux-3.1.4/include/linux/capability.h linux-3.1.4/include/linux/capability.h
58563--- linux-3.1.4/include/linux/capability.h 2011-11-11 15:19:27.000000000 -0500
58564+++ linux-3.1.4/include/linux/capability.h 2011-11-16 18:40:31.000000000 -0500
58565@@ -547,6 +547,9 @@ extern bool capable(int cap);
58566 extern bool ns_capable(struct user_namespace *ns, int cap);
58567 extern bool task_ns_capable(struct task_struct *t, int cap);
58568 extern bool nsown_capable(int cap);
58569+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
58570+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
58571+extern bool capable_nolog(int cap);
58572
58573 /* audit system wants to get cap info from files as well */
58574 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
58575diff -urNp linux-3.1.4/include/linux/cleancache.h linux-3.1.4/include/linux/cleancache.h
58576--- linux-3.1.4/include/linux/cleancache.h 2011-11-11 15:19:27.000000000 -0500
58577+++ linux-3.1.4/include/linux/cleancache.h 2011-11-16 18:39:08.000000000 -0500
58578@@ -31,7 +31,7 @@ struct cleancache_ops {
58579 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
58580 void (*flush_inode)(int, struct cleancache_filekey);
58581 void (*flush_fs)(int);
58582-};
58583+} __no_const;
58584
58585 extern struct cleancache_ops
58586 cleancache_register_ops(struct cleancache_ops *ops);
58587diff -urNp linux-3.1.4/include/linux/compiler-gcc4.h linux-3.1.4/include/linux/compiler-gcc4.h
58588--- linux-3.1.4/include/linux/compiler-gcc4.h 2011-11-11 15:19:27.000000000 -0500
58589+++ linux-3.1.4/include/linux/compiler-gcc4.h 2011-11-16 18:39:08.000000000 -0500
58590@@ -31,6 +31,12 @@
58591
58592
58593 #if __GNUC_MINOR__ >= 5
58594+
58595+#ifdef CONSTIFY_PLUGIN
58596+#define __no_const __attribute__((no_const))
58597+#define __do_const __attribute__((do_const))
58598+#endif
58599+
58600 /*
58601 * Mark a position in code as unreachable. This can be used to
58602 * suppress control flow warnings after asm blocks that transfer
58603@@ -46,6 +52,11 @@
58604 #define __noclone __attribute__((__noclone__))
58605
58606 #endif
58607+
58608+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
58609+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
58610+#define __bos0(ptr) __bos((ptr), 0)
58611+#define __bos1(ptr) __bos((ptr), 1)
58612 #endif
58613
58614 #if __GNUC_MINOR__ > 0
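For GCC >= 4.5 the hunk above defines __alloc_size and the __bos/__bos0/__bos1 wrappers around __builtin_object_size, presumably for size-checked helpers elsewhere in the patch. A small standalone C example of what the builtin reports (illustrative only; compile with optimization, e.g. -O2, so the sizes can be resolved):

    #include <stdio.h>
    #include <stdlib.h>

    #define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
    #define __bos0(ptr)     __bos((ptr), 0)
    #define __bos1(ptr)     __bos((ptr), 1)

    struct pkt {
            int  len;
            char payload[16];
    };

    int main(void)
    {
            struct pkt p;
            char *heap = malloc(32);

            /* Expected when the object is visible to the compiler:
             * the whole struct (typically 20) vs. the member (16). */
            printf("bos0(&p)        = %zu\n", __bos0(&p));
            printf("bos1(p.payload) = %zu\n", __bos1(p.payload));

            /* Heap memory depends on what the compiler can prove: 32 if it
             * tracks malloc's alloc_size attribute, (size_t)-1 otherwise. */
            printf("bos0(heap)      = %zu\n", __bos0(heap));

            free(heap);
            return 0;
    }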
58615diff -urNp linux-3.1.4/include/linux/compiler.h linux-3.1.4/include/linux/compiler.h
58616--- linux-3.1.4/include/linux/compiler.h 2011-11-11 15:19:27.000000000 -0500
58617+++ linux-3.1.4/include/linux/compiler.h 2011-11-16 18:39:08.000000000 -0500
58618@@ -5,31 +5,62 @@
58619
58620 #ifdef __CHECKER__
58621 # define __user __attribute__((noderef, address_space(1)))
58622+# define __force_user __force __user
58623 # define __kernel __attribute__((address_space(0)))
58624+# define __force_kernel __force __kernel
58625 # define __safe __attribute__((safe))
58626 # define __force __attribute__((force))
58627 # define __nocast __attribute__((nocast))
58628 # define __iomem __attribute__((noderef, address_space(2)))
58629+# define __force_iomem __force __iomem
58630 # define __acquires(x) __attribute__((context(x,0,1)))
58631 # define __releases(x) __attribute__((context(x,1,0)))
58632 # define __acquire(x) __context__(x,1)
58633 # define __release(x) __context__(x,-1)
58634 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
58635 # define __percpu __attribute__((noderef, address_space(3)))
58636+# define __force_percpu __force __percpu
58637 #ifdef CONFIG_SPARSE_RCU_POINTER
58638 # define __rcu __attribute__((noderef, address_space(4)))
58639+# define __force_rcu __force __rcu
58640 #else
58641 # define __rcu
58642+# define __force_rcu
58643 #endif
58644 extern void __chk_user_ptr(const volatile void __user *);
58645 extern void __chk_io_ptr(const volatile void __iomem *);
58646+#elif defined(CHECKER_PLUGIN)
58647+//# define __user
58648+//# define __force_user
58649+//# define __kernel
58650+//# define __force_kernel
58651+# define __safe
58652+# define __force
58653+# define __nocast
58654+# define __iomem
58655+# define __force_iomem
58656+# define __chk_user_ptr(x) (void)0
58657+# define __chk_io_ptr(x) (void)0
58658+# define __builtin_warning(x, y...) (1)
58659+# define __acquires(x)
58660+# define __releases(x)
58661+# define __acquire(x) (void)0
58662+# define __release(x) (void)0
58663+# define __cond_lock(x,c) (c)
58664+# define __percpu
58665+# define __force_percpu
58666+# define __rcu
58667+# define __force_rcu
58668 #else
58669 # define __user
58670+# define __force_user
58671 # define __kernel
58672+# define __force_kernel
58673 # define __safe
58674 # define __force
58675 # define __nocast
58676 # define __iomem
58677+# define __force_iomem
58678 # define __chk_user_ptr(x) (void)0
58679 # define __chk_io_ptr(x) (void)0
58680 # define __builtin_warning(x, y...) (1)
58681@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
58682 # define __release(x) (void)0
58683 # define __cond_lock(x,c) (c)
58684 # define __percpu
58685+# define __force_percpu
58686 # define __rcu
58687+# define __force_rcu
58688 #endif
58689
58690 #ifdef __KERNEL__
58691@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
58692 # define __attribute_const__ /* unimplemented */
58693 #endif
58694
58695+#ifndef __no_const
58696+# define __no_const
58697+#endif
58698+
58699+#ifndef __do_const
58700+# define __do_const
58701+#endif
58702+
58703 /*
58704 * Tell gcc if a function is cold. The compiler will assume any path
58705 * directly leading to the call is unlikely.
58706@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
58707 #define __cold
58708 #endif
58709
58710+#ifndef __alloc_size
58711+#define __alloc_size(...)
58712+#endif
58713+
58714+#ifndef __bos
58715+#define __bos(ptr, arg)
58716+#endif
58717+
58718+#ifndef __bos0
58719+#define __bos0(ptr)
58720+#endif
58721+
58722+#ifndef __bos1
58723+#define __bos1(ptr)
58724+#endif
58725+
58726 /* Simple shorthand for a section definition */
58727 #ifndef __section
58728 # define __section(S) __attribute__ ((__section__(#S)))
58729@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
58730 * use is to mediate communication between process-level code and irq/NMI
58731 * handlers, all running on the same CPU.
58732 */
58733-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58734+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
58735+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
58736
58737 #endif /* __LINUX_COMPILER_H */
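The compiler.h hunk makes ACCESS_ONCE read-only by casting through a volatile const pointer and introduces ACCESS_ONCE_RW for the now-explicit writable case, which is what the cpuset.h hunk below switches to. A minimal userspace illustration of the difference (GCC typeof extension, not kernel code):

    #include <stdio.h>

    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    int main(void)
    {
            int flag = 0;

            /* Reads are fine through either form. */
            printf("flag = %d\n", ACCESS_ONCE(flag));

            /* Writes must use the RW variant; the line below is legal,
             * whereas `ACCESS_ONCE(flag) = 2;` would be rejected at
             * compile time because of the const qualifier. */
            ACCESS_ONCE_RW(flag) = 1;

            printf("flag = %d\n", ACCESS_ONCE_RW(flag));
            return 0;
    }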
58738diff -urNp linux-3.1.4/include/linux/cpuset.h linux-3.1.4/include/linux/cpuset.h
58739--- linux-3.1.4/include/linux/cpuset.h 2011-11-11 15:19:27.000000000 -0500
58740+++ linux-3.1.4/include/linux/cpuset.h 2011-11-16 18:39:08.000000000 -0500
58741@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
58742 * nodemask.
58743 */
58744 smp_mb();
58745- --ACCESS_ONCE(current->mems_allowed_change_disable);
58746+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
58747 }
58748
58749 static inline void set_mems_allowed(nodemask_t nodemask)
58750diff -urNp linux-3.1.4/include/linux/crypto.h linux-3.1.4/include/linux/crypto.h
58751--- linux-3.1.4/include/linux/crypto.h 2011-11-11 15:19:27.000000000 -0500
58752+++ linux-3.1.4/include/linux/crypto.h 2011-11-16 18:39:08.000000000 -0500
58753@@ -361,7 +361,7 @@ struct cipher_tfm {
58754 const u8 *key, unsigned int keylen);
58755 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58756 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58757-};
58758+} __no_const;
58759
58760 struct hash_tfm {
58761 int (*init)(struct hash_desc *desc);
58762@@ -382,13 +382,13 @@ struct compress_tfm {
58763 int (*cot_decompress)(struct crypto_tfm *tfm,
58764 const u8 *src, unsigned int slen,
58765 u8 *dst, unsigned int *dlen);
58766-};
58767+} __no_const;
58768
58769 struct rng_tfm {
58770 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58771 unsigned int dlen);
58772 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58773-};
58774+} __no_const;
58775
58776 #define crt_ablkcipher crt_u.ablkcipher
58777 #define crt_aead crt_u.aead
58778diff -urNp linux-3.1.4/include/linux/decompress/mm.h linux-3.1.4/include/linux/decompress/mm.h
58779--- linux-3.1.4/include/linux/decompress/mm.h 2011-11-11 15:19:27.000000000 -0500
58780+++ linux-3.1.4/include/linux/decompress/mm.h 2011-11-16 18:39:08.000000000 -0500
58781@@ -77,7 +77,7 @@ static void free(void *where)
58782 * warnings when not needed (indeed large_malloc / large_free are not
58783 * needed by inflate */
58784
58785-#define malloc(a) kmalloc(a, GFP_KERNEL)
58786+#define malloc(a) kmalloc((a), GFP_KERNEL)
58787 #define free(a) kfree(a)
58788
58789 #define large_malloc(a) vmalloc(a)
58790diff -urNp linux-3.1.4/include/linux/dma-mapping.h linux-3.1.4/include/linux/dma-mapping.h
58791--- linux-3.1.4/include/linux/dma-mapping.h 2011-11-11 15:19:27.000000000 -0500
58792+++ linux-3.1.4/include/linux/dma-mapping.h 2011-11-16 18:39:08.000000000 -0500
58793@@ -42,7 +42,7 @@ struct dma_map_ops {
58794 int (*dma_supported)(struct device *dev, u64 mask);
58795 int (*set_dma_mask)(struct device *dev, u64 mask);
58796 int is_phys;
58797-};
58798+} __do_const;
58799
58800 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58801
58802diff -urNp linux-3.1.4/include/linux/efi.h linux-3.1.4/include/linux/efi.h
58803--- linux-3.1.4/include/linux/efi.h 2011-11-11 15:19:27.000000000 -0500
58804+++ linux-3.1.4/include/linux/efi.h 2011-11-16 18:39:08.000000000 -0500
58805@@ -446,7 +446,7 @@ struct efivar_operations {
58806 efi_get_variable_t *get_variable;
58807 efi_get_next_variable_t *get_next_variable;
58808 efi_set_variable_t *set_variable;
58809-};
58810+} __no_const;
58811
58812 struct efivars {
58813 /*
58814diff -urNp linux-3.1.4/include/linux/elf.h linux-3.1.4/include/linux/elf.h
58815--- linux-3.1.4/include/linux/elf.h 2011-11-11 15:19:27.000000000 -0500
58816+++ linux-3.1.4/include/linux/elf.h 2011-11-16 18:39:08.000000000 -0500
58817@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58818 #define PT_GNU_EH_FRAME 0x6474e550
58819
58820 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58821+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58822+
58823+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58824+
58825+/* Constants for the e_flags field */
58826+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58827+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58828+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58829+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58830+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58831+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58832
58833 /*
58834 * Extended Numbering
58835@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58836 #define DT_DEBUG 21
58837 #define DT_TEXTREL 22
58838 #define DT_JMPREL 23
58839+#define DT_FLAGS 30
58840+ #define DF_TEXTREL 0x00000004
58841 #define DT_ENCODING 32
58842 #define OLD_DT_LOOS 0x60000000
58843 #define DT_LOOS 0x6000000d
58844@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58845 #define PF_W 0x2
58846 #define PF_X 0x1
58847
58848+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58849+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58850+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58851+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58852+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58853+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58854+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58855+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58856+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58857+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58858+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58859+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58860+
58861 typedef struct elf32_phdr{
58862 Elf32_Word p_type;
58863 Elf32_Off p_offset;
58864@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58865 #define EI_OSABI 7
58866 #define EI_PAD 8
58867
58868+#define EI_PAX 14
58869+
58870 #define ELFMAG0 0x7f /* EI_MAG */
58871 #define ELFMAG1 'E'
58872 #define ELFMAG2 'L'
58873@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
58874 #define elf_note elf32_note
58875 #define elf_addr_t Elf32_Off
58876 #define Elf_Half Elf32_Half
58877+#define elf_dyn Elf32_Dyn
58878
58879 #else
58880
58881@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
58882 #define elf_note elf64_note
58883 #define elf_addr_t Elf64_Off
58884 #define Elf_Half Elf64_Half
58885+#define elf_dyn Elf64_Dyn
58886
58887 #endif
58888
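elf.h gains a PT_PAX_FLAGS program-header type plus PF_* and EF_PAX_* flag bits that record a binary's per-feature PaX policy (PAGEEXEC, SEGMEXEC, MPROTECT, ...). As an illustration only, here is a small userspace tool that scans a 64-bit ELF's program headers for such an entry and decodes two of the bits; the constants are copied from the hunk above, and everything else is an assumption about how a marking tool might read them, not code from the patch:

    #include <elf.h>
    #include <stdio.h>

    /* Values copied from the elf.h hunk above. */
    #define PT_PAX_FLAGS  (PT_LOOS + 0x5041580)
    #define PF_PAGEEXEC   (1U << 4)
    #define PF_MPROTECT   (1U << 8)

    int main(int argc, char **argv)
    {
            FILE *f;
            Elf64_Ehdr eh;
            Elf64_Phdr ph;
            int i;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <64-bit ELF file>\n", argv[0]);
                    return 1;
            }
            f = fopen(argv[1], "rb");
            if (!f || fread(&eh, sizeof(eh), 1, f) != 1) {
                    perror(argv[1]);
                    return 1;
            }
            for (i = 0; i < eh.e_phnum; i++) {
                    fseek(f, eh.e_phoff + (long)i * eh.e_phentsize, SEEK_SET);
                    if (fread(&ph, sizeof(ph), 1, f) != 1)
                            break;
                    if (ph.p_type == PT_PAX_FLAGS)
                            printf("PT_PAX_FLAGS: PAGEEXEC=%s MPROTECT=%s\n",
                                   (ph.p_flags & PF_PAGEEXEC) ? "on" : "off",
                                   (ph.p_flags & PF_MPROTECT) ? "on" : "off");
            }
            fclose(f);
            return 0;
    }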
58889diff -urNp linux-3.1.4/include/linux/filter.h linux-3.1.4/include/linux/filter.h
58890--- linux-3.1.4/include/linux/filter.h 2011-11-11 15:19:27.000000000 -0500
58891+++ linux-3.1.4/include/linux/filter.h 2011-11-20 19:21:53.000000000 -0500
58892@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_A
58893
58894 struct sk_buff;
58895 struct sock;
58896+struct bpf_jit_work;
58897
58898 struct sk_filter
58899 {
58900@@ -141,6 +142,9 @@ struct sk_filter
58901 unsigned int len; /* Number of filter blocks */
58902 unsigned int (*bpf_func)(const struct sk_buff *skb,
58903 const struct sock_filter *filter);
58904+#ifdef CONFIG_BPF_JIT
58905+ struct bpf_jit_work *work;
58906+#endif
58907 struct rcu_head rcu;
58908 struct sock_filter insns[0];
58909 };
58910diff -urNp linux-3.1.4/include/linux/firewire.h linux-3.1.4/include/linux/firewire.h
58911--- linux-3.1.4/include/linux/firewire.h 2011-11-11 15:19:27.000000000 -0500
58912+++ linux-3.1.4/include/linux/firewire.h 2011-11-16 18:39:08.000000000 -0500
58913@@ -428,7 +428,7 @@ struct fw_iso_context {
58914 union {
58915 fw_iso_callback_t sc;
58916 fw_iso_mc_callback_t mc;
58917- } callback;
58918+ } __no_const callback;
58919 void *callback_data;
58920 };
58921
58922diff -urNp linux-3.1.4/include/linux/fscache-cache.h linux-3.1.4/include/linux/fscache-cache.h
58923--- linux-3.1.4/include/linux/fscache-cache.h 2011-11-11 15:19:27.000000000 -0500
58924+++ linux-3.1.4/include/linux/fscache-cache.h 2011-11-16 18:39:08.000000000 -0500
58925@@ -102,7 +102,7 @@ struct fscache_operation {
58926 fscache_operation_release_t release;
58927 };
58928
58929-extern atomic_t fscache_op_debug_id;
58930+extern atomic_unchecked_t fscache_op_debug_id;
58931 extern void fscache_op_work_func(struct work_struct *work);
58932
58933 extern void fscache_enqueue_operation(struct fscache_operation *);
58934@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
58935 {
58936 INIT_WORK(&op->work, fscache_op_work_func);
58937 atomic_set(&op->usage, 1);
58938- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58939+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58940 op->processor = processor;
58941 op->release = release;
58942 INIT_LIST_HEAD(&op->pend_link);
58943diff -urNp linux-3.1.4/include/linux/fs.h linux-3.1.4/include/linux/fs.h
58944--- linux-3.1.4/include/linux/fs.h 2011-11-11 15:19:27.000000000 -0500
58945+++ linux-3.1.4/include/linux/fs.h 2011-11-16 23:39:39.000000000 -0500
58946@@ -1588,7 +1588,8 @@ struct file_operations {
58947 int (*setlease)(struct file *, long, struct file_lock **);
58948 long (*fallocate)(struct file *file, int mode, loff_t offset,
58949 loff_t len);
58950-};
58951+} __do_const;
58952+typedef struct file_operations __no_const file_operations_no_const;
58953
58954 struct inode_operations {
58955 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58956diff -urNp linux-3.1.4/include/linux/fsnotify.h linux-3.1.4/include/linux/fsnotify.h
58957--- linux-3.1.4/include/linux/fsnotify.h 2011-11-11 15:19:27.000000000 -0500
58958+++ linux-3.1.4/include/linux/fsnotify.h 2011-11-16 18:39:08.000000000 -0500
58959@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
58960 */
58961 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58962 {
58963- return kstrdup(name, GFP_KERNEL);
58964+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58965 }
58966
58967 /*
58968diff -urNp linux-3.1.4/include/linux/fs_struct.h linux-3.1.4/include/linux/fs_struct.h
58969--- linux-3.1.4/include/linux/fs_struct.h 2011-11-11 15:19:27.000000000 -0500
58970+++ linux-3.1.4/include/linux/fs_struct.h 2011-11-16 18:39:08.000000000 -0500
58971@@ -6,7 +6,7 @@
58972 #include <linux/seqlock.h>
58973
58974 struct fs_struct {
58975- int users;
58976+ atomic_t users;
58977 spinlock_t lock;
58978 seqcount_t seq;
58979 int umask;
58980diff -urNp linux-3.1.4/include/linux/ftrace_event.h linux-3.1.4/include/linux/ftrace_event.h
58981--- linux-3.1.4/include/linux/ftrace_event.h 2011-11-11 15:19:27.000000000 -0500
58982+++ linux-3.1.4/include/linux/ftrace_event.h 2011-11-16 18:39:08.000000000 -0500
58983@@ -97,7 +97,7 @@ struct trace_event_functions {
58984 trace_print_func raw;
58985 trace_print_func hex;
58986 trace_print_func binary;
58987-};
58988+} __no_const;
58989
58990 struct trace_event {
58991 struct hlist_node node;
58992@@ -252,7 +252,7 @@ extern int trace_define_field(struct ftr
58993 extern int trace_add_event_call(struct ftrace_event_call *call);
58994 extern void trace_remove_event_call(struct ftrace_event_call *call);
58995
58996-#define is_signed_type(type) (((type)(-1)) < 0)
58997+#define is_signed_type(type) (((type)(-1)) < (type)1)
58998
58999 int trace_set_clr_event(const char *system, const char *event, int set);
59000
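The ftrace_event.h hunk changes is_signed_type() from comparing (type)(-1) against 0 to comparing it against (type)1, which yields identical results for signed and unsigned types while presumably avoiding "comparison is always false" style warnings that the `< 0` form can trigger for unsigned types. A quick standalone compile-time check of the equivalence (C11 _Static_assert):

    /* Both forms of the macro, as in the hunk above (old and new). */
    #define is_signed_type_old(type) (((type)(-1)) < 0)
    #define is_signed_type_new(type) (((type)(-1)) < (type)1)

    /* The rewrite does not change any result. */
    _Static_assert(is_signed_type_new(int)          == 1, "int is signed");
    _Static_assert(is_signed_type_new(long)         == 1, "long is signed");
    _Static_assert(is_signed_type_new(unsigned int) == 0, "unsigned int is not");
    _Static_assert(is_signed_type_old(int) == is_signed_type_new(int), "same");
    _Static_assert(is_signed_type_old(unsigned char) ==
                   is_signed_type_new(unsigned char), "same");

    int main(void) { return 0; }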
59001diff -urNp linux-3.1.4/include/linux/genhd.h linux-3.1.4/include/linux/genhd.h
59002--- linux-3.1.4/include/linux/genhd.h 2011-11-11 15:19:27.000000000 -0500
59003+++ linux-3.1.4/include/linux/genhd.h 2011-11-16 18:39:08.000000000 -0500
59004@@ -184,7 +184,7 @@ struct gendisk {
59005 struct kobject *slave_dir;
59006
59007 struct timer_rand_state *random;
59008- atomic_t sync_io; /* RAID */
59009+ atomic_unchecked_t sync_io; /* RAID */
59010 struct disk_events *ev;
59011 #ifdef CONFIG_BLK_DEV_INTEGRITY
59012 struct blk_integrity *integrity;
59013diff -urNp linux-3.1.4/include/linux/gracl.h linux-3.1.4/include/linux/gracl.h
59014--- linux-3.1.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
59015+++ linux-3.1.4/include/linux/gracl.h 2011-11-16 18:40:31.000000000 -0500
59016@@ -0,0 +1,317 @@
59017+#ifndef GR_ACL_H
59018+#define GR_ACL_H
59019+
59020+#include <linux/grdefs.h>
59021+#include <linux/resource.h>
59022+#include <linux/capability.h>
59023+#include <linux/dcache.h>
59024+#include <asm/resource.h>
59025+
59026+/* Major status information */
59027+
59028+#define GR_VERSION "grsecurity 2.2.2"
59029+#define GRSECURITY_VERSION 0x2202
59030+
59031+enum {
59032+ GR_SHUTDOWN = 0,
59033+ GR_ENABLE = 1,
59034+ GR_SPROLE = 2,
59035+ GR_RELOAD = 3,
59036+ GR_SEGVMOD = 4,
59037+ GR_STATUS = 5,
59038+ GR_UNSPROLE = 6,
59039+ GR_PASSSET = 7,
59040+ GR_SPROLEPAM = 8,
59041+};
59042+
59043+/* Password setup definitions
59044+ * kernel/grhash.c */
59045+enum {
59046+ GR_PW_LEN = 128,
59047+ GR_SALT_LEN = 16,
59048+ GR_SHA_LEN = 32,
59049+};
59050+
59051+enum {
59052+ GR_SPROLE_LEN = 64,
59053+};
59054+
59055+enum {
59056+ GR_NO_GLOB = 0,
59057+ GR_REG_GLOB,
59058+ GR_CREATE_GLOB
59059+};
59060+
59061+#define GR_NLIMITS 32
59062+
59063+/* Begin Data Structures */
59064+
59065+struct sprole_pw {
59066+ unsigned char *rolename;
59067+ unsigned char salt[GR_SALT_LEN];
59068+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
59069+};
59070+
59071+struct name_entry {
59072+ __u32 key;
59073+ ino_t inode;
59074+ dev_t device;
59075+ char *name;
59076+ __u16 len;
59077+ __u8 deleted;
59078+ struct name_entry *prev;
59079+ struct name_entry *next;
59080+};
59081+
59082+struct inodev_entry {
59083+ struct name_entry *nentry;
59084+ struct inodev_entry *prev;
59085+ struct inodev_entry *next;
59086+};
59087+
59088+struct acl_role_db {
59089+ struct acl_role_label **r_hash;
59090+ __u32 r_size;
59091+};
59092+
59093+struct inodev_db {
59094+ struct inodev_entry **i_hash;
59095+ __u32 i_size;
59096+};
59097+
59098+struct name_db {
59099+ struct name_entry **n_hash;
59100+ __u32 n_size;
59101+};
59102+
59103+struct crash_uid {
59104+ uid_t uid;
59105+ unsigned long expires;
59106+};
59107+
59108+struct gr_hash_struct {
59109+ void **table;
59110+ void **nametable;
59111+ void *first;
59112+ __u32 table_size;
59113+ __u32 used_size;
59114+ int type;
59115+};
59116+
59117+/* Userspace Grsecurity ACL data structures */
59118+
59119+struct acl_subject_label {
59120+ char *filename;
59121+ ino_t inode;
59122+ dev_t device;
59123+ __u32 mode;
59124+ kernel_cap_t cap_mask;
59125+ kernel_cap_t cap_lower;
59126+ kernel_cap_t cap_invert_audit;
59127+
59128+ struct rlimit res[GR_NLIMITS];
59129+ __u32 resmask;
59130+
59131+ __u8 user_trans_type;
59132+ __u8 group_trans_type;
59133+ uid_t *user_transitions;
59134+ gid_t *group_transitions;
59135+ __u16 user_trans_num;
59136+ __u16 group_trans_num;
59137+
59138+ __u32 sock_families[2];
59139+ __u32 ip_proto[8];
59140+ __u32 ip_type;
59141+ struct acl_ip_label **ips;
59142+ __u32 ip_num;
59143+ __u32 inaddr_any_override;
59144+
59145+ __u32 crashes;
59146+ unsigned long expires;
59147+
59148+ struct acl_subject_label *parent_subject;
59149+ struct gr_hash_struct *hash;
59150+ struct acl_subject_label *prev;
59151+ struct acl_subject_label *next;
59152+
59153+ struct acl_object_label **obj_hash;
59154+ __u32 obj_hash_size;
59155+ __u16 pax_flags;
59156+};
59157+
59158+struct role_allowed_ip {
59159+ __u32 addr;
59160+ __u32 netmask;
59161+
59162+ struct role_allowed_ip *prev;
59163+ struct role_allowed_ip *next;
59164+};
59165+
59166+struct role_transition {
59167+ char *rolename;
59168+
59169+ struct role_transition *prev;
59170+ struct role_transition *next;
59171+};
59172+
59173+struct acl_role_label {
59174+ char *rolename;
59175+ uid_t uidgid;
59176+ __u16 roletype;
59177+
59178+ __u16 auth_attempts;
59179+ unsigned long expires;
59180+
59181+ struct acl_subject_label *root_label;
59182+ struct gr_hash_struct *hash;
59183+
59184+ struct acl_role_label *prev;
59185+ struct acl_role_label *next;
59186+
59187+ struct role_transition *transitions;
59188+ struct role_allowed_ip *allowed_ips;
59189+ uid_t *domain_children;
59190+ __u16 domain_child_num;
59191+
59192+ struct acl_subject_label **subj_hash;
59193+ __u32 subj_hash_size;
59194+};
59195+
59196+struct user_acl_role_db {
59197+ struct acl_role_label **r_table;
59198+ __u32 num_pointers; /* Number of allocations to track */
59199+ __u32 num_roles; /* Number of roles */
59200+ __u32 num_domain_children; /* Number of domain children */
59201+ __u32 num_subjects; /* Number of subjects */
59202+ __u32 num_objects; /* Number of objects */
59203+};
59204+
59205+struct acl_object_label {
59206+ char *filename;
59207+ ino_t inode;
59208+ dev_t device;
59209+ __u32 mode;
59210+
59211+ struct acl_subject_label *nested;
59212+ struct acl_object_label *globbed;
59213+
59214+ /* next two structures not used */
59215+
59216+ struct acl_object_label *prev;
59217+ struct acl_object_label *next;
59218+};
59219+
59220+struct acl_ip_label {
59221+ char *iface;
59222+ __u32 addr;
59223+ __u32 netmask;
59224+ __u16 low, high;
59225+ __u8 mode;
59226+ __u32 type;
59227+ __u32 proto[8];
59228+
59229+ /* next two structures not used */
59230+
59231+ struct acl_ip_label *prev;
59232+ struct acl_ip_label *next;
59233+};
59234+
59235+struct gr_arg {
59236+ struct user_acl_role_db role_db;
59237+ unsigned char pw[GR_PW_LEN];
59238+ unsigned char salt[GR_SALT_LEN];
59239+ unsigned char sum[GR_SHA_LEN];
59240+ unsigned char sp_role[GR_SPROLE_LEN];
59241+ struct sprole_pw *sprole_pws;
59242+ dev_t segv_device;
59243+ ino_t segv_inode;
59244+ uid_t segv_uid;
59245+ __u16 num_sprole_pws;
59246+ __u16 mode;
59247+};
59248+
59249+struct gr_arg_wrapper {
59250+ struct gr_arg *arg;
59251+ __u32 version;
59252+ __u32 size;
59253+};
59254+
59255+struct subject_map {
59256+ struct acl_subject_label *user;
59257+ struct acl_subject_label *kernel;
59258+ struct subject_map *prev;
59259+ struct subject_map *next;
59260+};
59261+
59262+struct acl_subj_map_db {
59263+ struct subject_map **s_hash;
59264+ __u32 s_size;
59265+};
59266+
59267+/* End Data Structures Section */
59268+
59269+/* Hash functions generated by empirical testing by Brad Spengler.
59270+ Makes good use of the low bits of the inode. Generally 0-1 loop
59271+ iterations for a successful match, 0-3 for an unsuccessful match.
59272+ Shift/add algorithm with modulus of table size and an XOR. */
59273+
59274+static __inline__ unsigned int
59275+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
59276+{
59277+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
59278+}
59279+
59280+static __inline__ unsigned int
59281+shash(const struct acl_subject_label *userp, const unsigned int sz)
59282+{
59283+ return ((const unsigned long)userp % sz);
59284+}
59285+
59286+static __inline__ unsigned int
59287+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
59288+{
59289+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
59290+}
59291+
59292+static __inline__ unsigned int
59293+nhash(const char *name, const __u16 len, const unsigned int sz)
59294+{
59295+ return full_name_hash((const unsigned char *)name, len) % sz;
59296+}
59297+
59298+#define FOR_EACH_ROLE_START(role) \
59299+ role = role_list; \
59300+ while (role) {
59301+
59302+#define FOR_EACH_ROLE_END(role) \
59303+ role = role->prev; \
59304+ }
59305+
59306+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
59307+ subj = NULL; \
59308+ iter = 0; \
59309+ while (iter < role->subj_hash_size) { \
59310+ if (subj == NULL) \
59311+ subj = role->subj_hash[iter]; \
59312+ if (subj == NULL) { \
59313+ iter++; \
59314+ continue; \
59315+ }
59316+
59317+#define FOR_EACH_SUBJECT_END(subj,iter) \
59318+ subj = subj->next; \
59319+ if (subj == NULL) \
59320+ iter++; \
59321+ }
59322+
59323+
59324+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
59325+ subj = role->hash->first; \
59326+ while (subj != NULL) {
59327+
59328+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
59329+ subj = subj->next; \
59330+ }
59331+
59332+#endif
59333+
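gracl.h's lookup tables are indexed by the small hash helpers defined near the end of the file (rhash/shash/fhash/nhash); for example fhash() mixes an inode and device number with shifts and an XOR before reducing modulo the table size. A standalone re-implementation to see the bucket spread on a few sample values (illustrative only; the kernel ino_t/dev_t types are replaced by unsigned long and the sample numbers are arbitrary):

    #include <stdio.h>

    /* Same expression as fhash() in the gracl.h hunk above, with the
     * kernel types replaced for a userspace build. */
    static unsigned int fhash(unsigned long ino, unsigned long dev, unsigned int sz)
    {
            return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
    }

    int main(void)
    {
            const unsigned int table_size = 256;   /* arbitrary example size */
            unsigned long samples[][2] = {
                    { 2, 0x801 }, { 3, 0x801 }, { 4, 0x801 }, { 1048577, 0x803 },
            };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("inode %-8lu dev %#lx -> bucket %u\n",
                           samples[i][0], samples[i][1],
                           fhash(samples[i][0], samples[i][1], table_size));
            return 0;
    }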
59334diff -urNp linux-3.1.4/include/linux/gralloc.h linux-3.1.4/include/linux/gralloc.h
59335--- linux-3.1.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
59336+++ linux-3.1.4/include/linux/gralloc.h 2011-11-16 18:40:31.000000000 -0500
59337@@ -0,0 +1,9 @@
59338+#ifndef __GRALLOC_H
59339+#define __GRALLOC_H
59340+
59341+void acl_free_all(void);
59342+int acl_alloc_stack_init(unsigned long size);
59343+void *acl_alloc(unsigned long len);
59344+void *acl_alloc_num(unsigned long num, unsigned long len);
59345+
59346+#endif
59347diff -urNp linux-3.1.4/include/linux/grdefs.h linux-3.1.4/include/linux/grdefs.h
59348--- linux-3.1.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
59349+++ linux-3.1.4/include/linux/grdefs.h 2011-11-16 18:40:31.000000000 -0500
59350@@ -0,0 +1,140 @@
59351+#ifndef GRDEFS_H
59352+#define GRDEFS_H
59353+
59354+/* Begin grsecurity status declarations */
59355+
59356+enum {
59357+ GR_READY = 0x01,
59358+ GR_STATUS_INIT = 0x00 // disabled state
59359+};
59360+
59361+/* Begin ACL declarations */
59362+
59363+/* Role flags */
59364+
59365+enum {
59366+ GR_ROLE_USER = 0x0001,
59367+ GR_ROLE_GROUP = 0x0002,
59368+ GR_ROLE_DEFAULT = 0x0004,
59369+ GR_ROLE_SPECIAL = 0x0008,
59370+ GR_ROLE_AUTH = 0x0010,
59371+ GR_ROLE_NOPW = 0x0020,
59372+ GR_ROLE_GOD = 0x0040,
59373+ GR_ROLE_LEARN = 0x0080,
59374+ GR_ROLE_TPE = 0x0100,
59375+ GR_ROLE_DOMAIN = 0x0200,
59376+ GR_ROLE_PAM = 0x0400,
59377+ GR_ROLE_PERSIST = 0x0800
59378+};
59379+
59380+/* ACL Subject and Object mode flags */
59381+enum {
59382+ GR_DELETED = 0x80000000
59383+};
59384+
59385+/* ACL Object-only mode flags */
59386+enum {
59387+ GR_READ = 0x00000001,
59388+ GR_APPEND = 0x00000002,
59389+ GR_WRITE = 0x00000004,
59390+ GR_EXEC = 0x00000008,
59391+ GR_FIND = 0x00000010,
59392+ GR_INHERIT = 0x00000020,
59393+ GR_SETID = 0x00000040,
59394+ GR_CREATE = 0x00000080,
59395+ GR_DELETE = 0x00000100,
59396+ GR_LINK = 0x00000200,
59397+ GR_AUDIT_READ = 0x00000400,
59398+ GR_AUDIT_APPEND = 0x00000800,
59399+ GR_AUDIT_WRITE = 0x00001000,
59400+ GR_AUDIT_EXEC = 0x00002000,
59401+ GR_AUDIT_FIND = 0x00004000,
59402+ GR_AUDIT_INHERIT= 0x00008000,
59403+ GR_AUDIT_SETID = 0x00010000,
59404+ GR_AUDIT_CREATE = 0x00020000,
59405+ GR_AUDIT_DELETE = 0x00040000,
59406+ GR_AUDIT_LINK = 0x00080000,
59407+ GR_PTRACERD = 0x00100000,
59408+ GR_NOPTRACE = 0x00200000,
59409+ GR_SUPPRESS = 0x00400000,
59410+ GR_NOLEARN = 0x00800000,
59411+ GR_INIT_TRANSFER= 0x01000000
59412+};
59413+
59414+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
59415+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
59416+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
59417+
59418+/* ACL subject-only mode flags */
59419+enum {
59420+ GR_KILL = 0x00000001,
59421+ GR_VIEW = 0x00000002,
59422+ GR_PROTECTED = 0x00000004,
59423+ GR_LEARN = 0x00000008,
59424+ GR_OVERRIDE = 0x00000010,
59425+ /* just a placeholder; this mode is only used in userspace */
59426+ GR_DUMMY = 0x00000020,
59427+ GR_PROTSHM = 0x00000040,
59428+ GR_KILLPROC = 0x00000080,
59429+ GR_KILLIPPROC = 0x00000100,
59430+ /* just a placeholder; this mode is only used in userspace */
59431+ GR_NOTROJAN = 0x00000200,
59432+ GR_PROTPROCFD = 0x00000400,
59433+ GR_PROCACCT = 0x00000800,
59434+ GR_RELAXPTRACE = 0x00001000,
59435+ GR_NESTED = 0x00002000,
59436+ GR_INHERITLEARN = 0x00004000,
59437+ GR_PROCFIND = 0x00008000,
59438+ GR_POVERRIDE = 0x00010000,
59439+ GR_KERNELAUTH = 0x00020000,
59440+ GR_ATSECURE = 0x00040000,
59441+ GR_SHMEXEC = 0x00080000
59442+};
59443+
59444+enum {
59445+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
59446+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
59447+ GR_PAX_ENABLE_MPROTECT = 0x0004,
59448+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
59449+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
59450+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
59451+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
59452+ GR_PAX_DISABLE_MPROTECT = 0x0400,
59453+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
59454+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
59455+};
59456+
59457+enum {
59458+ GR_ID_USER = 0x01,
59459+ GR_ID_GROUP = 0x02,
59460+};
59461+
59462+enum {
59463+ GR_ID_ALLOW = 0x01,
59464+ GR_ID_DENY = 0x02,
59465+};
59466+
59467+#define GR_CRASH_RES 31
59468+#define GR_UIDTABLE_MAX 500
59469+
59470+/* begin resource learning section */
59471+enum {
59472+ GR_RLIM_CPU_BUMP = 60,
59473+ GR_RLIM_FSIZE_BUMP = 50000,
59474+ GR_RLIM_DATA_BUMP = 10000,
59475+ GR_RLIM_STACK_BUMP = 1000,
59476+ GR_RLIM_CORE_BUMP = 10000,
59477+ GR_RLIM_RSS_BUMP = 500000,
59478+ GR_RLIM_NPROC_BUMP = 1,
59479+ GR_RLIM_NOFILE_BUMP = 5,
59480+ GR_RLIM_MEMLOCK_BUMP = 50000,
59481+ GR_RLIM_AS_BUMP = 500000,
59482+ GR_RLIM_LOCKS_BUMP = 2,
59483+ GR_RLIM_SIGPENDING_BUMP = 5,
59484+ GR_RLIM_MSGQUEUE_BUMP = 10000,
59485+ GR_RLIM_NICE_BUMP = 1,
59486+ GR_RLIM_RTPRIO_BUMP = 1,
59487+ GR_RLIM_RTTIME_BUMP = 1000000
59488+};
59489+
59490+#endif
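One property of the object-mode values above is worth spelling out: each GR_AUDIT_* bit sits exactly ten bits to the left of the permission it audits (GR_READ 0x1 versus GR_AUDIT_READ 0x400, GR_LINK 0x200 versus GR_AUDIT_LINK 0x80000, and so on), so a requested mode can be turned into its audit counterpart with a single shift. A small self-contained check of that relationship; the shift constant is an observation of this sketch rather than a name taken from the patch:

#include <stdio.h>

/* values copied from the object-mode and audit-mode enums above */
enum {
	GR_READ        = 0x00000001,
	GR_WRITE       = 0x00000004,
	GR_LINK        = 0x00000200,
	GR_AUDIT_READ  = 0x00000400,
	GR_AUDIT_WRITE = 0x00001000,
	GR_AUDIT_LINK  = 0x00080000
};

#define GR_AUDIT_SHIFT 10   /* derived from the values above */

int main(void)
{
	unsigned int reqmode = GR_READ | GR_WRITE;

	printf("GR_READ  << 10 == GR_AUDIT_READ  ? %d\n",
	       (GR_READ  << GR_AUDIT_SHIFT) == GR_AUDIT_READ);
	printf("GR_WRITE << 10 == GR_AUDIT_WRITE ? %d\n",
	       (GR_WRITE << GR_AUDIT_SHIFT) == GR_AUDIT_WRITE);
	printf("GR_LINK  << 10 == GR_AUDIT_LINK  ? %d\n",
	       (GR_LINK  << GR_AUDIT_SHIFT) == GR_AUDIT_LINK);

	/* a read+write request maps onto its audit bits with one shift */
	printf("reqmode 0x%x -> audit bits 0x%x\n",
	       reqmode, reqmode << GR_AUDIT_SHIFT);
	return 0;
}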
59491diff -urNp linux-3.1.4/include/linux/grinternal.h linux-3.1.4/include/linux/grinternal.h
59492--- linux-3.1.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
59493+++ linux-3.1.4/include/linux/grinternal.h 2011-11-16 18:40:31.000000000 -0500
59494@@ -0,0 +1,220 @@
59495+#ifndef __GRINTERNAL_H
59496+#define __GRINTERNAL_H
59497+
59498+#ifdef CONFIG_GRKERNSEC
59499+
59500+#include <linux/fs.h>
59501+#include <linux/mnt_namespace.h>
59502+#include <linux/nsproxy.h>
59503+#include <linux/gracl.h>
59504+#include <linux/grdefs.h>
59505+#include <linux/grmsg.h>
59506+
59507+void gr_add_learn_entry(const char *fmt, ...)
59508+ __attribute__ ((format (printf, 1, 2)));
59509+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
59510+ const struct vfsmount *mnt);
59511+__u32 gr_check_create(const struct dentry *new_dentry,
59512+ const struct dentry *parent,
59513+ const struct vfsmount *mnt, const __u32 mode);
59514+int gr_check_protected_task(const struct task_struct *task);
59515+__u32 to_gr_audit(const __u32 reqmode);
59516+int gr_set_acls(const int type);
59517+int gr_apply_subject_to_task(struct task_struct *task);
59518+int gr_acl_is_enabled(void);
59519+char gr_roletype_to_char(void);
59520+
59521+void gr_handle_alertkill(struct task_struct *task);
59522+char *gr_to_filename(const struct dentry *dentry,
59523+ const struct vfsmount *mnt);
59524+char *gr_to_filename1(const struct dentry *dentry,
59525+ const struct vfsmount *mnt);
59526+char *gr_to_filename2(const struct dentry *dentry,
59527+ const struct vfsmount *mnt);
59528+char *gr_to_filename3(const struct dentry *dentry,
59529+ const struct vfsmount *mnt);
59530+
59531+extern int grsec_enable_harden_ptrace;
59532+extern int grsec_enable_link;
59533+extern int grsec_enable_fifo;
59534+extern int grsec_enable_execve;
59535+extern int grsec_enable_shm;
59536+extern int grsec_enable_execlog;
59537+extern int grsec_enable_signal;
59538+extern int grsec_enable_audit_ptrace;
59539+extern int grsec_enable_forkfail;
59540+extern int grsec_enable_time;
59541+extern int grsec_enable_rofs;
59542+extern int grsec_enable_chroot_shmat;
59543+extern int grsec_enable_chroot_mount;
59544+extern int grsec_enable_chroot_double;
59545+extern int grsec_enable_chroot_pivot;
59546+extern int grsec_enable_chroot_chdir;
59547+extern int grsec_enable_chroot_chmod;
59548+extern int grsec_enable_chroot_mknod;
59549+extern int grsec_enable_chroot_fchdir;
59550+extern int grsec_enable_chroot_nice;
59551+extern int grsec_enable_chroot_execlog;
59552+extern int grsec_enable_chroot_caps;
59553+extern int grsec_enable_chroot_sysctl;
59554+extern int grsec_enable_chroot_unix;
59555+extern int grsec_enable_tpe;
59556+extern int grsec_tpe_gid;
59557+extern int grsec_enable_tpe_all;
59558+extern int grsec_enable_tpe_invert;
59559+extern int grsec_enable_socket_all;
59560+extern int grsec_socket_all_gid;
59561+extern int grsec_enable_socket_client;
59562+extern int grsec_socket_client_gid;
59563+extern int grsec_enable_socket_server;
59564+extern int grsec_socket_server_gid;
59565+extern int grsec_audit_gid;
59566+extern int grsec_enable_group;
59567+extern int grsec_enable_audit_textrel;
59568+extern int grsec_enable_log_rwxmaps;
59569+extern int grsec_enable_mount;
59570+extern int grsec_enable_chdir;
59571+extern int grsec_resource_logging;
59572+extern int grsec_enable_blackhole;
59573+extern int grsec_lastack_retries;
59574+extern int grsec_enable_brute;
59575+extern int grsec_lock;
59576+
59577+extern spinlock_t grsec_alert_lock;
59578+extern unsigned long grsec_alert_wtime;
59579+extern unsigned long grsec_alert_fyet;
59580+
59581+extern spinlock_t grsec_audit_lock;
59582+
59583+extern rwlock_t grsec_exec_file_lock;
59584+
59585+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
59586+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
59587+ (tsk)->exec_file->f_vfsmnt) : "/")
59588+
59589+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
59590+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
59591+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59592+
59593+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
59594+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
59595+ (tsk)->exec_file->f_vfsmnt) : "/")
59596+
59597+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
59598+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
59599+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59600+
59601+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
59602+
59603+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
59604+
59605+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
59606+ (task)->pid, (cred)->uid, \
59607+ (cred)->euid, (cred)->gid, (cred)->egid, \
59608+ gr_parent_task_fullpath(task), \
59609+ (task)->real_parent->comm, (task)->real_parent->pid, \
59610+ (pcred)->uid, (pcred)->euid, \
59611+ (pcred)->gid, (pcred)->egid
59612+
59613+#define GR_CHROOT_CAPS {{ \
59614+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
59615+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
59616+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
59617+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
59618+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
59619+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
59620+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
59621+
59622+#define security_learn(normal_msg,args...) \
59623+({ \
59624+ read_lock(&grsec_exec_file_lock); \
59625+ gr_add_learn_entry(normal_msg "\n", ## args); \
59626+ read_unlock(&grsec_exec_file_lock); \
59627+})
59628+
59629+enum {
59630+ GR_DO_AUDIT,
59631+ GR_DONT_AUDIT,
59632+ /* used for non-audit messages that we shouldn't kill the task on */
59633+ GR_DONT_AUDIT_GOOD
59634+};
59635+
59636+enum {
59637+ GR_TTYSNIFF,
59638+ GR_RBAC,
59639+ GR_RBAC_STR,
59640+ GR_STR_RBAC,
59641+ GR_RBAC_MODE2,
59642+ GR_RBAC_MODE3,
59643+ GR_FILENAME,
59644+ GR_SYSCTL_HIDDEN,
59645+ GR_NOARGS,
59646+ GR_ONE_INT,
59647+ GR_ONE_INT_TWO_STR,
59648+ GR_ONE_STR,
59649+ GR_STR_INT,
59650+ GR_TWO_STR_INT,
59651+ GR_TWO_INT,
59652+ GR_TWO_U64,
59653+ GR_THREE_INT,
59654+ GR_FIVE_INT_TWO_STR,
59655+ GR_TWO_STR,
59656+ GR_THREE_STR,
59657+ GR_FOUR_STR,
59658+ GR_STR_FILENAME,
59659+ GR_FILENAME_STR,
59660+ GR_FILENAME_TWO_INT,
59661+ GR_FILENAME_TWO_INT_STR,
59662+ GR_TEXTREL,
59663+ GR_PTRACE,
59664+ GR_RESOURCE,
59665+ GR_CAP,
59666+ GR_SIG,
59667+ GR_SIG2,
59668+ GR_CRASH1,
59669+ GR_CRASH2,
59670+ GR_PSACCT,
59671+ GR_RWXMAP
59672+};
59673+
59674+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
59675+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
59676+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
59677+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
59678+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
59679+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
59680+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
59681+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
59682+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
59683+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
59684+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
59685+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
59686+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
59687+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
59688+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
59689+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
59690+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
59691+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
59692+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
59693+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
59694+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
59695+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
59696+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
59697+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
59698+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
59699+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59700+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59701+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59702+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59703+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59704+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59705+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59706+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59707+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59708+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59709+
59710+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59711+
59712+#endif
59713+
59714+#endif
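All of the gr_log_* wrappers above expand to a single call to gr_log_varargs(), with the argtypes enum telling the logger how to pull the variadic arguments back out; the real formatting and the appended task information live in the patch's logging code, not in this header. A stripped-down user-space dispatcher for two of the tags, purely to show the shape of that contract (the "grsec:" prefix and the trailing placeholder line are inventions of the sketch):

#include <stdarg.h>
#include <stdio.h>

/* a small subset of the argtypes enum above */
enum { GR_NOARGS, GR_ONE_INT, GR_TWO_STR };

static void gr_log_varargs_demo(int audit, const char *msg, int argtypes, ...)
{
	va_list ap;

	(void)audit;               /* the kernel picks a log level from this */
	printf("grsec: ");
	va_start(ap, argtypes);
	switch (argtypes) {
	case GR_NOARGS:
		printf("%s", msg);
		break;
	case GR_ONE_INT:
		printf(msg, va_arg(ap, int));
		break;
	case GR_TWO_STR: {
		const char *s1 = va_arg(ap, const char *);
		const char *s2 = va_arg(ap, const char *);
		printf(msg, s1, s2);
		break;
	}
	}
	va_end(ap);
	printf("(task and credential details appended here in the kernel)\n");
}

/* same shape as the wrappers in the header */
#define gr_log_int(audit, msg, num)        gr_log_varargs_demo(audit, msg, GR_ONE_INT, num)
#define gr_log_str_str(audit, msg, s1, s2) gr_log_varargs_demo(audit, msg, GR_TWO_STR, s1, s2)

int main(void)
{
	gr_log_int(0, "invalid mode %d by ", 42);
	gr_log_str_str(1, "mount of %.256s to %.256s by ", "/dev/sda1", "/mnt");
	return 0;
}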
59715diff -urNp linux-3.1.4/include/linux/grmsg.h linux-3.1.4/include/linux/grmsg.h
59716--- linux-3.1.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
59717+++ linux-3.1.4/include/linux/grmsg.h 2011-11-16 18:40:31.000000000 -0500
59718@@ -0,0 +1,108 @@
59719+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59720+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59721+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59722+#define GR_STOPMOD_MSG "denied modification of module state by "
59723+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59724+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59725+#define GR_IOPERM_MSG "denied use of ioperm() by "
59726+#define GR_IOPL_MSG "denied use of iopl() by "
59727+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59728+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59729+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59730+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59731+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59732+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59733+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59734+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59735+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59736+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59737+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59738+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59739+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59740+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59741+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59742+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59743+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59744+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59745+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59746+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59747+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59748+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59749+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59750+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59751+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59752+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59753+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
59754+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59755+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59756+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59757+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59758+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59759+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59760+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59761+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59762+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
59763+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59764+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59765+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59766+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59767+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59768+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59769+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59770+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59771+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
59772+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59773+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59774+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59775+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59776+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59777+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59778+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59779+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59780+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59781+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59782+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59783+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59784+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59785+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59786+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59787+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59788+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59789+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59790+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59791+#define GR_FAILFORK_MSG "failed fork with errno %s by "
59792+#define GR_NICE_CHROOT_MSG "denied priority change by "
59793+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59794+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59795+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59796+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59797+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59798+#define GR_TIME_MSG "time set by "
59799+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59800+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59801+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59802+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59803+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59804+#define GR_BIND_MSG "denied bind() by "
59805+#define GR_CONNECT_MSG "denied connect() by "
59806+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59807+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59808+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59809+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59810+#define GR_CAP_ACL_MSG "use of %s denied for "
59811+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59812+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59813+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59814+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59815+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59816+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59817+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59818+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59819+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59820+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59821+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59822+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59823+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59824+#define GR_VM86_MSG "denied use of vm86 by "
59825+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59826+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
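Nearly every format string above ends in "by " (or embeds DEFAULTSECMSG) so the shared logging path can append the acting task's credentials afterwards. As a quick check of one of them, GR_RESOURCE_MSG consumes an unsigned long, a resource name printed with at most 16 characters, and a second unsigned long; the values and the trailing text below are invented:

#include <stdio.h>

#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "

int main(void)
{
	/* the kernel appends DEFAULTSECARGS after the trailing "for " */
	printf(GR_RESOURCE_MSG "%s\n",
	       4096UL, "RLIMIT_NOFILE", 1024UL,
	       "someprog[comm:1234] uid/euid:1000/1000 ...");
	return 0;
}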
59827diff -urNp linux-3.1.4/include/linux/grsecurity.h linux-3.1.4/include/linux/grsecurity.h
59828--- linux-3.1.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
59829+++ linux-3.1.4/include/linux/grsecurity.h 2011-11-17 00:16:10.000000000 -0500
59830@@ -0,0 +1,228 @@
59831+#ifndef GR_SECURITY_H
59832+#define GR_SECURITY_H
59833+#include <linux/fs.h>
59834+#include <linux/fs_struct.h>
59835+#include <linux/binfmts.h>
59836+#include <linux/gracl.h>
59837+
59838+/* notify of brain-dead configs */
59839+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59840+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59841+#endif
59842+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59843+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59844+#endif
59845+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59846+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59847+#endif
59848+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59849+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59850+#endif
59851+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59852+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59853+#endif
59854+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59855+#error "CONFIG_PAX enabled, but no PaX options are enabled."
59856+#endif
59857+
59858+#include <linux/compat.h>
59859+
59860+struct user_arg_ptr {
59861+#ifdef CONFIG_COMPAT
59862+ bool is_compat;
59863+#endif
59864+ union {
59865+ const char __user *const __user *native;
59866+#ifdef CONFIG_COMPAT
59867+ compat_uptr_t __user *compat;
59868+#endif
59869+ } ptr;
59870+};
59871+
59872+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59873+void gr_handle_brute_check(void);
59874+void gr_handle_kernel_exploit(void);
59875+int gr_process_user_ban(void);
59876+
59877+char gr_roletype_to_char(void);
59878+
59879+int gr_acl_enable_at_secure(void);
59880+
59881+int gr_check_user_change(int real, int effective, int fs);
59882+int gr_check_group_change(int real, int effective, int fs);
59883+
59884+void gr_del_task_from_ip_table(struct task_struct *p);
59885+
59886+int gr_pid_is_chrooted(struct task_struct *p);
59887+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59888+int gr_handle_chroot_nice(void);
59889+int gr_handle_chroot_sysctl(const int op);
59890+int gr_handle_chroot_setpriority(struct task_struct *p,
59891+ const int niceval);
59892+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59893+int gr_handle_chroot_chroot(const struct dentry *dentry,
59894+ const struct vfsmount *mnt);
59895+void gr_handle_chroot_chdir(struct path *path);
59896+int gr_handle_chroot_chmod(const struct dentry *dentry,
59897+ const struct vfsmount *mnt, const int mode);
59898+int gr_handle_chroot_mknod(const struct dentry *dentry,
59899+ const struct vfsmount *mnt, const int mode);
59900+int gr_handle_chroot_mount(const struct dentry *dentry,
59901+ const struct vfsmount *mnt,
59902+ const char *dev_name);
59903+int gr_handle_chroot_pivot(void);
59904+int gr_handle_chroot_unix(const pid_t pid);
59905+
59906+int gr_handle_rawio(const struct inode *inode);
59907+
59908+void gr_handle_ioperm(void);
59909+void gr_handle_iopl(void);
59910+
59911+int gr_tpe_allow(const struct file *file);
59912+
59913+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59914+void gr_clear_chroot_entries(struct task_struct *task);
59915+
59916+void gr_log_forkfail(const int retval);
59917+void gr_log_timechange(void);
59918+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59919+void gr_log_chdir(const struct dentry *dentry,
59920+ const struct vfsmount *mnt);
59921+void gr_log_chroot_exec(const struct dentry *dentry,
59922+ const struct vfsmount *mnt);
59923+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59924+void gr_log_remount(const char *devname, const int retval);
59925+void gr_log_unmount(const char *devname, const int retval);
59926+void gr_log_mount(const char *from, const char *to, const int retval);
59927+void gr_log_textrel(struct vm_area_struct *vma);
59928+void gr_log_rwxmmap(struct file *file);
59929+void gr_log_rwxmprotect(struct file *file);
59930+
59931+int gr_handle_follow_link(const struct inode *parent,
59932+ const struct inode *inode,
59933+ const struct dentry *dentry,
59934+ const struct vfsmount *mnt);
59935+int gr_handle_fifo(const struct dentry *dentry,
59936+ const struct vfsmount *mnt,
59937+ const struct dentry *dir, const int flag,
59938+ const int acc_mode);
59939+int gr_handle_hardlink(const struct dentry *dentry,
59940+ const struct vfsmount *mnt,
59941+ struct inode *inode,
59942+ const int mode, const char *to);
59943+
59944+int gr_is_capable(const int cap);
59945+int gr_is_capable_nolog(const int cap);
59946+void gr_learn_resource(const struct task_struct *task, const int limit,
59947+ const unsigned long wanted, const int gt);
59948+void gr_copy_label(struct task_struct *tsk);
59949+void gr_handle_crash(struct task_struct *task, const int sig);
59950+int gr_handle_signal(const struct task_struct *p, const int sig);
59951+int gr_check_crash_uid(const uid_t uid);
59952+int gr_check_protected_task(const struct task_struct *task);
59953+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59954+int gr_acl_handle_mmap(const struct file *file,
59955+ const unsigned long prot);
59956+int gr_acl_handle_mprotect(const struct file *file,
59957+ const unsigned long prot);
59958+int gr_check_hidden_task(const struct task_struct *tsk);
59959+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59960+ const struct vfsmount *mnt);
59961+__u32 gr_acl_handle_utime(const struct dentry *dentry,
59962+ const struct vfsmount *mnt);
59963+__u32 gr_acl_handle_access(const struct dentry *dentry,
59964+ const struct vfsmount *mnt, const int fmode);
59965+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59966+ const struct vfsmount *mnt, mode_t mode);
59967+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59968+ const struct vfsmount *mnt, mode_t mode);
59969+__u32 gr_acl_handle_chown(const struct dentry *dentry,
59970+ const struct vfsmount *mnt);
59971+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59972+ const struct vfsmount *mnt);
59973+int gr_handle_ptrace(struct task_struct *task, const long request);
59974+int gr_handle_proc_ptrace(struct task_struct *task);
59975+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59976+ const struct vfsmount *mnt);
59977+int gr_check_crash_exec(const struct file *filp);
59978+int gr_acl_is_enabled(void);
59979+void gr_set_kernel_label(struct task_struct *task);
59980+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59981+ const gid_t gid);
59982+int gr_set_proc_label(const struct dentry *dentry,
59983+ const struct vfsmount *mnt,
59984+ const int unsafe_share);
59985+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59986+ const struct vfsmount *mnt);
59987+__u32 gr_acl_handle_open(const struct dentry *dentry,
59988+ const struct vfsmount *mnt, int acc_mode);
59989+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59990+ const struct dentry *p_dentry,
59991+ const struct vfsmount *p_mnt,
59992+ int open_flags, int acc_mode, const int imode);
59993+void gr_handle_create(const struct dentry *dentry,
59994+ const struct vfsmount *mnt);
59995+void gr_handle_proc_create(const struct dentry *dentry,
59996+ const struct inode *inode);
59997+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59998+ const struct dentry *parent_dentry,
59999+ const struct vfsmount *parent_mnt,
60000+ const int mode);
60001+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
60002+ const struct dentry *parent_dentry,
60003+ const struct vfsmount *parent_mnt);
60004+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
60005+ const struct vfsmount *mnt);
60006+void gr_handle_delete(const ino_t ino, const dev_t dev);
60007+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
60008+ const struct vfsmount *mnt);
60009+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
60010+ const struct dentry *parent_dentry,
60011+ const struct vfsmount *parent_mnt,
60012+ const char *from);
60013+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
60014+ const struct dentry *parent_dentry,
60015+ const struct vfsmount *parent_mnt,
60016+ const struct dentry *old_dentry,
60017+ const struct vfsmount *old_mnt, const char *to);
60018+int gr_acl_handle_rename(struct dentry *new_dentry,
60019+ struct dentry *parent_dentry,
60020+ const struct vfsmount *parent_mnt,
60021+ struct dentry *old_dentry,
60022+ struct inode *old_parent_inode,
60023+ struct vfsmount *old_mnt, const char *newname);
60024+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60025+ struct dentry *old_dentry,
60026+ struct dentry *new_dentry,
60027+ struct vfsmount *mnt, const __u8 replace);
60028+__u32 gr_check_link(const struct dentry *new_dentry,
60029+ const struct dentry *parent_dentry,
60030+ const struct vfsmount *parent_mnt,
60031+ const struct dentry *old_dentry,
60032+ const struct vfsmount *old_mnt);
60033+int gr_acl_handle_filldir(const struct file *file, const char *name,
60034+ const unsigned int namelen, const ino_t ino);
60035+
60036+__u32 gr_acl_handle_unix(const struct dentry *dentry,
60037+ const struct vfsmount *mnt);
60038+void gr_acl_handle_exit(void);
60039+void gr_acl_handle_psacct(struct task_struct *task, const long code);
60040+int gr_acl_handle_procpidmem(const struct task_struct *task);
60041+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
60042+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
60043+void gr_audit_ptrace(struct task_struct *task);
60044+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
60045+
60046+#ifdef CONFIG_GRKERNSEC
60047+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
60048+void gr_handle_vm86(void);
60049+void gr_handle_mem_readwrite(u64 from, u64 to);
60050+
60051+extern int grsec_enable_dmesg;
60052+extern int grsec_disable_privio;
60053+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60054+extern int grsec_enable_chroot_findtask;
60055+#endif
60056+#endif
60057+
60058+#endif
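struct user_arg_ptr above lets one code path accept either a native argv array or, under CONFIG_COMPAT, a 32-bit compat array, by tagging a union with is_compat. The sketch below shows the same tagged-union shape in ordinary user space; the accessor, the string table and the use of indices in place of real 32-bit user addresses are all inventions of the example, not the kernel's accessor:

#include <stdio.h>
#include <stdint.h>

/* user-space analogue of struct user_arg_ptr: one flag selects which
 * member of the union is live */
struct demo_arg_ptr {
	int is_compat;
	union {
		const char *const *native;   /* full-width pointers */
		const uint32_t *compat;      /* 32-bit "pointers" (indices here) */
	} ptr;
};

static const char *demo_strings[] = { "ls", "-l", "/tmp", NULL };
static const uint32_t demo_compat[] = { 0, 1, 2, UINT32_MAX };

/* fetch argv[i]; in the sketch a compat entry is just an index into a
 * string table rather than a real 32-bit user address */
static const char *demo_get_arg(struct demo_arg_ptr argv, int i)
{
	if (!argv.is_compat)
		return argv.ptr.native[i];
	if (argv.ptr.compat[i] == UINT32_MAX)
		return NULL;
	return demo_strings[argv.ptr.compat[i]];
}

int main(void)
{
	struct demo_arg_ptr nat = { 0, { .native = demo_strings } };
	struct demo_arg_ptr cmp = { 1, { .compat = demo_compat } };
	int i;

	for (i = 0; demo_get_arg(nat, i); i++)
		printf("native argv[%d] = %s\n", i, demo_get_arg(nat, i));
	for (i = 0; demo_get_arg(cmp, i); i++)
		printf("compat argv[%d] = %s\n", i, demo_get_arg(cmp, i));
	return 0;
}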
60059diff -urNp linux-3.1.4/include/linux/grsock.h linux-3.1.4/include/linux/grsock.h
60060--- linux-3.1.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
60061+++ linux-3.1.4/include/linux/grsock.h 2011-11-16 18:40:31.000000000 -0500
60062@@ -0,0 +1,19 @@
60063+#ifndef __GRSOCK_H
60064+#define __GRSOCK_H
60065+
60066+extern void gr_attach_curr_ip(const struct sock *sk);
60067+extern int gr_handle_sock_all(const int family, const int type,
60068+ const int protocol);
60069+extern int gr_handle_sock_server(const struct sockaddr *sck);
60070+extern int gr_handle_sock_server_other(const struct sock *sck);
60071+extern int gr_handle_sock_client(const struct sockaddr *sck);
60072+extern int gr_search_connect(struct socket * sock,
60073+ struct sockaddr_in * addr);
60074+extern int gr_search_bind(struct socket * sock,
60075+ struct sockaddr_in * addr);
60076+extern int gr_search_listen(struct socket * sock);
60077+extern int gr_search_accept(struct socket * sock);
60078+extern int gr_search_socket(const int domain, const int type,
60079+ const int protocol);
60080+
60081+#endif
60082diff -urNp linux-3.1.4/include/linux/hid.h linux-3.1.4/include/linux/hid.h
60083--- linux-3.1.4/include/linux/hid.h 2011-11-11 15:19:27.000000000 -0500
60084+++ linux-3.1.4/include/linux/hid.h 2011-11-16 18:39:08.000000000 -0500
60085@@ -676,7 +676,7 @@ struct hid_ll_driver {
60086 unsigned int code, int value);
60087
60088 int (*parse)(struct hid_device *hdev);
60089-};
60090+} __no_const;
60091
60092 #define PM_HINT_FULLON 1<<5
60093 #define PM_HINT_NORMAL 1<<1
60094diff -urNp linux-3.1.4/include/linux/highmem.h linux-3.1.4/include/linux/highmem.h
60095--- linux-3.1.4/include/linux/highmem.h 2011-11-11 15:19:27.000000000 -0500
60096+++ linux-3.1.4/include/linux/highmem.h 2011-11-16 18:39:08.000000000 -0500
60097@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
60098 kunmap_atomic(kaddr, KM_USER0);
60099 }
60100
60101+static inline void sanitize_highpage(struct page *page)
60102+{
60103+ void *kaddr;
60104+ unsigned long flags;
60105+
60106+ local_irq_save(flags);
60107+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
60108+ clear_page(kaddr);
60109+ kunmap_atomic(kaddr, KM_CLEARPAGE);
60110+ local_irq_restore(flags);
60111+}
60112+
60113 static inline void zero_user_segments(struct page *page,
60114 unsigned start1, unsigned end1,
60115 unsigned start2, unsigned end2)
60116diff -urNp linux-3.1.4/include/linux/i2c.h linux-3.1.4/include/linux/i2c.h
60117--- linux-3.1.4/include/linux/i2c.h 2011-11-11 15:19:27.000000000 -0500
60118+++ linux-3.1.4/include/linux/i2c.h 2011-11-16 18:39:08.000000000 -0500
60119@@ -346,6 +346,7 @@ struct i2c_algorithm {
60120 /* To determine what the adapter supports */
60121 u32 (*functionality) (struct i2c_adapter *);
60122 };
60123+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
60124
60125 /*
60126 * i2c_adapter is the structure used to identify a physical i2c bus along
60127diff -urNp linux-3.1.4/include/linux/i2o.h linux-3.1.4/include/linux/i2o.h
60128--- linux-3.1.4/include/linux/i2o.h 2011-11-11 15:19:27.000000000 -0500
60129+++ linux-3.1.4/include/linux/i2o.h 2011-11-16 18:39:08.000000000 -0500
60130@@ -564,7 +564,7 @@ struct i2o_controller {
60131 struct i2o_device *exec; /* Executive */
60132 #if BITS_PER_LONG == 64
60133 spinlock_t context_list_lock; /* lock for context_list */
60134- atomic_t context_list_counter; /* needed for unique contexts */
60135+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
60136 struct list_head context_list; /* list of context id's
60137 and pointers */
60138 #endif
60139diff -urNp linux-3.1.4/include/linux/init.h linux-3.1.4/include/linux/init.h
60140--- linux-3.1.4/include/linux/init.h 2011-11-11 15:19:27.000000000 -0500
60141+++ linux-3.1.4/include/linux/init.h 2011-11-16 18:39:08.000000000 -0500
60142@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
60143
60144 /* Each module must use one module_init(). */
60145 #define module_init(initfn) \
60146- static inline initcall_t __inittest(void) \
60147+ static inline __used initcall_t __inittest(void) \
60148 { return initfn; } \
60149 int init_module(void) __attribute__((alias(#initfn)));
60150
60151 /* This is only required if you want to be unloadable. */
60152 #define module_exit(exitfn) \
60153- static inline exitcall_t __exittest(void) \
60154+ static inline __used exitcall_t __exittest(void) \
60155 { return exitfn; } \
60156 void cleanup_module(void) __attribute__((alias(#exitfn)));
60157
60158diff -urNp linux-3.1.4/include/linux/init_task.h linux-3.1.4/include/linux/init_task.h
60159--- linux-3.1.4/include/linux/init_task.h 2011-11-11 15:19:27.000000000 -0500
60160+++ linux-3.1.4/include/linux/init_task.h 2011-11-16 18:39:08.000000000 -0500
60161@@ -126,6 +126,12 @@ extern struct cred init_cred;
60162 # define INIT_PERF_EVENTS(tsk)
60163 #endif
60164
60165+#ifdef CONFIG_X86
60166+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
60167+#else
60168+#define INIT_TASK_THREAD_INFO
60169+#endif
60170+
60171 /*
60172 * INIT_TASK is used to set up the first task table, touch at
60173 * your own risk!. Base=0, limit=0x1fffff (=2MB)
60174@@ -164,6 +170,7 @@ extern struct cred init_cred;
60175 RCU_INIT_POINTER(.cred, &init_cred), \
60176 .comm = "swapper", \
60177 .thread = INIT_THREAD, \
60178+ INIT_TASK_THREAD_INFO \
60179 .fs = &init_fs, \
60180 .files = &init_files, \
60181 .signal = &init_signals, \
60182diff -urNp linux-3.1.4/include/linux/intel-iommu.h linux-3.1.4/include/linux/intel-iommu.h
60183--- linux-3.1.4/include/linux/intel-iommu.h 2011-11-11 15:19:27.000000000 -0500
60184+++ linux-3.1.4/include/linux/intel-iommu.h 2011-11-16 18:39:08.000000000 -0500
60185@@ -296,7 +296,7 @@ struct iommu_flush {
60186 u8 fm, u64 type);
60187 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
60188 unsigned int size_order, u64 type);
60189-};
60190+} __no_const;
60191
60192 enum {
60193 SR_DMAR_FECTL_REG,
60194diff -urNp linux-3.1.4/include/linux/interrupt.h linux-3.1.4/include/linux/interrupt.h
60195--- linux-3.1.4/include/linux/interrupt.h 2011-11-11 15:19:27.000000000 -0500
60196+++ linux-3.1.4/include/linux/interrupt.h 2011-11-16 18:39:08.000000000 -0500
60197@@ -425,7 +425,7 @@ enum
60198 /* map softirq index to softirq name. update 'softirq_to_name' in
60199 * kernel/softirq.c when adding a new softirq.
60200 */
60201-extern char *softirq_to_name[NR_SOFTIRQS];
60202+extern const char * const softirq_to_name[NR_SOFTIRQS];
60203
60204 /* softirq mask and active fields moved to irq_cpustat_t in
60205 * asm/hardirq.h to get better cache usage. KAO
60206@@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
60207
60208 struct softirq_action
60209 {
60210- void (*action)(struct softirq_action *);
60211+ void (*action)(void);
60212 };
60213
60214 asmlinkage void do_softirq(void);
60215 asmlinkage void __do_softirq(void);
60216-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
60217+extern void open_softirq(int nr, void (*action)(void));
60218 extern void softirq_init(void);
60219 static inline void __raise_softirq_irqoff(unsigned int nr)
60220 {
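The interrupt.h change narrows struct softirq_action's callback from void (*)(struct softirq_action *) to void (*)(void), and open_softirq() with it: handlers no longer receive a pointer to their own table slot, an argument the stock handlers generally ignore. A small user-space mock of registering and running such a table with the narrowed signature; NR_SOFTIRQS_DEMO, the handler bodies and the pending bitmask are stand-ins:

#include <stdio.h>

#define NR_SOFTIRQS_DEMO 4           /* stand-in for NR_SOFTIRQS */

/* mirrors the patched struct softirq_action: the callback takes no argument */
struct softirq_action_demo {
	void (*action)(void);
};

static struct softirq_action_demo softirq_vec[NR_SOFTIRQS_DEMO];

static void open_softirq_demo(int nr, void (*action)(void))
{
	softirq_vec[nr].action = action;
}

static void timer_softirq(void)  { printf("timer softirq ran\n"); }
static void net_rx_softirq(void) { printf("net-rx softirq ran\n"); }

/* the dispatcher indexes the table itself; handlers get no pointer to it */
static void do_softirq_demo(unsigned int pending)
{
	int nr;

	for (nr = 0; nr < NR_SOFTIRQS_DEMO; nr++)
		if ((pending & (1u << nr)) && softirq_vec[nr].action)
			softirq_vec[nr].action();
}

int main(void)
{
	open_softirq_demo(0, timer_softirq);
	open_softirq_demo(2, net_rx_softirq);
	do_softirq_demo(0x5);            /* bits 0 and 2 pending */
	return 0;
}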
60221diff -urNp linux-3.1.4/include/linux/kallsyms.h linux-3.1.4/include/linux/kallsyms.h
60222--- linux-3.1.4/include/linux/kallsyms.h 2011-11-11 15:19:27.000000000 -0500
60223+++ linux-3.1.4/include/linux/kallsyms.h 2011-11-16 18:40:31.000000000 -0500
60224@@ -15,7 +15,8 @@
60225
60226 struct module;
60227
60228-#ifdef CONFIG_KALLSYMS
60229+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
60230+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60231 /* Lookup the address for a symbol. Returns 0 if not found. */
60232 unsigned long kallsyms_lookup_name(const char *name);
60233
60234@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
60235 /* Stupid that this does nothing, but I didn't create this mess. */
60236 #define __print_symbol(fmt, addr)
60237 #endif /*CONFIG_KALLSYMS*/
60238+#else /* when included by kallsyms.c, vsnprintf.c, or
60239+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
60240+extern void __print_symbol(const char *fmt, unsigned long address);
60241+extern int sprint_backtrace(char *buffer, unsigned long address);
60242+extern int sprint_symbol(char *buffer, unsigned long address);
60243+const char *kallsyms_lookup(unsigned long addr,
60244+ unsigned long *symbolsize,
60245+ unsigned long *offset,
60246+ char **modname, char *namebuf);
60247+#endif
60248
60249 /* This macro allows us to keep printk typechecking */
60250 static void __check_printsym_format(const char *fmt, ...)
60251diff -urNp linux-3.1.4/include/linux/kgdb.h linux-3.1.4/include/linux/kgdb.h
60252--- linux-3.1.4/include/linux/kgdb.h 2011-11-11 15:19:27.000000000 -0500
60253+++ linux-3.1.4/include/linux/kgdb.h 2011-11-16 18:39:08.000000000 -0500
60254@@ -53,7 +53,7 @@ extern int kgdb_connected;
60255 extern int kgdb_io_module_registered;
60256
60257 extern atomic_t kgdb_setting_breakpoint;
60258-extern atomic_t kgdb_cpu_doing_single_step;
60259+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
60260
60261 extern struct task_struct *kgdb_usethread;
60262 extern struct task_struct *kgdb_contthread;
60263@@ -251,7 +251,7 @@ struct kgdb_arch {
60264 void (*disable_hw_break)(struct pt_regs *regs);
60265 void (*remove_all_hw_break)(void);
60266 void (*correct_hw_break)(void);
60267-};
60268+} __do_const;
60269
60270 /**
60271 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
60272@@ -276,7 +276,7 @@ struct kgdb_io {
60273 void (*pre_exception) (void);
60274 void (*post_exception) (void);
60275 int is_console;
60276-};
60277+} __do_const;
60278
60279 extern struct kgdb_arch arch_kgdb_ops;
60280
60281diff -urNp linux-3.1.4/include/linux/kmod.h linux-3.1.4/include/linux/kmod.h
60282--- linux-3.1.4/include/linux/kmod.h 2011-11-11 15:19:27.000000000 -0500
60283+++ linux-3.1.4/include/linux/kmod.h 2011-11-16 18:40:31.000000000 -0500
60284@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
60285 * usually useless though. */
60286 extern int __request_module(bool wait, const char *name, ...) \
60287 __attribute__((format(printf, 2, 3)));
60288+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
60289+ __attribute__((format(printf, 3, 4)));
60290 #define request_module(mod...) __request_module(true, mod)
60291 #define request_module_nowait(mod...) __request_module(false, mod)
60292 #define try_then_request_module(x, mod...) \
60293diff -urNp linux-3.1.4/include/linux/kvm_host.h linux-3.1.4/include/linux/kvm_host.h
60294--- linux-3.1.4/include/linux/kvm_host.h 2011-11-11 15:19:27.000000000 -0500
60295+++ linux-3.1.4/include/linux/kvm_host.h 2011-11-16 18:39:08.000000000 -0500
60296@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
60297 void vcpu_load(struct kvm_vcpu *vcpu);
60298 void vcpu_put(struct kvm_vcpu *vcpu);
60299
60300-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
60301+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
60302 struct module *module);
60303 void kvm_exit(void);
60304
60305@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
60306 struct kvm_guest_debug *dbg);
60307 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
60308
60309-int kvm_arch_init(void *opaque);
60310+int kvm_arch_init(const void *opaque);
60311 void kvm_arch_exit(void);
60312
60313 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
60314diff -urNp linux-3.1.4/include/linux/libata.h linux-3.1.4/include/linux/libata.h
60315--- linux-3.1.4/include/linux/libata.h 2011-11-11 15:19:27.000000000 -0500
60316+++ linux-3.1.4/include/linux/libata.h 2011-11-16 18:39:08.000000000 -0500
60317@@ -909,7 +909,7 @@ struct ata_port_operations {
60318 * fields must be pointers.
60319 */
60320 const struct ata_port_operations *inherits;
60321-};
60322+} __do_const;
60323
60324 struct ata_port_info {
60325 unsigned long flags;
60326diff -urNp linux-3.1.4/include/linux/mca.h linux-3.1.4/include/linux/mca.h
60327--- linux-3.1.4/include/linux/mca.h 2011-11-11 15:19:27.000000000 -0500
60328+++ linux-3.1.4/include/linux/mca.h 2011-11-16 18:39:08.000000000 -0500
60329@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
60330 int region);
60331 void * (*mca_transform_memory)(struct mca_device *,
60332 void *memory);
60333-};
60334+} __no_const;
60335
60336 struct mca_bus {
60337 u64 default_dma_mask;
60338diff -urNp linux-3.1.4/include/linux/memory.h linux-3.1.4/include/linux/memory.h
60339--- linux-3.1.4/include/linux/memory.h 2011-11-11 15:19:27.000000000 -0500
60340+++ linux-3.1.4/include/linux/memory.h 2011-11-16 18:39:08.000000000 -0500
60341@@ -144,7 +144,7 @@ struct memory_accessor {
60342 size_t count);
60343 ssize_t (*write)(struct memory_accessor *, const char *buf,
60344 off_t offset, size_t count);
60345-};
60346+} __no_const;
60347
60348 /*
60349 * Kernel text modification mutex, used for code patching. Users of this lock
60350diff -urNp linux-3.1.4/include/linux/mfd/abx500.h linux-3.1.4/include/linux/mfd/abx500.h
60351--- linux-3.1.4/include/linux/mfd/abx500.h 2011-11-11 15:19:27.000000000 -0500
60352+++ linux-3.1.4/include/linux/mfd/abx500.h 2011-11-16 18:39:08.000000000 -0500
60353@@ -234,6 +234,7 @@ struct abx500_ops {
60354 int (*event_registers_startup_state_get) (struct device *, u8 *);
60355 int (*startup_irq_enabled) (struct device *, unsigned int);
60356 };
60357+typedef struct abx500_ops __no_const abx500_ops_no_const;
60358
60359 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
60360 void abx500_remove_ops(struct device *dev);
60361diff -urNp linux-3.1.4/include/linux/mm.h linux-3.1.4/include/linux/mm.h
60362--- linux-3.1.4/include/linux/mm.h 2011-11-11 15:19:27.000000000 -0500
60363+++ linux-3.1.4/include/linux/mm.h 2011-11-16 18:39:08.000000000 -0500
60364@@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void
60365
60366 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
60367 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
60368+
60369+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
60370+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
60371+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
60372+#else
60373 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
60374+#endif
60375+
60376 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
60377 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
60378
60379@@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page);
60380 int set_page_dirty_lock(struct page *page);
60381 int clear_page_dirty_for_io(struct page *page);
60382
60383-/* Is the vma a continuation of the stack vma above it? */
60384-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
60385-{
60386- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
60387-}
60388-
60389-static inline int stack_guard_page_start(struct vm_area_struct *vma,
60390- unsigned long addr)
60391-{
60392- return (vma->vm_flags & VM_GROWSDOWN) &&
60393- (vma->vm_start == addr) &&
60394- !vma_growsdown(vma->vm_prev, addr);
60395-}
60396-
60397-/* Is the vma a continuation of the stack vma below it? */
60398-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
60399-{
60400- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
60401-}
60402-
60403-static inline int stack_guard_page_end(struct vm_area_struct *vma,
60404- unsigned long addr)
60405-{
60406- return (vma->vm_flags & VM_GROWSUP) &&
60407- (vma->vm_end == addr) &&
60408- !vma_growsup(vma->vm_next, addr);
60409-}
60410-
60411 extern unsigned long move_page_tables(struct vm_area_struct *vma,
60412 unsigned long old_addr, struct vm_area_struct *new_vma,
60413 unsigned long new_addr, unsigned long len);
60414@@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct ta
60415 }
60416 #endif
60417
60418+#ifdef CONFIG_MMU
60419+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
60420+#else
60421+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
60422+{
60423+ return __pgprot(0);
60424+}
60425+#endif
60426+
60427 int vma_wants_writenotify(struct vm_area_struct *vma);
60428
60429 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
60430@@ -1417,6 +1405,7 @@ out:
60431 }
60432
60433 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
60434+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
60435
60436 extern unsigned long do_brk(unsigned long, unsigned long);
60437
60438@@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(
60439 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
60440 struct vm_area_struct **pprev);
60441
60442+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
60443+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
60444+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
60445+
60446 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
60447 NULL if none. Assume start_addr < end_addr. */
60448 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
60449@@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(st
60450 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
60451 }
60452
60453-#ifdef CONFIG_MMU
60454-pgprot_t vm_get_page_prot(unsigned long vm_flags);
60455-#else
60456-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
60457-{
60458- return __pgprot(0);
60459-}
60460-#endif
60461-
60462 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
60463 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
60464 unsigned long pfn, unsigned long size, pgprot_t);
60465@@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long
60466 extern int sysctl_memory_failure_early_kill;
60467 extern int sysctl_memory_failure_recovery;
60468 extern void shake_page(struct page *p, int access);
60469-extern atomic_long_t mce_bad_pages;
60470+extern atomic_long_unchecked_t mce_bad_pages;
60471 extern int soft_offline_page(struct page *page, int flags);
60472
60473 extern void dump_page(struct page *page);
60474@@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct p
60475 unsigned int pages_per_huge_page);
60476 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
60477
60478+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60479+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
60480+#else
60481+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
60482+#endif
60483+
60484 #endif /* __KERNEL__ */
60485 #endif /* _LINUX_MM_H */
60486diff -urNp linux-3.1.4/include/linux/mm_types.h linux-3.1.4/include/linux/mm_types.h
60487--- linux-3.1.4/include/linux/mm_types.h 2011-11-11 15:19:27.000000000 -0500
60488+++ linux-3.1.4/include/linux/mm_types.h 2011-11-16 18:39:08.000000000 -0500
60489@@ -230,6 +230,8 @@ struct vm_area_struct {
60490 #ifdef CONFIG_NUMA
60491 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
60492 #endif
60493+
60494+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
60495 };
60496
60497 struct core_thread {
60498@@ -362,6 +364,24 @@ struct mm_struct {
60499 #ifdef CONFIG_CPUMASK_OFFSTACK
60500 struct cpumask cpumask_allocation;
60501 #endif
60502+
60503+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60504+ unsigned long pax_flags;
60505+#endif
60506+
60507+#ifdef CONFIG_PAX_DLRESOLVE
60508+ unsigned long call_dl_resolve;
60509+#endif
60510+
60511+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60512+ unsigned long call_syscall;
60513+#endif
60514+
60515+#ifdef CONFIG_PAX_ASLR
60516+ unsigned long delta_mmap; /* randomized offset */
60517+ unsigned long delta_stack; /* randomized offset */
60518+#endif
60519+
60520 };
60521
60522 static inline void mm_init_cpumask(struct mm_struct *mm)
60523diff -urNp linux-3.1.4/include/linux/mmu_notifier.h linux-3.1.4/include/linux/mmu_notifier.h
60524--- linux-3.1.4/include/linux/mmu_notifier.h 2011-11-11 15:19:27.000000000 -0500
60525+++ linux-3.1.4/include/linux/mmu_notifier.h 2011-11-16 18:39:08.000000000 -0500
60526@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
60527 */
60528 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
60529 ({ \
60530- pte_t __pte; \
60531+ pte_t ___pte; \
60532 struct vm_area_struct *___vma = __vma; \
60533 unsigned long ___address = __address; \
60534- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
60535+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
60536 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
60537- __pte; \
60538+ ___pte; \
60539 })
60540
60541 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
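The only change in this hunk is the name of the local temporary in the ptep_clear_flush_notify() statement expression: __pte becomes ___pte, presumably so the macro-local name cannot collide with the __pte identifier that the pte helpers it wraps around may also use. The hazard being guarded against is the usual one for temporaries inside statement-expression macros; a self-contained illustration with made-up macro names (GNU C statement expressions assumed):

#include <stdio.h>

/* the temporary is initialized first, then loaded from the argument;
 * if the caller's expression uses the same name, it reads the macro's
 * own local instead of the caller's variable */
#define DOUBLE_BAD(x) ({ int _t = 0; _t = (x); _t * 2; })

/* identical logic, temporary renamed out of the caller's namespace */
#define DOUBLE_OK(x)  ({ int ___dbl_t = 0; ___dbl_t = (x); ___dbl_t * 2; })

int main(void)
{
	int _t = 21;

	printf("DOUBLE_BAD(_t) = %d (caller expected 42)\n", DOUBLE_BAD(_t));
	printf("DOUBLE_OK(_t)  = %d\n", DOUBLE_OK(_t));
	return 0;
}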
60542diff -urNp linux-3.1.4/include/linux/mmzone.h linux-3.1.4/include/linux/mmzone.h
60543--- linux-3.1.4/include/linux/mmzone.h 2011-11-11 15:19:27.000000000 -0500
60544+++ linux-3.1.4/include/linux/mmzone.h 2011-11-16 18:39:08.000000000 -0500
60545@@ -356,7 +356,7 @@ struct zone {
60546 unsigned long flags; /* zone flags, see below */
60547
60548 /* Zone statistics */
60549- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60550+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60551
60552 /*
60553 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
60554diff -urNp linux-3.1.4/include/linux/mod_devicetable.h linux-3.1.4/include/linux/mod_devicetable.h
60555--- linux-3.1.4/include/linux/mod_devicetable.h 2011-11-11 15:19:27.000000000 -0500
60556+++ linux-3.1.4/include/linux/mod_devicetable.h 2011-11-16 18:39:08.000000000 -0500
60557@@ -12,7 +12,7 @@
60558 typedef unsigned long kernel_ulong_t;
60559 #endif
60560
60561-#define PCI_ANY_ID (~0)
60562+#define PCI_ANY_ID ((__u16)~0)
60563
60564 struct pci_device_id {
60565 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
60566@@ -131,7 +131,7 @@ struct usb_device_id {
60567 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
60568 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
60569
60570-#define HID_ANY_ID (~0)
60571+#define HID_ANY_ID (~0U)
60572
60573 struct hid_device_id {
60574 __u16 bus;
60575diff -urNp linux-3.1.4/include/linux/module.h linux-3.1.4/include/linux/module.h
60576--- linux-3.1.4/include/linux/module.h 2011-11-11 15:19:27.000000000 -0500
60577+++ linux-3.1.4/include/linux/module.h 2011-11-16 18:39:08.000000000 -0500
60578@@ -16,6 +16,7 @@
60579 #include <linux/kobject.h>
60580 #include <linux/moduleparam.h>
60581 #include <linux/tracepoint.h>
60582+#include <linux/fs.h>
60583
60584 #include <linux/percpu.h>
60585 #include <asm/module.h>
60586@@ -327,19 +328,16 @@ struct module
60587 int (*init)(void);
60588
60589 /* If this is non-NULL, vfree after init() returns */
60590- void *module_init;
60591+ void *module_init_rx, *module_init_rw;
60592
60593 /* Here is the actual code + data, vfree'd on unload. */
60594- void *module_core;
60595+ void *module_core_rx, *module_core_rw;
60596
60597 /* Here are the sizes of the init and core sections */
60598- unsigned int init_size, core_size;
60599+ unsigned int init_size_rw, core_size_rw;
60600
60601 /* The size of the executable code in each section. */
60602- unsigned int init_text_size, core_text_size;
60603-
60604- /* Size of RO sections of the module (text+rodata) */
60605- unsigned int init_ro_size, core_ro_size;
60606+ unsigned int init_size_rx, core_size_rx;
60607
60608 /* Arch-specific module values */
60609 struct mod_arch_specific arch;
60610@@ -395,6 +393,10 @@ struct module
60611 #ifdef CONFIG_EVENT_TRACING
60612 struct ftrace_event_call **trace_events;
60613 unsigned int num_trace_events;
60614+ struct file_operations trace_id;
60615+ struct file_operations trace_enable;
60616+ struct file_operations trace_format;
60617+ struct file_operations trace_filter;
60618 #endif
60619 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
60620 unsigned int num_ftrace_callsites;
60621@@ -445,16 +447,46 @@ bool is_module_address(unsigned long add
60622 bool is_module_percpu_address(unsigned long addr);
60623 bool is_module_text_address(unsigned long addr);
60624
60625+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
60626+{
60627+
60628+#ifdef CONFIG_PAX_KERNEXEC
60629+ if (ktla_ktva(addr) >= (unsigned long)start &&
60630+ ktla_ktva(addr) < (unsigned long)start + size)
60631+ return 1;
60632+#endif
60633+
60634+ return ((void *)addr >= start && (void *)addr < start + size);
60635+}
60636+
60637+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
60638+{
60639+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
60640+}
60641+
60642+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
60643+{
60644+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
60645+}
60646+
60647+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
60648+{
60649+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
60650+}
60651+
60652+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
60653+{
60654+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
60655+}
60656+
60657 static inline int within_module_core(unsigned long addr, struct module *mod)
60658 {
60659- return (unsigned long)mod->module_core <= addr &&
60660- addr < (unsigned long)mod->module_core + mod->core_size;
60661+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
60662 }
60663
60664 static inline int within_module_init(unsigned long addr, struct module *mod)
60665 {
60666- return (unsigned long)mod->module_init <= addr &&
60667- addr < (unsigned long)mod->module_init + mod->init_size;
60668+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60669 }
60670
60671 /* Search for module by name: must hold module_mutex. */
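Illustration (not part of the patch): with the module core and init regions split into RX and RW halves above, a caller that only cares whether an address lies in module text can ask the narrower question directly, instead of going through the combined within_module_core()/within_module_init() tests. A minimal sketch built solely from the helpers defined in this hunk:

#include <linux/module.h>

static int example_addr_is_module_text(unsigned long addr, struct module *mod)
{
	/* the RX halves hold code, the RW halves hold data */
	return within_module_core_rx(addr, mod) ||
	       within_module_init_rx(addr, mod);
}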
60672diff -urNp linux-3.1.4/include/linux/moduleloader.h linux-3.1.4/include/linux/moduleloader.h
60673--- linux-3.1.4/include/linux/moduleloader.h 2011-11-11 15:19:27.000000000 -0500
60674+++ linux-3.1.4/include/linux/moduleloader.h 2011-11-16 18:39:08.000000000 -0500
60675@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(st
60676 sections. Returns NULL on failure. */
60677 void *module_alloc(unsigned long size);
60678
60679+#ifdef CONFIG_PAX_KERNEXEC
60680+void *module_alloc_exec(unsigned long size);
60681+#else
60682+#define module_alloc_exec(x) module_alloc(x)
60683+#endif
60684+
60685 /* Free memory returned from module_alloc. */
60686 void module_free(struct module *mod, void *module_region);
60687
60688+#ifdef CONFIG_PAX_KERNEXEC
60689+void module_free_exec(struct module *mod, void *module_region);
60690+#else
60691+#define module_free_exec(x, y) module_free((x), (y))
60692+#endif
60693+
60694 /* Apply the given relocation to the (simplified) ELF. Return -error
60695 or 0. */
60696 int apply_relocate(Elf_Shdr *sechdrs,
60697diff -urNp linux-3.1.4/include/linux/moduleparam.h linux-3.1.4/include/linux/moduleparam.h
60698--- linux-3.1.4/include/linux/moduleparam.h 2011-11-11 15:19:27.000000000 -0500
60699+++ linux-3.1.4/include/linux/moduleparam.h 2011-11-16 18:39:08.000000000 -0500
60700@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
60701 * @len is usually just sizeof(string).
60702 */
60703 #define module_param_string(name, string, len, perm) \
60704- static const struct kparam_string __param_string_##name \
60705+ static const struct kparam_string __param_string_##name __used \
60706 = { len, string }; \
60707 __module_param_call(MODULE_PARAM_PREFIX, name, \
60708 &param_ops_string, \
60709@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
60710 * module_param_named() for why this might be necessary.
60711 */
60712 #define module_param_array_named(name, array, type, nump, perm) \
60713- static const struct kparam_array __param_arr_##name \
60714+ static const struct kparam_array __param_arr_##name __used \
60715 = { .max = ARRAY_SIZE(array), .num = nump, \
60716 .ops = &param_ops_##type, \
60717 .elemsize = sizeof(array[0]), .elem = array }; \
60718diff -urNp linux-3.1.4/include/linux/namei.h linux-3.1.4/include/linux/namei.h
60719--- linux-3.1.4/include/linux/namei.h 2011-11-11 15:19:27.000000000 -0500
60720+++ linux-3.1.4/include/linux/namei.h 2011-11-16 18:39:08.000000000 -0500
60721@@ -24,7 +24,7 @@ struct nameidata {
60722 unsigned seq;
60723 int last_type;
60724 unsigned depth;
60725- char *saved_names[MAX_NESTED_LINKS + 1];
60726+ const char *saved_names[MAX_NESTED_LINKS + 1];
60727
60728 /* Intent data */
60729 union {
60730@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60731 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60732 extern void unlock_rename(struct dentry *, struct dentry *);
60733
60734-static inline void nd_set_link(struct nameidata *nd, char *path)
60735+static inline void nd_set_link(struct nameidata *nd, const char *path)
60736 {
60737 nd->saved_names[nd->depth] = path;
60738 }
60739
60740-static inline char *nd_get_link(struct nameidata *nd)
60741+static inline const char *nd_get_link(const struct nameidata *nd)
60742 {
60743 return nd->saved_names[nd->depth];
60744 }
60745diff -urNp linux-3.1.4/include/linux/netdevice.h linux-3.1.4/include/linux/netdevice.h
60746--- linux-3.1.4/include/linux/netdevice.h 2011-11-11 15:19:27.000000000 -0500
60747+++ linux-3.1.4/include/linux/netdevice.h 2011-11-16 18:39:08.000000000 -0500
60748@@ -944,6 +944,7 @@ struct net_device_ops {
60749 int (*ndo_set_features)(struct net_device *dev,
60750 u32 features);
60751 };
60752+typedef struct net_device_ops __no_const net_device_ops_no_const;
60753
60754 /*
60755 * The DEVICE structure.
60756diff -urNp linux-3.1.4/include/linux/netfilter/xt_gradm.h linux-3.1.4/include/linux/netfilter/xt_gradm.h
60757--- linux-3.1.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
60758+++ linux-3.1.4/include/linux/netfilter/xt_gradm.h 2011-11-16 18:40:31.000000000 -0500
60759@@ -0,0 +1,9 @@
60760+#ifndef _LINUX_NETFILTER_XT_GRADM_H
60761+#define _LINUX_NETFILTER_XT_GRADM_H 1
60762+
60763+struct xt_gradm_mtinfo {
60764+ __u16 flags;
60765+ __u16 invflags;
60766+};
60767+
60768+#endif
60769diff -urNp linux-3.1.4/include/linux/of_pdt.h linux-3.1.4/include/linux/of_pdt.h
60770--- linux-3.1.4/include/linux/of_pdt.h 2011-11-11 15:19:27.000000000 -0500
60771+++ linux-3.1.4/include/linux/of_pdt.h 2011-11-16 18:39:08.000000000 -0500
60772@@ -32,7 +32,7 @@ struct of_pdt_ops {
60773
60774 /* return 0 on success; fill in 'len' with number of bytes in path */
60775 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60776-};
60777+} __no_const;
60778
60779 extern void *prom_early_alloc(unsigned long size);
60780
60781diff -urNp linux-3.1.4/include/linux/oprofile.h linux-3.1.4/include/linux/oprofile.h
60782--- linux-3.1.4/include/linux/oprofile.h 2011-11-11 15:19:27.000000000 -0500
60783+++ linux-3.1.4/include/linux/oprofile.h 2011-11-16 18:39:08.000000000 -0500
60784@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
60785 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60786 char const * name, ulong * val);
60787
60788-/** Create a file for read-only access to an atomic_t. */
60789+/** Create a file for read-only access to an atomic_unchecked_t. */
60790 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60791- char const * name, atomic_t * val);
60792+ char const * name, atomic_unchecked_t * val);
60793
60794 /** create a directory */
60795 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60796diff -urNp linux-3.1.4/include/linux/padata.h linux-3.1.4/include/linux/padata.h
60797--- linux-3.1.4/include/linux/padata.h 2011-11-11 15:19:27.000000000 -0500
60798+++ linux-3.1.4/include/linux/padata.h 2011-11-16 18:39:08.000000000 -0500
60799@@ -129,7 +129,7 @@ struct parallel_data {
60800 struct padata_instance *pinst;
60801 struct padata_parallel_queue __percpu *pqueue;
60802 struct padata_serial_queue __percpu *squeue;
60803- atomic_t seq_nr;
60804+ atomic_unchecked_t seq_nr;
60805 atomic_t reorder_objects;
60806 atomic_t refcnt;
60807 unsigned int max_seq_nr;
60808diff -urNp linux-3.1.4/include/linux/perf_event.h linux-3.1.4/include/linux/perf_event.h
60809--- linux-3.1.4/include/linux/perf_event.h 2011-11-11 15:19:27.000000000 -0500
60810+++ linux-3.1.4/include/linux/perf_event.h 2011-11-16 18:39:08.000000000 -0500
60811@@ -745,8 +745,8 @@ struct perf_event {
60812
60813 enum perf_event_active_state state;
60814 unsigned int attach_state;
60815- local64_t count;
60816- atomic64_t child_count;
60817+ local64_t count; /* PaX: fix it one day */
60818+ atomic64_unchecked_t child_count;
60819
60820 /*
60821 * These are the total time in nanoseconds that the event
60822@@ -797,8 +797,8 @@ struct perf_event {
60823 * These accumulate total time (in nanoseconds) that children
60824 * events have been enabled and running, respectively.
60825 */
60826- atomic64_t child_total_time_enabled;
60827- atomic64_t child_total_time_running;
60828+ atomic64_unchecked_t child_total_time_enabled;
60829+ atomic64_unchecked_t child_total_time_running;
60830
60831 /*
60832 * Protect attach/detach and child_list:
60833diff -urNp linux-3.1.4/include/linux/pipe_fs_i.h linux-3.1.4/include/linux/pipe_fs_i.h
60834--- linux-3.1.4/include/linux/pipe_fs_i.h 2011-11-11 15:19:27.000000000 -0500
60835+++ linux-3.1.4/include/linux/pipe_fs_i.h 2011-11-16 18:39:08.000000000 -0500
60836@@ -46,9 +46,9 @@ struct pipe_buffer {
60837 struct pipe_inode_info {
60838 wait_queue_head_t wait;
60839 unsigned int nrbufs, curbuf, buffers;
60840- unsigned int readers;
60841- unsigned int writers;
60842- unsigned int waiting_writers;
60843+ atomic_t readers;
60844+ atomic_t writers;
60845+ atomic_t waiting_writers;
60846 unsigned int r_counter;
60847 unsigned int w_counter;
60848 struct page *tmp_page;
60849diff -urNp linux-3.1.4/include/linux/pm_runtime.h linux-3.1.4/include/linux/pm_runtime.h
60850--- linux-3.1.4/include/linux/pm_runtime.h 2011-11-11 15:19:27.000000000 -0500
60851+++ linux-3.1.4/include/linux/pm_runtime.h 2011-11-16 18:39:08.000000000 -0500
60852@@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_
60853
60854 static inline void pm_runtime_mark_last_busy(struct device *dev)
60855 {
60856- ACCESS_ONCE(dev->power.last_busy) = jiffies;
60857+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60858 }
60859
60860 #else /* !CONFIG_PM_RUNTIME */
60861diff -urNp linux-3.1.4/include/linux/poison.h linux-3.1.4/include/linux/poison.h
60862--- linux-3.1.4/include/linux/poison.h 2011-11-11 15:19:27.000000000 -0500
60863+++ linux-3.1.4/include/linux/poison.h 2011-11-16 18:39:08.000000000 -0500
60864@@ -19,8 +19,8 @@
60865 * under normal circumstances, used to verify that nobody uses
60866 * non-initialized list entries.
60867 */
60868-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60869-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60870+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60871+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60872
60873 /********** include/linux/timer.h **********/
60874 /*
60875diff -urNp linux-3.1.4/include/linux/preempt.h linux-3.1.4/include/linux/preempt.h
60876--- linux-3.1.4/include/linux/preempt.h 2011-11-11 15:19:27.000000000 -0500
60877+++ linux-3.1.4/include/linux/preempt.h 2011-11-16 18:39:08.000000000 -0500
60878@@ -123,7 +123,7 @@ struct preempt_ops {
60879 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60880 void (*sched_out)(struct preempt_notifier *notifier,
60881 struct task_struct *next);
60882-};
60883+} __no_const;
60884
60885 /**
60886 * preempt_notifier - key for installing preemption notifiers
60887diff -urNp linux-3.1.4/include/linux/proc_fs.h linux-3.1.4/include/linux/proc_fs.h
60888--- linux-3.1.4/include/linux/proc_fs.h 2011-11-11 15:19:27.000000000 -0500
60889+++ linux-3.1.4/include/linux/proc_fs.h 2011-11-16 18:40:31.000000000 -0500
60890@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
60891 return proc_create_data(name, mode, parent, proc_fops, NULL);
60892 }
60893
60894+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60895+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60896+{
60897+#ifdef CONFIG_GRKERNSEC_PROC_USER
60898+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60899+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60900+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60901+#else
60902+ return proc_create_data(name, mode, parent, proc_fops, NULL);
60903+#endif
60904+}
60905+
60906+
60907 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60908 mode_t mode, struct proc_dir_entry *base,
60909 read_proc_t *read_proc, void * data)
60910@@ -258,7 +271,7 @@ union proc_op {
60911 int (*proc_show)(struct seq_file *m,
60912 struct pid_namespace *ns, struct pid *pid,
60913 struct task_struct *task);
60914-};
60915+} __no_const;
60916
60917 struct ctl_table_header;
60918 struct ctl_table;
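Illustration (not part of the patch): proc_create_grsec() above tightens the caller's requested mode when one of the GRKERNSEC_PROC_* policies is configured (0400 for USER, 0440 for USERGROUP) and otherwise behaves exactly like proc_create(). A usage sketch with a hypothetical example_fops:

#include <linux/proc_fs.h>
#include <linux/errno.h>
#include <linux/init.h>

static const struct file_operations example_fops;	/* hypothetical fops */

static int __init example_proc_init(void)
{
	/* 0444 is only honoured when no GRKERNSEC_PROC_* policy is enabled */
	if (!proc_create_grsec("example_stats", 0444, NULL, &example_fops))
		return -ENOMEM;
	return 0;
}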
60919diff -urNp linux-3.1.4/include/linux/ptrace.h linux-3.1.4/include/linux/ptrace.h
60920--- linux-3.1.4/include/linux/ptrace.h 2011-11-11 15:19:27.000000000 -0500
60921+++ linux-3.1.4/include/linux/ptrace.h 2011-11-16 18:40:31.000000000 -0500
60922@@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_
60923 extern void exit_ptrace(struct task_struct *tracer);
60924 #define PTRACE_MODE_READ 1
60925 #define PTRACE_MODE_ATTACH 2
60926-/* Returns 0 on success, -errno on denial. */
60927-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60928 /* Returns true on success, false on denial. */
60929 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60930+/* Returns true on success, false on denial. */
60931+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60932
60933 static inline int ptrace_reparented(struct task_struct *child)
60934 {
60935diff -urNp linux-3.1.4/include/linux/random.h linux-3.1.4/include/linux/random.h
60936--- linux-3.1.4/include/linux/random.h 2011-11-11 15:19:27.000000000 -0500
60937+++ linux-3.1.4/include/linux/random.h 2011-11-16 18:39:08.000000000 -0500
60938@@ -69,12 +69,17 @@ void srandom32(u32 seed);
60939
60940 u32 prandom32(struct rnd_state *);
60941
60942+static inline unsigned long pax_get_random_long(void)
60943+{
60944+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60945+}
60946+
60947 /*
60948 * Handle minimum values for seeds
60949 */
60950 static inline u32 __seed(u32 x, u32 m)
60951 {
60952- return (x < m) ? x + m : x;
60953+ return (x <= m) ? x + m + 1 : x;
60954 }
60955
60956 /**
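Worked example (not part of the patch): pax_get_random_long() above splices two random32() draws into one value on a 64-bit build, while on a 32-bit build the shifted term falls away and a single draw is returned; the __seed() change below it additionally guarantees a seed strictly greater than the minimum m rather than merely not less than it. A small user-space mimic with fixed stand-in draws:

#include <stdint.h>
#include <stdio.h>

/* hypothetical fixed values standing in for two random32() draws */
#define DRAW_LO 0x11223344u
#define DRAW_HI 0xaabbccddu

int main(void)
{
	uint64_t r64 = DRAW_LO + ((uint64_t)DRAW_HI << 32);	/* sizeof(long) > 4  */
	uint32_t r32 = DRAW_LO;					/* sizeof(long) == 4 */

	printf("64-bit: %#llx\n", (unsigned long long)r64);	/* 0xaabbccdd11223344 */
	printf("32-bit: %#x\n", r32);
	return 0;
}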
60957diff -urNp linux-3.1.4/include/linux/reboot.h linux-3.1.4/include/linux/reboot.h
60958--- linux-3.1.4/include/linux/reboot.h 2011-11-11 15:19:27.000000000 -0500
60959+++ linux-3.1.4/include/linux/reboot.h 2011-11-16 18:39:08.000000000 -0500
60960@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(st
60961 * Architecture-specific implementations of sys_reboot commands.
60962 */
60963
60964-extern void machine_restart(char *cmd);
60965-extern void machine_halt(void);
60966-extern void machine_power_off(void);
60967+extern void machine_restart(char *cmd) __noreturn;
60968+extern void machine_halt(void) __noreturn;
60969+extern void machine_power_off(void) __noreturn;
60970
60971 extern void machine_shutdown(void);
60972 struct pt_regs;
60973@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struc
60974 */
60975
60976 extern void kernel_restart_prepare(char *cmd);
60977-extern void kernel_restart(char *cmd);
60978-extern void kernel_halt(void);
60979-extern void kernel_power_off(void);
60980+extern void kernel_restart(char *cmd) __noreturn;
60981+extern void kernel_halt(void) __noreturn;
60982+extern void kernel_power_off(void) __noreturn;
60983
60984 extern int C_A_D; /* for sysctl */
60985 void ctrl_alt_del(void);
60986@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60987 * Emergency restart, callable from an interrupt handler.
60988 */
60989
60990-extern void emergency_restart(void);
60991+extern void emergency_restart(void) __noreturn;
60992 #include <asm/emergency-restart.h>
60993
60994 #endif
60995diff -urNp linux-3.1.4/include/linux/reiserfs_fs.h linux-3.1.4/include/linux/reiserfs_fs.h
60996--- linux-3.1.4/include/linux/reiserfs_fs.h 2011-11-11 15:19:27.000000000 -0500
60997+++ linux-3.1.4/include/linux/reiserfs_fs.h 2011-11-16 18:39:08.000000000 -0500
60998@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
60999 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
61000
61001 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
61002-#define get_generation(s) atomic_read (&fs_generation(s))
61003+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
61004 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
61005 #define __fs_changed(gen,s) (gen != get_generation (s))
61006 #define fs_changed(gen,s) \
61007diff -urNp linux-3.1.4/include/linux/reiserfs_fs_sb.h linux-3.1.4/include/linux/reiserfs_fs_sb.h
61008--- linux-3.1.4/include/linux/reiserfs_fs_sb.h 2011-11-11 15:19:27.000000000 -0500
61009+++ linux-3.1.4/include/linux/reiserfs_fs_sb.h 2011-11-16 18:39:08.000000000 -0500
61010@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
61011 /* Comment? -Hans */
61012 wait_queue_head_t s_wait;
61013 /* To be obsoleted soon by per buffer seals.. -Hans */
61014- atomic_t s_generation_counter; // increased by one every time the
61015+ atomic_unchecked_t s_generation_counter; // increased by one every time the
61016 // tree gets re-balanced
61017 unsigned long s_properties; /* File system properties. Currently holds
61018 on-disk FS format */
61019diff -urNp linux-3.1.4/include/linux/relay.h linux-3.1.4/include/linux/relay.h
61020--- linux-3.1.4/include/linux/relay.h 2011-11-11 15:19:27.000000000 -0500
61021+++ linux-3.1.4/include/linux/relay.h 2011-11-16 18:39:08.000000000 -0500
61022@@ -159,7 +159,7 @@ struct rchan_callbacks
61023 * The callback should return 0 if successful, negative if not.
61024 */
61025 int (*remove_buf_file)(struct dentry *dentry);
61026-};
61027+} __no_const;
61028
61029 /*
61030 * CONFIG_RELAY kernel API, kernel/relay.c
61031diff -urNp linux-3.1.4/include/linux/rfkill.h linux-3.1.4/include/linux/rfkill.h
61032--- linux-3.1.4/include/linux/rfkill.h 2011-11-11 15:19:27.000000000 -0500
61033+++ linux-3.1.4/include/linux/rfkill.h 2011-11-16 18:39:08.000000000 -0500
61034@@ -147,6 +147,7 @@ struct rfkill_ops {
61035 void (*query)(struct rfkill *rfkill, void *data);
61036 int (*set_block)(void *data, bool blocked);
61037 };
61038+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
61039
61040 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
61041 /**
61042diff -urNp linux-3.1.4/include/linux/rmap.h linux-3.1.4/include/linux/rmap.h
61043--- linux-3.1.4/include/linux/rmap.h 2011-11-11 15:19:27.000000000 -0500
61044+++ linux-3.1.4/include/linux/rmap.h 2011-11-16 18:39:08.000000000 -0500
61045@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
61046 void anon_vma_init(void); /* create anon_vma_cachep */
61047 int anon_vma_prepare(struct vm_area_struct *);
61048 void unlink_anon_vmas(struct vm_area_struct *);
61049-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
61050-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
61051+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
61052+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
61053 void __anon_vma_link(struct vm_area_struct *);
61054
61055 static inline void anon_vma_merge(struct vm_area_struct *vma,
61056diff -urNp linux-3.1.4/include/linux/sched.h linux-3.1.4/include/linux/sched.h
61057--- linux-3.1.4/include/linux/sched.h 2011-11-11 15:19:27.000000000 -0500
61058+++ linux-3.1.4/include/linux/sched.h 2011-11-16 18:40:31.000000000 -0500
61059@@ -100,6 +100,7 @@ struct bio_list;
61060 struct fs_struct;
61061 struct perf_event_context;
61062 struct blk_plug;
61063+struct linux_binprm;
61064
61065 /*
61066 * List of flags we want to share for kernel threads,
61067@@ -380,10 +381,13 @@ struct user_namespace;
61068 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
61069
61070 extern int sysctl_max_map_count;
61071+extern unsigned long sysctl_heap_stack_gap;
61072
61073 #include <linux/aio.h>
61074
61075 #ifdef CONFIG_MMU
61076+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
61077+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
61078 extern void arch_pick_mmap_layout(struct mm_struct *mm);
61079 extern unsigned long
61080 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
61081@@ -629,6 +633,17 @@ struct signal_struct {
61082 #ifdef CONFIG_TASKSTATS
61083 struct taskstats *stats;
61084 #endif
61085+
61086+#ifdef CONFIG_GRKERNSEC
61087+ u32 curr_ip;
61088+ u32 saved_ip;
61089+ u32 gr_saddr;
61090+ u32 gr_daddr;
61091+ u16 gr_sport;
61092+ u16 gr_dport;
61093+ u8 used_accept:1;
61094+#endif
61095+
61096 #ifdef CONFIG_AUDIT
61097 unsigned audit_tty;
61098 struct tty_audit_buf *tty_audit_buf;
61099@@ -710,6 +725,11 @@ struct user_struct {
61100 struct key *session_keyring; /* UID's default session keyring */
61101 #endif
61102
61103+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61104+ unsigned int banned;
61105+ unsigned long ban_expires;
61106+#endif
61107+
61108 /* Hash table maintenance information */
61109 struct hlist_node uidhash_node;
61110 uid_t uid;
61111@@ -1340,8 +1360,8 @@ struct task_struct {
61112 struct list_head thread_group;
61113
61114 struct completion *vfork_done; /* for vfork() */
61115- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
61116- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
61117+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
61118+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
61119
61120 cputime_t utime, stime, utimescaled, stimescaled;
61121 cputime_t gtime;
61122@@ -1357,13 +1377,6 @@ struct task_struct {
61123 struct task_cputime cputime_expires;
61124 struct list_head cpu_timers[3];
61125
61126-/* process credentials */
61127- const struct cred __rcu *real_cred; /* objective and real subjective task
61128- * credentials (COW) */
61129- const struct cred __rcu *cred; /* effective (overridable) subjective task
61130- * credentials (COW) */
61131- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
61132-
61133 char comm[TASK_COMM_LEN]; /* executable name excluding path
61134 - access with [gs]et_task_comm (which lock
61135 it with task_lock())
61136@@ -1380,8 +1393,16 @@ struct task_struct {
61137 #endif
61138 /* CPU-specific state of this task */
61139 struct thread_struct thread;
61140+/* thread_info moved to task_struct */
61141+#ifdef CONFIG_X86
61142+ struct thread_info tinfo;
61143+#endif
61144 /* filesystem information */
61145 struct fs_struct *fs;
61146+
61147+ const struct cred __rcu *cred; /* effective (overridable) subjective task
61148+ * credentials (COW) */
61149+
61150 /* open file information */
61151 struct files_struct *files;
61152 /* namespaces */
61153@@ -1428,6 +1449,11 @@ struct task_struct {
61154 struct rt_mutex_waiter *pi_blocked_on;
61155 #endif
61156
61157+/* process credentials */
61158+ const struct cred __rcu *real_cred; /* objective and real subjective task
61159+ * credentials (COW) */
61160+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
61161+
61162 #ifdef CONFIG_DEBUG_MUTEXES
61163 /* mutex deadlock detection */
61164 struct mutex_waiter *blocked_on;
61165@@ -1537,6 +1563,21 @@ struct task_struct {
61166 unsigned long default_timer_slack_ns;
61167
61168 struct list_head *scm_work_list;
61169+
61170+#ifdef CONFIG_GRKERNSEC
61171+ /* grsecurity */
61172+ struct dentry *gr_chroot_dentry;
61173+ struct acl_subject_label *acl;
61174+ struct acl_role_label *role;
61175+ struct file *exec_file;
61176+ u16 acl_role_id;
61177+ /* is this the task that authenticated to the special role */
61178+ u8 acl_sp_role;
61179+ u8 is_writable;
61180+ u8 brute;
61181+ u8 gr_is_chrooted;
61182+#endif
61183+
61184 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
61185 /* Index of current stored address in ret_stack */
61186 int curr_ret_stack;
61187@@ -1571,6 +1612,57 @@ struct task_struct {
61188 #endif
61189 };
61190
61191+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
61192+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
61193+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
61194+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
61195+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
61196+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
61197+
61198+#ifdef CONFIG_PAX_SOFTMODE
61199+extern int pax_softmode;
61200+#endif
61201+
61202+extern int pax_check_flags(unsigned long *);
61203+
61204+/* if tsk != current then task_lock must be held on it */
61205+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
61206+static inline unsigned long pax_get_flags(struct task_struct *tsk)
61207+{
61208+ if (likely(tsk->mm))
61209+ return tsk->mm->pax_flags;
61210+ else
61211+ return 0UL;
61212+}
61213+
61214+/* if tsk != current then task_lock must be held on it */
61215+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
61216+{
61217+ if (likely(tsk->mm)) {
61218+ tsk->mm->pax_flags = flags;
61219+ return 0;
61220+ }
61221+ return -EINVAL;
61222+}
61223+#endif
61224+
61225+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61226+extern void pax_set_initial_flags(struct linux_binprm *bprm);
61227+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
61228+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
61229+#endif
61230+
61231+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
61232+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
61233+extern void pax_report_refcount_overflow(struct pt_regs *regs);
61234+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
61235+
61236+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61237+extern void pax_track_stack(void);
61238+#else
61239+static inline void pax_track_stack(void) {}
61240+#endif
61241+
61242 /* Future-safe accessor for struct task_struct's cpus_allowed. */
61243 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
61244
61245@@ -2074,7 +2166,9 @@ void yield(void);
61246 extern struct exec_domain default_exec_domain;
61247
61248 union thread_union {
61249+#ifndef CONFIG_X86
61250 struct thread_info thread_info;
61251+#endif
61252 unsigned long stack[THREAD_SIZE/sizeof(long)];
61253 };
61254
61255@@ -2107,6 +2201,7 @@ extern struct pid_namespace init_pid_ns;
61256 */
61257
61258 extern struct task_struct *find_task_by_vpid(pid_t nr);
61259+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
61260 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
61261 struct pid_namespace *ns);
61262
61263@@ -2243,7 +2338,7 @@ extern void __cleanup_sighand(struct sig
61264 extern void exit_itimers(struct signal_struct *);
61265 extern void flush_itimer_signals(void);
61266
61267-extern NORET_TYPE void do_group_exit(int);
61268+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
61269
61270 extern void daemonize(const char *, ...);
61271 extern int allow_signal(int);
61272@@ -2408,13 +2503,17 @@ static inline unsigned long *end_of_stac
61273
61274 #endif
61275
61276-static inline int object_is_on_stack(void *obj)
61277+static inline int object_starts_on_stack(void *obj)
61278 {
61279- void *stack = task_stack_page(current);
61280+ const void *stack = task_stack_page(current);
61281
61282 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
61283 }
61284
61285+#ifdef CONFIG_PAX_USERCOPY
61286+extern int object_is_on_stack(const void *obj, unsigned long len);
61287+#endif
61288+
61289 extern void thread_info_cache_init(void);
61290
61291 #ifdef CONFIG_DEBUG_STACK_USAGE
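Illustration (not part of the patch): the MF_PAX_* bits and the pax_get_flags()/pax_set_flags() helpers above expose a per-mm view of the active PaX features. A minimal query helper, valid only under the same CONFIG_PAX_NOEXEC/CONFIG_PAX_ASLR guard as the accessors themselves:

#include <linux/sched.h>

#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
/* as with pax_get_flags(): if tsk != current, task_lock must be held */
static inline int example_task_has_pax_mprotect(struct task_struct *tsk)
{
	return (pax_get_flags(tsk) & MF_PAX_MPROTECT) != 0;
}
#endif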
61292diff -urNp linux-3.1.4/include/linux/screen_info.h linux-3.1.4/include/linux/screen_info.h
61293--- linux-3.1.4/include/linux/screen_info.h 2011-11-11 15:19:27.000000000 -0500
61294+++ linux-3.1.4/include/linux/screen_info.h 2011-11-16 18:39:08.000000000 -0500
61295@@ -43,7 +43,8 @@ struct screen_info {
61296 __u16 pages; /* 0x32 */
61297 __u16 vesa_attributes; /* 0x34 */
61298 __u32 capabilities; /* 0x36 */
61299- __u8 _reserved[6]; /* 0x3a */
61300+ __u16 vesapm_size; /* 0x3a */
61301+ __u8 _reserved[4]; /* 0x3c */
61302 } __attribute__((packed));
61303
61304 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
61305diff -urNp linux-3.1.4/include/linux/security.h linux-3.1.4/include/linux/security.h
61306--- linux-3.1.4/include/linux/security.h 2011-11-11 15:19:27.000000000 -0500
61307+++ linux-3.1.4/include/linux/security.h 2011-11-16 18:40:31.000000000 -0500
61308@@ -36,6 +36,7 @@
61309 #include <linux/key.h>
61310 #include <linux/xfrm.h>
61311 #include <linux/slab.h>
61312+#include <linux/grsecurity.h>
61313 #include <net/flow.h>
61314
61315 /* Maximum number of letters for an LSM name string */
61316diff -urNp linux-3.1.4/include/linux/seq_file.h linux-3.1.4/include/linux/seq_file.h
61317--- linux-3.1.4/include/linux/seq_file.h 2011-11-11 15:19:27.000000000 -0500
61318+++ linux-3.1.4/include/linux/seq_file.h 2011-11-16 18:39:08.000000000 -0500
61319@@ -33,6 +33,7 @@ struct seq_operations {
61320 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
61321 int (*show) (struct seq_file *m, void *v);
61322 };
61323+typedef struct seq_operations __no_const seq_operations_no_const;
61324
61325 #define SEQ_SKIP 1
61326
61327diff -urNp linux-3.1.4/include/linux/shm.h linux-3.1.4/include/linux/shm.h
61328--- linux-3.1.4/include/linux/shm.h 2011-11-11 15:19:27.000000000 -0500
61329+++ linux-3.1.4/include/linux/shm.h 2011-11-16 18:59:58.000000000 -0500
61330@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the ke
61331
61332 /* The task created the shm object. NULL if the task is dead. */
61333 struct task_struct *shm_creator;
61334+#ifdef CONFIG_GRKERNSEC
61335+ time_t shm_createtime;
61336+ pid_t shm_lapid;
61337+#endif
61338 };
61339
61340 /* shm_mode upper byte flags */
61341diff -urNp linux-3.1.4/include/linux/skbuff.h linux-3.1.4/include/linux/skbuff.h
61342--- linux-3.1.4/include/linux/skbuff.h 2011-11-11 15:19:27.000000000 -0500
61343+++ linux-3.1.4/include/linux/skbuff.h 2011-11-16 18:39:08.000000000 -0500
61344@@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamp
61345 */
61346 static inline int skb_queue_empty(const struct sk_buff_head *list)
61347 {
61348- return list->next == (struct sk_buff *)list;
61349+ return list->next == (const struct sk_buff *)list;
61350 }
61351
61352 /**
61353@@ -623,7 +623,7 @@ static inline int skb_queue_empty(const
61354 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
61355 const struct sk_buff *skb)
61356 {
61357- return skb->next == (struct sk_buff *)list;
61358+ return skb->next == (const struct sk_buff *)list;
61359 }
61360
61361 /**
61362@@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(con
61363 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
61364 const struct sk_buff *skb)
61365 {
61366- return skb->prev == (struct sk_buff *)list;
61367+ return skb->prev == (const struct sk_buff *)list;
61368 }
61369
61370 /**
61371@@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(
61372 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
61373 */
61374 #ifndef NET_SKB_PAD
61375-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
61376+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
61377 #endif
61378
61379 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
61380diff -urNp linux-3.1.4/include/linux/slab_def.h linux-3.1.4/include/linux/slab_def.h
61381--- linux-3.1.4/include/linux/slab_def.h 2011-11-11 15:19:27.000000000 -0500
61382+++ linux-3.1.4/include/linux/slab_def.h 2011-11-16 18:39:08.000000000 -0500
61383@@ -68,10 +68,10 @@ struct kmem_cache {
61384 unsigned long node_allocs;
61385 unsigned long node_frees;
61386 unsigned long node_overflow;
61387- atomic_t allochit;
61388- atomic_t allocmiss;
61389- atomic_t freehit;
61390- atomic_t freemiss;
61391+ atomic_unchecked_t allochit;
61392+ atomic_unchecked_t allocmiss;
61393+ atomic_unchecked_t freehit;
61394+ atomic_unchecked_t freemiss;
61395
61396 /*
61397 * If debugging is enabled, then the allocator can add additional
61398diff -urNp linux-3.1.4/include/linux/slab.h linux-3.1.4/include/linux/slab.h
61399--- linux-3.1.4/include/linux/slab.h 2011-11-11 15:19:27.000000000 -0500
61400+++ linux-3.1.4/include/linux/slab.h 2011-11-16 18:39:08.000000000 -0500
61401@@ -11,12 +11,20 @@
61402
61403 #include <linux/gfp.h>
61404 #include <linux/types.h>
61405+#include <linux/err.h>
61406
61407 /*
61408 * Flags to pass to kmem_cache_create().
61409 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
61410 */
61411 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
61412+
61413+#ifdef CONFIG_PAX_USERCOPY
61414+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
61415+#else
61416+#define SLAB_USERCOPY 0x00000000UL
61417+#endif
61418+
61419 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
61420 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
61421 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
61422@@ -87,10 +95,13 @@
61423 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
61424 * Both make kfree a no-op.
61425 */
61426-#define ZERO_SIZE_PTR ((void *)16)
61427+#define ZERO_SIZE_PTR \
61428+({ \
61429+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
61430+ (void *)(-MAX_ERRNO-1L); \
61431+})
61432
61433-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
61434- (unsigned long)ZERO_SIZE_PTR)
61435+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
61436
61437 /*
61438 * struct kmem_cache related prototypes
61439@@ -161,6 +172,7 @@ void * __must_check krealloc(const void
61440 void kfree(const void *);
61441 void kzfree(const void *);
61442 size_t ksize(const void *);
61443+void check_object_size(const void *ptr, unsigned long n, bool to);
61444
61445 /*
61446 * Allocator specific definitions. These are mainly used to establish optimized
61447@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t
61448
61449 void __init kmem_cache_init_late(void);
61450
61451+#define kmalloc(x, y) \
61452+({ \
61453+ void *___retval; \
61454+ intoverflow_t ___x = (intoverflow_t)x; \
61455+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
61456+ ___retval = NULL; \
61457+ else \
61458+ ___retval = kmalloc((size_t)___x, (y)); \
61459+ ___retval; \
61460+})
61461+
61462+#define kmalloc_node(x, y, z) \
61463+({ \
61464+ void *___retval; \
61465+ intoverflow_t ___x = (intoverflow_t)x; \
61466+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
61467+ ___retval = NULL; \
61468+ else \
61469+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
61470+ ___retval; \
61471+})
61472+
61473+#define kzalloc(x, y) \
61474+({ \
61475+ void *___retval; \
61476+ intoverflow_t ___x = (intoverflow_t)x; \
61477+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
61478+ ___retval = NULL; \
61479+ else \
61480+ ___retval = kzalloc((size_t)___x, (y)); \
61481+ ___retval; \
61482+})
61483+
61484+#define __krealloc(x, y, z) \
61485+({ \
61486+ void *___retval; \
61487+ intoverflow_t ___y = (intoverflow_t)y; \
61488+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
61489+ ___retval = NULL; \
61490+ else \
61491+ ___retval = __krealloc((x), (size_t)___y, (z)); \
61492+ ___retval; \
61493+})
61494+
61495+#define krealloc(x, y, z) \
61496+({ \
61497+ void *___retval; \
61498+ intoverflow_t ___y = (intoverflow_t)y; \
61499+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
61500+ ___retval = NULL; \
61501+ else \
61502+ ___retval = krealloc((x), (size_t)___y, (z)); \
61503+ ___retval; \
61504+})
61505+
61506 #endif /* _LINUX_SLAB_H */
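Worked example (not part of the patch): with ZERO_SIZE_PTR moved to (void *)(-MAX_ERRNO-1L), the single unsigned comparison in the new ZERO_OR_NULL_PTR() is true for NULL, for ZERO_SIZE_PTR itself, and for everything above it (which covers the ERR_PTR range), while ordinary pointers test false. A user-space mimic, assuming the kernel's MAX_ERRNO value of 4095:

#include <assert.h>

#define MAX_ERRNO	4095L
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	assert(ZERO_OR_NULL_PTR((void *)0));		/* NULL: 0 - 1 wraps to ULONG_MAX */
	assert(ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));	/* the zero-size poison value     */
	assert(ZERO_OR_NULL_PTR((void *)-22L));		/* an ERR_PTR-style value         */
	assert(!ZERO_OR_NULL_PTR((void *)0x1000));	/* an ordinary pointer            */
	return 0;
}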
61507diff -urNp linux-3.1.4/include/linux/slub_def.h linux-3.1.4/include/linux/slub_def.h
61508--- linux-3.1.4/include/linux/slub_def.h 2011-11-11 15:19:27.000000000 -0500
61509+++ linux-3.1.4/include/linux/slub_def.h 2011-11-16 18:39:08.000000000 -0500
61510@@ -85,7 +85,7 @@ struct kmem_cache {
61511 struct kmem_cache_order_objects max;
61512 struct kmem_cache_order_objects min;
61513 gfp_t allocflags; /* gfp flags to use on each alloc */
61514- int refcount; /* Refcount for slab cache destroy */
61515+ atomic_t refcount; /* Refcount for slab cache destroy */
61516 void (*ctor)(void *);
61517 int inuse; /* Offset to metadata */
61518 int align; /* Alignment */
61519@@ -211,7 +211,7 @@ static __always_inline struct kmem_cache
61520 }
61521
61522 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
61523-void *__kmalloc(size_t size, gfp_t flags);
61524+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
61525
61526 static __always_inline void *
61527 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
61528diff -urNp linux-3.1.4/include/linux/sonet.h linux-3.1.4/include/linux/sonet.h
61529--- linux-3.1.4/include/linux/sonet.h 2011-11-11 15:19:27.000000000 -0500
61530+++ linux-3.1.4/include/linux/sonet.h 2011-11-16 18:39:08.000000000 -0500
61531@@ -61,7 +61,7 @@ struct sonet_stats {
61532 #include <linux/atomic.h>
61533
61534 struct k_sonet_stats {
61535-#define __HANDLE_ITEM(i) atomic_t i
61536+#define __HANDLE_ITEM(i) atomic_unchecked_t i
61537 __SONET_ITEMS
61538 #undef __HANDLE_ITEM
61539 };
61540diff -urNp linux-3.1.4/include/linux/sunrpc/clnt.h linux-3.1.4/include/linux/sunrpc/clnt.h
61541--- linux-3.1.4/include/linux/sunrpc/clnt.h 2011-11-11 15:19:27.000000000 -0500
61542+++ linux-3.1.4/include/linux/sunrpc/clnt.h 2011-11-16 18:39:08.000000000 -0500
61543@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
61544 {
61545 switch (sap->sa_family) {
61546 case AF_INET:
61547- return ntohs(((struct sockaddr_in *)sap)->sin_port);
61548+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
61549 case AF_INET6:
61550- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
61551+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
61552 }
61553 return 0;
61554 }
61555@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
61556 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
61557 const struct sockaddr *src)
61558 {
61559- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
61560+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
61561 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
61562
61563 dsin->sin_family = ssin->sin_family;
61564@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
61565 if (sa->sa_family != AF_INET6)
61566 return 0;
61567
61568- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
61569+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
61570 }
61571
61572 #endif /* __KERNEL__ */
61573diff -urNp linux-3.1.4/include/linux/sunrpc/sched.h linux-3.1.4/include/linux/sunrpc/sched.h
61574--- linux-3.1.4/include/linux/sunrpc/sched.h 2011-11-11 15:19:27.000000000 -0500
61575+++ linux-3.1.4/include/linux/sunrpc/sched.h 2011-11-16 18:39:08.000000000 -0500
61576@@ -105,6 +105,7 @@ struct rpc_call_ops {
61577 void (*rpc_call_done)(struct rpc_task *, void *);
61578 void (*rpc_release)(void *);
61579 };
61580+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
61581
61582 struct rpc_task_setup {
61583 struct rpc_task *task;
61584diff -urNp linux-3.1.4/include/linux/sunrpc/svc_rdma.h linux-3.1.4/include/linux/sunrpc/svc_rdma.h
61585--- linux-3.1.4/include/linux/sunrpc/svc_rdma.h 2011-11-11 15:19:27.000000000 -0500
61586+++ linux-3.1.4/include/linux/sunrpc/svc_rdma.h 2011-11-16 18:39:08.000000000 -0500
61587@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
61588 extern unsigned int svcrdma_max_requests;
61589 extern unsigned int svcrdma_max_req_size;
61590
61591-extern atomic_t rdma_stat_recv;
61592-extern atomic_t rdma_stat_read;
61593-extern atomic_t rdma_stat_write;
61594-extern atomic_t rdma_stat_sq_starve;
61595-extern atomic_t rdma_stat_rq_starve;
61596-extern atomic_t rdma_stat_rq_poll;
61597-extern atomic_t rdma_stat_rq_prod;
61598-extern atomic_t rdma_stat_sq_poll;
61599-extern atomic_t rdma_stat_sq_prod;
61600+extern atomic_unchecked_t rdma_stat_recv;
61601+extern atomic_unchecked_t rdma_stat_read;
61602+extern atomic_unchecked_t rdma_stat_write;
61603+extern atomic_unchecked_t rdma_stat_sq_starve;
61604+extern atomic_unchecked_t rdma_stat_rq_starve;
61605+extern atomic_unchecked_t rdma_stat_rq_poll;
61606+extern atomic_unchecked_t rdma_stat_rq_prod;
61607+extern atomic_unchecked_t rdma_stat_sq_poll;
61608+extern atomic_unchecked_t rdma_stat_sq_prod;
61609
61610 #define RPCRDMA_VERSION 1
61611
61612diff -urNp linux-3.1.4/include/linux/sysctl.h linux-3.1.4/include/linux/sysctl.h
61613--- linux-3.1.4/include/linux/sysctl.h 2011-11-11 15:19:27.000000000 -0500
61614+++ linux-3.1.4/include/linux/sysctl.h 2011-11-16 18:40:31.000000000 -0500
61615@@ -155,7 +155,11 @@ enum
61616 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61617 };
61618
61619-
61620+#ifdef CONFIG_PAX_SOFTMODE
61621+enum {
61622+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61623+};
61624+#endif
61625
61626 /* CTL_VM names: */
61627 enum
61628@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
61629
61630 extern int proc_dostring(struct ctl_table *, int,
61631 void __user *, size_t *, loff_t *);
61632+extern int proc_dostring_modpriv(struct ctl_table *, int,
61633+ void __user *, size_t *, loff_t *);
61634 extern int proc_dointvec(struct ctl_table *, int,
61635 void __user *, size_t *, loff_t *);
61636 extern int proc_dointvec_minmax(struct ctl_table *, int,
61637diff -urNp linux-3.1.4/include/linux/tty_ldisc.h linux-3.1.4/include/linux/tty_ldisc.h
61638--- linux-3.1.4/include/linux/tty_ldisc.h 2011-11-11 15:19:27.000000000 -0500
61639+++ linux-3.1.4/include/linux/tty_ldisc.h 2011-11-16 18:39:08.000000000 -0500
61640@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61641
61642 struct module *owner;
61643
61644- int refcount;
61645+ atomic_t refcount;
61646 };
61647
61648 struct tty_ldisc {
61649diff -urNp linux-3.1.4/include/linux/types.h linux-3.1.4/include/linux/types.h
61650--- linux-3.1.4/include/linux/types.h 2011-11-11 15:19:27.000000000 -0500
61651+++ linux-3.1.4/include/linux/types.h 2011-11-16 18:39:08.000000000 -0500
61652@@ -213,10 +213,26 @@ typedef struct {
61653 int counter;
61654 } atomic_t;
61655
61656+#ifdef CONFIG_PAX_REFCOUNT
61657+typedef struct {
61658+ int counter;
61659+} atomic_unchecked_t;
61660+#else
61661+typedef atomic_t atomic_unchecked_t;
61662+#endif
61663+
61664 #ifdef CONFIG_64BIT
61665 typedef struct {
61666 long counter;
61667 } atomic64_t;
61668+
61669+#ifdef CONFIG_PAX_REFCOUNT
61670+typedef struct {
61671+ long counter;
61672+} atomic64_unchecked_t;
61673+#else
61674+typedef atomic64_t atomic64_unchecked_t;
61675+#endif
61676 #endif
61677
61678 struct list_head {
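Illustration (not part of the patch): atomic_unchecked_t and atomic64_unchecked_t carry counters that are deliberately exempt from PAX_REFCOUNT overflow detection, typically statistics where wrap-around is harmless, while genuine reference counts stay atomic_t. A sketch, assuming the atomic_*_unchecked() accessor family provided by the architecture parts of this patch:

#include <linux/types.h>
#include <linux/atomic.h>

struct example_counters {
	atomic_unchecked_t	rx_errors;	/* may wrap; overflow is benign */
	atomic_t		refcnt;		/* overflow here would be a bug */
};

static inline void example_note_rx_error(struct example_counters *c)
{
	atomic_inc_unchecked(&c->rx_errors);	/* no REFCOUNT trap on wrap */
}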
61679diff -urNp linux-3.1.4/include/linux/uaccess.h linux-3.1.4/include/linux/uaccess.h
61680--- linux-3.1.4/include/linux/uaccess.h 2011-11-11 15:19:27.000000000 -0500
61681+++ linux-3.1.4/include/linux/uaccess.h 2011-11-16 18:39:08.000000000 -0500
61682@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
61683 long ret; \
61684 mm_segment_t old_fs = get_fs(); \
61685 \
61686- set_fs(KERNEL_DS); \
61687 pagefault_disable(); \
61688- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61689- pagefault_enable(); \
61690+ set_fs(KERNEL_DS); \
61691+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61692 set_fs(old_fs); \
61693+ pagefault_enable(); \
61694 ret; \
61695 })
61696
61697diff -urNp linux-3.1.4/include/linux/unaligned/access_ok.h linux-3.1.4/include/linux/unaligned/access_ok.h
61698--- linux-3.1.4/include/linux/unaligned/access_ok.h 2011-11-11 15:19:27.000000000 -0500
61699+++ linux-3.1.4/include/linux/unaligned/access_ok.h 2011-11-16 18:39:08.000000000 -0500
61700@@ -6,32 +6,32 @@
61701
61702 static inline u16 get_unaligned_le16(const void *p)
61703 {
61704- return le16_to_cpup((__le16 *)p);
61705+ return le16_to_cpup((const __le16 *)p);
61706 }
61707
61708 static inline u32 get_unaligned_le32(const void *p)
61709 {
61710- return le32_to_cpup((__le32 *)p);
61711+ return le32_to_cpup((const __le32 *)p);
61712 }
61713
61714 static inline u64 get_unaligned_le64(const void *p)
61715 {
61716- return le64_to_cpup((__le64 *)p);
61717+ return le64_to_cpup((const __le64 *)p);
61718 }
61719
61720 static inline u16 get_unaligned_be16(const void *p)
61721 {
61722- return be16_to_cpup((__be16 *)p);
61723+ return be16_to_cpup((const __be16 *)p);
61724 }
61725
61726 static inline u32 get_unaligned_be32(const void *p)
61727 {
61728- return be32_to_cpup((__be32 *)p);
61729+ return be32_to_cpup((const __be32 *)p);
61730 }
61731
61732 static inline u64 get_unaligned_be64(const void *p)
61733 {
61734- return be64_to_cpup((__be64 *)p);
61735+ return be64_to_cpup((const __be64 *)p);
61736 }
61737
61738 static inline void put_unaligned_le16(u16 val, void *p)
61739diff -urNp linux-3.1.4/include/linux/vermagic.h linux-3.1.4/include/linux/vermagic.h
61740--- linux-3.1.4/include/linux/vermagic.h 2011-11-11 15:19:27.000000000 -0500
61741+++ linux-3.1.4/include/linux/vermagic.h 2011-11-16 18:54:54.000000000 -0500
61742@@ -26,9 +26,35 @@
61743 #define MODULE_ARCH_VERMAGIC ""
61744 #endif
61745
61746+#ifdef CONFIG_PAX_REFCOUNT
61747+#define MODULE_PAX_REFCOUNT "REFCOUNT "
61748+#else
61749+#define MODULE_PAX_REFCOUNT ""
61750+#endif
61751+
61752+#ifdef CONSTIFY_PLUGIN
61753+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61754+#else
61755+#define MODULE_CONSTIFY_PLUGIN ""
61756+#endif
61757+
61758+#ifdef STACKLEAK_PLUGIN
61759+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61760+#else
61761+#define MODULE_STACKLEAK_PLUGIN ""
61762+#endif
61763+
61764+#ifdef CONFIG_GRKERNSEC
61765+#define MODULE_GRSEC "GRSEC "
61766+#else
61767+#define MODULE_GRSEC ""
61768+#endif
61769+
61770 #define VERMAGIC_STRING \
61771 UTS_RELEASE " " \
61772 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61773 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61774- MODULE_ARCH_VERMAGIC
61775+ MODULE_ARCH_VERMAGIC \
61776+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61777+ MODULE_GRSEC
61778
61779diff -urNp linux-3.1.4/include/linux/vmalloc.h linux-3.1.4/include/linux/vmalloc.h
61780--- linux-3.1.4/include/linux/vmalloc.h 2011-11-11 15:19:27.000000000 -0500
61781+++ linux-3.1.4/include/linux/vmalloc.h 2011-11-16 18:39:08.000000000 -0500
61782@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining
61783 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61784 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61785 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61786+
61787+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61788+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61789+#endif
61790+
61791 /* bits [20..32] reserved for arch specific ioremap internals */
61792
61793 /*
61794@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
61795 # endif
61796 #endif
61797
61798+#define vmalloc(x) \
61799+({ \
61800+ void *___retval; \
61801+ intoverflow_t ___x = (intoverflow_t)x; \
61802+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61803+ ___retval = NULL; \
61804+ else \
61805+ ___retval = vmalloc((unsigned long)___x); \
61806+ ___retval; \
61807+})
61808+
61809+#define vzalloc(x) \
61810+({ \
61811+ void *___retval; \
61812+ intoverflow_t ___x = (intoverflow_t)x; \
61813+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61814+ ___retval = NULL; \
61815+ else \
61816+ ___retval = vzalloc((unsigned long)___x); \
61817+ ___retval; \
61818+})
61819+
61820+#define __vmalloc(x, y, z) \
61821+({ \
61822+ void *___retval; \
61823+ intoverflow_t ___x = (intoverflow_t)x; \
61824+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61825+ ___retval = NULL; \
61826+ else \
61827+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61828+ ___retval; \
61829+})
61830+
61831+#define vmalloc_user(x) \
61832+({ \
61833+ void *___retval; \
61834+ intoverflow_t ___x = (intoverflow_t)x; \
61835+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61836+ ___retval = NULL; \
61837+ else \
61838+ ___retval = vmalloc_user((unsigned long)___x); \
61839+ ___retval; \
61840+})
61841+
61842+#define vmalloc_exec(x) \
61843+({ \
61844+ void *___retval; \
61845+ intoverflow_t ___x = (intoverflow_t)x; \
61846+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61847+ ___retval = NULL; \
61848+ else \
61849+ ___retval = vmalloc_exec((unsigned long)___x); \
61850+ ___retval; \
61851+})
61852+
61853+#define vmalloc_node(x, y) \
61854+({ \
61855+ void *___retval; \
61856+ intoverflow_t ___x = (intoverflow_t)x; \
61857+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61858+ ___retval = NULL; \
61859+ else \
61860+ ___retval = vmalloc_node((unsigned long)___x, (y));\
61861+ ___retval; \
61862+})
61863+
61864+#define vzalloc_node(x, y) \
61865+({ \
61866+ void *___retval; \
61867+ intoverflow_t ___x = (intoverflow_t)x; \
61868+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61869+ ___retval = NULL; \
61870+ else \
61871+ ___retval = vzalloc_node((unsigned long)___x, (y));\
61872+ ___retval; \
61873+})
61874+
61875+#define vmalloc_32(x) \
61876+({ \
61877+ void *___retval; \
61878+ intoverflow_t ___x = (intoverflow_t)x; \
61879+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61880+ ___retval = NULL; \
61881+ else \
61882+ ___retval = vmalloc_32((unsigned long)___x); \
61883+ ___retval; \
61884+})
61885+
61886+#define vmalloc_32_user(x) \
61887+({ \
61888+	void *___retval;					\
61889+ intoverflow_t ___x = (intoverflow_t)x; \
61890+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61891+ ___retval = NULL; \
61892+ else \
61893+ ___retval = vmalloc_32_user((unsigned long)___x);\
61894+ ___retval; \
61895+})
61896+
61897 #endif /* _LINUX_VMALLOC_H */
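Worked example (not part of the patch): the wrapper macros above widen the requested size to intoverflow_t (a 64-bit type introduced elsewhere in this patch) and compare it against ULONG_MAX before calling the real allocator, so a 64-bit length that would otherwise be silently truncated by the unsigned long parameter on a 32-bit kernel is caught, WARNed about, and turned into a NULL return. Sketch with a hypothetical user-supplied length:

#include <linux/vmalloc.h>
#include <linux/types.h>

static void *example_alloc(u64 len_from_user)	/* hypothetical 64-bit length */
{
	/*
	 * On a 32-bit kernel a length of 0x100000001 would previously truncate
	 * to 1 at the unsigned long parameter, returning a one-page mapping for
	 * what the caller believes is a >4 GiB buffer; with the wrapper the full
	 * value is checked first and the request fails instead.
	 */
	return vmalloc(len_from_user);
}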
61898diff -urNp linux-3.1.4/include/linux/vmstat.h linux-3.1.4/include/linux/vmstat.h
61899--- linux-3.1.4/include/linux/vmstat.h 2011-11-11 15:19:27.000000000 -0500
61900+++ linux-3.1.4/include/linux/vmstat.h 2011-11-16 18:39:08.000000000 -0500
61901@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
61902 /*
61903 * Zone based page accounting with per cpu differentials.
61904 */
61905-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61906+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61907
61908 static inline void zone_page_state_add(long x, struct zone *zone,
61909 enum zone_stat_item item)
61910 {
61911- atomic_long_add(x, &zone->vm_stat[item]);
61912- atomic_long_add(x, &vm_stat[item]);
61913+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61914+ atomic_long_add_unchecked(x, &vm_stat[item]);
61915 }
61916
61917 static inline unsigned long global_page_state(enum zone_stat_item item)
61918 {
61919- long x = atomic_long_read(&vm_stat[item]);
61920+ long x = atomic_long_read_unchecked(&vm_stat[item]);
61921 #ifdef CONFIG_SMP
61922 if (x < 0)
61923 x = 0;
61924@@ -109,7 +109,7 @@ static inline unsigned long global_page_
61925 static inline unsigned long zone_page_state(struct zone *zone,
61926 enum zone_stat_item item)
61927 {
61928- long x = atomic_long_read(&zone->vm_stat[item]);
61929+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61930 #ifdef CONFIG_SMP
61931 if (x < 0)
61932 x = 0;
61933@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
61934 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61935 enum zone_stat_item item)
61936 {
61937- long x = atomic_long_read(&zone->vm_stat[item]);
61938+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61939
61940 #ifdef CONFIG_SMP
61941 int cpu;
61942@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
61943
61944 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61945 {
61946- atomic_long_inc(&zone->vm_stat[item]);
61947- atomic_long_inc(&vm_stat[item]);
61948+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61949+ atomic_long_inc_unchecked(&vm_stat[item]);
61950 }
61951
61952 static inline void __inc_zone_page_state(struct page *page,
61953@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
61954
61955 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61956 {
61957- atomic_long_dec(&zone->vm_stat[item]);
61958- atomic_long_dec(&vm_stat[item]);
61959+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61960+ atomic_long_dec_unchecked(&vm_stat[item]);
61961 }
61962
61963 static inline void __dec_zone_page_state(struct page *page,
61964diff -urNp linux-3.1.4/include/media/saa7146_vv.h linux-3.1.4/include/media/saa7146_vv.h
61965--- linux-3.1.4/include/media/saa7146_vv.h 2011-11-11 15:19:27.000000000 -0500
61966+++ linux-3.1.4/include/media/saa7146_vv.h 2011-11-16 18:39:08.000000000 -0500
61967@@ -163,7 +163,7 @@ struct saa7146_ext_vv
61968 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61969
61970 /* the extension can override this */
61971- struct v4l2_ioctl_ops ops;
61972+ v4l2_ioctl_ops_no_const ops;
61973 /* pointer to the saa7146 core ops */
61974 const struct v4l2_ioctl_ops *core_ops;
61975
61976diff -urNp linux-3.1.4/include/media/v4l2-dev.h linux-3.1.4/include/media/v4l2-dev.h
61977--- linux-3.1.4/include/media/v4l2-dev.h 2011-11-11 15:19:27.000000000 -0500
61978+++ linux-3.1.4/include/media/v4l2-dev.h 2011-11-16 18:39:08.000000000 -0500
61979@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
61980
61981
61982 struct v4l2_file_operations {
61983- struct module *owner;
61984+ struct module * const owner;
61985 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61986 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61987 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61988@@ -68,6 +68,7 @@ struct v4l2_file_operations {
61989 int (*open) (struct file *);
61990 int (*release) (struct file *);
61991 };
61992+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61993
61994 /*
61995 * Newer version of video_device, handled by videodev2.c
61996diff -urNp linux-3.1.4/include/media/v4l2-ioctl.h linux-3.1.4/include/media/v4l2-ioctl.h
61997--- linux-3.1.4/include/media/v4l2-ioctl.h 2011-11-11 15:19:27.000000000 -0500
61998+++ linux-3.1.4/include/media/v4l2-ioctl.h 2011-11-17 18:44:20.000000000 -0500
61999@@ -272,7 +272,7 @@ struct v4l2_ioctl_ops {
62000 long (*vidioc_default) (struct file *file, void *fh,
62001 bool valid_prio, int cmd, void *arg);
62002 };
62003-
62004+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
62005
62006 /* v4l debugging and diagnostics */
62007
62008diff -urNp linux-3.1.4/include/net/caif/caif_hsi.h linux-3.1.4/include/net/caif/caif_hsi.h
62009--- linux-3.1.4/include/net/caif/caif_hsi.h 2011-11-11 15:19:27.000000000 -0500
62010+++ linux-3.1.4/include/net/caif/caif_hsi.h 2011-11-16 18:39:08.000000000 -0500
62011@@ -94,7 +94,7 @@ struct cfhsi_drv {
62012 void (*rx_done_cb) (struct cfhsi_drv *drv);
62013 void (*wake_up_cb) (struct cfhsi_drv *drv);
62014 void (*wake_down_cb) (struct cfhsi_drv *drv);
62015-};
62016+} __no_const;
62017
62018 /* Structure implemented by HSI device. */
62019 struct cfhsi_dev {
62020diff -urNp linux-3.1.4/include/net/caif/cfctrl.h linux-3.1.4/include/net/caif/cfctrl.h
62021--- linux-3.1.4/include/net/caif/cfctrl.h 2011-11-11 15:19:27.000000000 -0500
62022+++ linux-3.1.4/include/net/caif/cfctrl.h 2011-11-16 18:39:08.000000000 -0500
62023@@ -52,7 +52,7 @@ struct cfctrl_rsp {
62024 void (*radioset_rsp)(void);
62025 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
62026 struct cflayer *client_layer);
62027-};
62028+} __no_const;
62029
62030 /* Link Setup Parameters for CAIF-Links. */
62031 struct cfctrl_link_param {
62032@@ -101,8 +101,8 @@ struct cfctrl_request_info {
62033 struct cfctrl {
62034 struct cfsrvl serv;
62035 struct cfctrl_rsp res;
62036- atomic_t req_seq_no;
62037- atomic_t rsp_seq_no;
62038+ atomic_unchecked_t req_seq_no;
62039+ atomic_unchecked_t rsp_seq_no;
62040 struct list_head list;
62041 /* Protects from simultaneous access to first_req list */
62042 spinlock_t info_list_lock;
62043diff -urNp linux-3.1.4/include/net/flow.h linux-3.1.4/include/net/flow.h
62044--- linux-3.1.4/include/net/flow.h 2011-11-11 15:19:27.000000000 -0500
62045+++ linux-3.1.4/include/net/flow.h 2011-11-16 18:39:08.000000000 -0500
62046@@ -207,6 +207,6 @@ extern struct flow_cache_object *flow_ca
62047 u8 dir, flow_resolve_t resolver, void *ctx);
62048
62049 extern void flow_cache_flush(void);
62050-extern atomic_t flow_cache_genid;
62051+extern atomic_unchecked_t flow_cache_genid;
62052
62053 #endif
62054diff -urNp linux-3.1.4/include/net/inetpeer.h linux-3.1.4/include/net/inetpeer.h
62055--- linux-3.1.4/include/net/inetpeer.h 2011-11-11 15:19:27.000000000 -0500
62056+++ linux-3.1.4/include/net/inetpeer.h 2011-11-16 18:39:08.000000000 -0500
62057@@ -47,8 +47,8 @@ struct inet_peer {
62058 */
62059 union {
62060 struct {
62061- atomic_t rid; /* Frag reception counter */
62062- atomic_t ip_id_count; /* IP ID for the next packet */
62063+ atomic_unchecked_t rid; /* Frag reception counter */
62064+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
62065 __u32 tcp_ts;
62066 __u32 tcp_ts_stamp;
62067 };
62068@@ -112,11 +112,11 @@ static inline int inet_getid(struct inet
62069 more++;
62070 inet_peer_refcheck(p);
62071 do {
62072- old = atomic_read(&p->ip_id_count);
62073+ old = atomic_read_unchecked(&p->ip_id_count);
62074 new = old + more;
62075 if (!new)
62076 new = 1;
62077- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
62078+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
62079 return new;
62080 }
62081
62082diff -urNp linux-3.1.4/include/net/ip_fib.h linux-3.1.4/include/net/ip_fib.h
62083--- linux-3.1.4/include/net/ip_fib.h 2011-11-11 15:19:27.000000000 -0500
62084+++ linux-3.1.4/include/net/ip_fib.h 2011-11-16 18:39:08.000000000 -0500
62085@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
62086
62087 #define FIB_RES_SADDR(net, res) \
62088 ((FIB_RES_NH(res).nh_saddr_genid == \
62089- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
62090+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
62091 FIB_RES_NH(res).nh_saddr : \
62092 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
62093 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
62094diff -urNp linux-3.1.4/include/net/ip_vs.h linux-3.1.4/include/net/ip_vs.h
62095--- linux-3.1.4/include/net/ip_vs.h 2011-11-11 15:19:27.000000000 -0500
62096+++ linux-3.1.4/include/net/ip_vs.h 2011-11-16 18:39:08.000000000 -0500
62097@@ -509,7 +509,7 @@ struct ip_vs_conn {
62098 struct ip_vs_conn *control; /* Master control connection */
62099 atomic_t n_control; /* Number of controlled ones */
62100 struct ip_vs_dest *dest; /* real server */
62101- atomic_t in_pkts; /* incoming packet counter */
62102+ atomic_unchecked_t in_pkts; /* incoming packet counter */
62103
62104 /* packet transmitter for different forwarding methods. If it
62105 mangles the packet, it must return NF_DROP or better NF_STOLEN,
62106@@ -647,7 +647,7 @@ struct ip_vs_dest {
62107 __be16 port; /* port number of the server */
62108 union nf_inet_addr addr; /* IP address of the server */
62109 volatile unsigned flags; /* dest status flags */
62110- atomic_t conn_flags; /* flags to copy to conn */
62111+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
62112 atomic_t weight; /* server weight */
62113
62114 atomic_t refcnt; /* reference counter */
62115diff -urNp linux-3.1.4/include/net/irda/ircomm_core.h linux-3.1.4/include/net/irda/ircomm_core.h
62116--- linux-3.1.4/include/net/irda/ircomm_core.h 2011-11-11 15:19:27.000000000 -0500
62117+++ linux-3.1.4/include/net/irda/ircomm_core.h 2011-11-16 18:39:08.000000000 -0500
62118@@ -51,7 +51,7 @@ typedef struct {
62119 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
62120 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
62121 struct ircomm_info *);
62122-} call_t;
62123+} __no_const call_t;
62124
62125 struct ircomm_cb {
62126 irda_queue_t queue;
62127diff -urNp linux-3.1.4/include/net/irda/ircomm_tty.h linux-3.1.4/include/net/irda/ircomm_tty.h
62128--- linux-3.1.4/include/net/irda/ircomm_tty.h 2011-11-11 15:19:27.000000000 -0500
62129+++ linux-3.1.4/include/net/irda/ircomm_tty.h 2011-11-16 18:39:08.000000000 -0500
62130@@ -35,6 +35,7 @@
62131 #include <linux/termios.h>
62132 #include <linux/timer.h>
62133 #include <linux/tty.h> /* struct tty_struct */
62134+#include <asm/local.h>
62135
62136 #include <net/irda/irias_object.h>
62137 #include <net/irda/ircomm_core.h>
62138@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
62139 unsigned short close_delay;
62140 unsigned short closing_wait; /* time to wait before closing */
62141
62142- int open_count;
62143- int blocked_open; /* # of blocked opens */
62144+ local_t open_count;
62145+ local_t blocked_open; /* # of blocked opens */
62146
62147 /* Protect concurent access to :
62148 * o self->open_count
62149diff -urNp linux-3.1.4/include/net/iucv/af_iucv.h linux-3.1.4/include/net/iucv/af_iucv.h
62150--- linux-3.1.4/include/net/iucv/af_iucv.h 2011-11-11 15:19:27.000000000 -0500
62151+++ linux-3.1.4/include/net/iucv/af_iucv.h 2011-11-16 18:39:08.000000000 -0500
62152@@ -87,7 +87,7 @@ struct iucv_sock {
62153 struct iucv_sock_list {
62154 struct hlist_head head;
62155 rwlock_t lock;
62156- atomic_t autobind_name;
62157+ atomic_unchecked_t autobind_name;
62158 };
62159
62160 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
62161diff -urNp linux-3.1.4/include/net/lapb.h linux-3.1.4/include/net/lapb.h
62162--- linux-3.1.4/include/net/lapb.h 2011-11-11 15:19:27.000000000 -0500
62163+++ linux-3.1.4/include/net/lapb.h 2011-11-16 18:39:08.000000000 -0500
62164@@ -95,7 +95,7 @@ struct lapb_cb {
62165 struct sk_buff_head write_queue;
62166 struct sk_buff_head ack_queue;
62167 unsigned char window;
62168- struct lapb_register_struct callbacks;
62169+ struct lapb_register_struct *callbacks;
62170
62171 /* FRMR control information */
62172 struct lapb_frame frmr_data;
62173diff -urNp linux-3.1.4/include/net/neighbour.h linux-3.1.4/include/net/neighbour.h
62174--- linux-3.1.4/include/net/neighbour.h 2011-11-11 15:19:27.000000000 -0500
62175+++ linux-3.1.4/include/net/neighbour.h 2011-11-16 18:39:08.000000000 -0500
62176@@ -122,7 +122,7 @@ struct neigh_ops {
62177 void (*error_report)(struct neighbour *, struct sk_buff *);
62178 int (*output)(struct neighbour *, struct sk_buff *);
62179 int (*connected_output)(struct neighbour *, struct sk_buff *);
62180-};
62181+} __do_const;
62182
62183 struct pneigh_entry {
62184 struct pneigh_entry *next;
62185diff -urNp linux-3.1.4/include/net/netlink.h linux-3.1.4/include/net/netlink.h
62186--- linux-3.1.4/include/net/netlink.h 2011-11-11 15:19:27.000000000 -0500
62187+++ linux-3.1.4/include/net/netlink.h 2011-11-16 18:39:08.000000000 -0500
62188@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
62189 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
62190 {
62191 if (mark)
62192- skb_trim(skb, (unsigned char *) mark - skb->data);
62193+ skb_trim(skb, (const unsigned char *) mark - skb->data);
62194 }
62195
62196 /**
62197diff -urNp linux-3.1.4/include/net/netns/ipv4.h linux-3.1.4/include/net/netns/ipv4.h
62198--- linux-3.1.4/include/net/netns/ipv4.h 2011-11-11 15:19:27.000000000 -0500
62199+++ linux-3.1.4/include/net/netns/ipv4.h 2011-11-16 18:39:08.000000000 -0500
62200@@ -56,8 +56,8 @@ struct netns_ipv4 {
62201
62202 unsigned int sysctl_ping_group_range[2];
62203
62204- atomic_t rt_genid;
62205- atomic_t dev_addr_genid;
62206+ atomic_unchecked_t rt_genid;
62207+ atomic_unchecked_t dev_addr_genid;
62208
62209 #ifdef CONFIG_IP_MROUTE
62210 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
62211diff -urNp linux-3.1.4/include/net/sctp/sctp.h linux-3.1.4/include/net/sctp/sctp.h
62212--- linux-3.1.4/include/net/sctp/sctp.h 2011-11-11 15:19:27.000000000 -0500
62213+++ linux-3.1.4/include/net/sctp/sctp.h 2011-11-16 18:39:08.000000000 -0500
62214@@ -318,9 +318,9 @@ do { \
62215
62216 #else /* SCTP_DEBUG */
62217
62218-#define SCTP_DEBUG_PRINTK(whatever...)
62219-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
62220-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
62221+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
62222+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
62223+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
62224 #define SCTP_ENABLE_DEBUG
62225 #define SCTP_DISABLE_DEBUG
62226 #define SCTP_ASSERT(expr, str, func)
62227diff -urNp linux-3.1.4/include/net/sock.h linux-3.1.4/include/net/sock.h
62228--- linux-3.1.4/include/net/sock.h 2011-11-11 15:19:27.000000000 -0500
62229+++ linux-3.1.4/include/net/sock.h 2011-11-16 18:39:08.000000000 -0500
62230@@ -278,7 +278,7 @@ struct sock {
62231 #ifdef CONFIG_RPS
62232 __u32 sk_rxhash;
62233 #endif
62234- atomic_t sk_drops;
62235+ atomic_unchecked_t sk_drops;
62236 int sk_rcvbuf;
62237
62238 struct sk_filter __rcu *sk_filter;
62239@@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct
62240 }
62241
62242 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
62243- char __user *from, char *to,
62244+ char __user *from, unsigned char *to,
62245 int copy, int offset)
62246 {
62247 if (skb->ip_summed == CHECKSUM_NONE) {
62248diff -urNp linux-3.1.4/include/net/tcp.h linux-3.1.4/include/net/tcp.h
62249--- linux-3.1.4/include/net/tcp.h 2011-11-11 15:19:27.000000000 -0500
62250+++ linux-3.1.4/include/net/tcp.h 2011-11-16 18:39:08.000000000 -0500
62251@@ -1401,8 +1401,8 @@ enum tcp_seq_states {
62252 struct tcp_seq_afinfo {
62253 char *name;
62254 sa_family_t family;
62255- struct file_operations seq_fops;
62256- struct seq_operations seq_ops;
62257+ file_operations_no_const seq_fops;
62258+ seq_operations_no_const seq_ops;
62259 };
62260
62261 struct tcp_iter_state {
62262diff -urNp linux-3.1.4/include/net/udp.h linux-3.1.4/include/net/udp.h
62263--- linux-3.1.4/include/net/udp.h 2011-11-11 15:19:27.000000000 -0500
62264+++ linux-3.1.4/include/net/udp.h 2011-11-16 18:39:08.000000000 -0500
62265@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
62266 char *name;
62267 sa_family_t family;
62268 struct udp_table *udp_table;
62269- struct file_operations seq_fops;
62270- struct seq_operations seq_ops;
62271+ file_operations_no_const seq_fops;
62272+ seq_operations_no_const seq_ops;
62273 };
62274
62275 struct udp_iter_state {
62276diff -urNp linux-3.1.4/include/net/xfrm.h linux-3.1.4/include/net/xfrm.h
62277--- linux-3.1.4/include/net/xfrm.h 2011-11-11 15:19:27.000000000 -0500
62278+++ linux-3.1.4/include/net/xfrm.h 2011-11-16 18:39:08.000000000 -0500
62279@@ -505,7 +505,7 @@ struct xfrm_policy {
62280 struct timer_list timer;
62281
62282 struct flow_cache_object flo;
62283- atomic_t genid;
62284+ atomic_unchecked_t genid;
62285 u32 priority;
62286 u32 index;
62287 struct xfrm_mark mark;
62288diff -urNp linux-3.1.4/include/rdma/iw_cm.h linux-3.1.4/include/rdma/iw_cm.h
62289--- linux-3.1.4/include/rdma/iw_cm.h 2011-11-11 15:19:27.000000000 -0500
62290+++ linux-3.1.4/include/rdma/iw_cm.h 2011-11-16 18:39:08.000000000 -0500
62291@@ -120,7 +120,7 @@ struct iw_cm_verbs {
62292 int backlog);
62293
62294 int (*destroy_listen)(struct iw_cm_id *cm_id);
62295-};
62296+} __no_const;
62297
62298 /**
62299 * iw_create_cm_id - Create an IW CM identifier.
62300diff -urNp linux-3.1.4/include/scsi/libfc.h linux-3.1.4/include/scsi/libfc.h
62301--- linux-3.1.4/include/scsi/libfc.h 2011-11-11 15:19:27.000000000 -0500
62302+++ linux-3.1.4/include/scsi/libfc.h 2011-11-16 18:39:08.000000000 -0500
62303@@ -758,6 +758,7 @@ struct libfc_function_template {
62304 */
62305 void (*disc_stop_final) (struct fc_lport *);
62306 };
62307+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
62308
62309 /**
62310 * struct fc_disc - Discovery context
62311@@ -861,7 +862,7 @@ struct fc_lport {
62312 struct fc_vport *vport;
62313
62314 /* Operational Information */
62315- struct libfc_function_template tt;
62316+ libfc_function_template_no_const tt;
62317 u8 link_up;
62318 u8 qfull;
62319 enum fc_lport_state state;
62320diff -urNp linux-3.1.4/include/scsi/scsi_device.h linux-3.1.4/include/scsi/scsi_device.h
62321--- linux-3.1.4/include/scsi/scsi_device.h 2011-11-11 15:19:27.000000000 -0500
62322+++ linux-3.1.4/include/scsi/scsi_device.h 2011-11-16 18:39:08.000000000 -0500
62323@@ -161,9 +161,9 @@ struct scsi_device {
62324 unsigned int max_device_blocked; /* what device_blocked counts down from */
62325 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
62326
62327- atomic_t iorequest_cnt;
62328- atomic_t iodone_cnt;
62329- atomic_t ioerr_cnt;
62330+ atomic_unchecked_t iorequest_cnt;
62331+ atomic_unchecked_t iodone_cnt;
62332+ atomic_unchecked_t ioerr_cnt;
62333
62334 struct device sdev_gendev,
62335 sdev_dev;
62336diff -urNp linux-3.1.4/include/scsi/scsi_transport_fc.h linux-3.1.4/include/scsi/scsi_transport_fc.h
62337--- linux-3.1.4/include/scsi/scsi_transport_fc.h 2011-11-11 15:19:27.000000000 -0500
62338+++ linux-3.1.4/include/scsi/scsi_transport_fc.h 2011-11-16 18:39:08.000000000 -0500
62339@@ -711,7 +711,7 @@ struct fc_function_template {
62340 unsigned long show_host_system_hostname:1;
62341
62342 unsigned long disable_target_scan:1;
62343-};
62344+} __do_const;
62345
62346
62347 /**
62348diff -urNp linux-3.1.4/include/sound/ak4xxx-adda.h linux-3.1.4/include/sound/ak4xxx-adda.h
62349--- linux-3.1.4/include/sound/ak4xxx-adda.h 2011-11-11 15:19:27.000000000 -0500
62350+++ linux-3.1.4/include/sound/ak4xxx-adda.h 2011-11-16 18:39:08.000000000 -0500
62351@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
62352 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
62353 unsigned char val);
62354 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
62355-};
62356+} __no_const;
62357
62358 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
62359
62360diff -urNp linux-3.1.4/include/sound/hwdep.h linux-3.1.4/include/sound/hwdep.h
62361--- linux-3.1.4/include/sound/hwdep.h 2011-11-11 15:19:27.000000000 -0500
62362+++ linux-3.1.4/include/sound/hwdep.h 2011-11-16 18:39:08.000000000 -0500
62363@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
62364 struct snd_hwdep_dsp_status *status);
62365 int (*dsp_load)(struct snd_hwdep *hw,
62366 struct snd_hwdep_dsp_image *image);
62367-};
62368+} __no_const;
62369
62370 struct snd_hwdep {
62371 struct snd_card *card;
62372diff -urNp linux-3.1.4/include/sound/info.h linux-3.1.4/include/sound/info.h
62373--- linux-3.1.4/include/sound/info.h 2011-11-11 15:19:27.000000000 -0500
62374+++ linux-3.1.4/include/sound/info.h 2011-11-16 18:39:08.000000000 -0500
62375@@ -44,7 +44,7 @@ struct snd_info_entry_text {
62376 struct snd_info_buffer *buffer);
62377 void (*write)(struct snd_info_entry *entry,
62378 struct snd_info_buffer *buffer);
62379-};
62380+} __no_const;
62381
62382 struct snd_info_entry_ops {
62383 int (*open)(struct snd_info_entry *entry,
62384diff -urNp linux-3.1.4/include/sound/pcm.h linux-3.1.4/include/sound/pcm.h
62385--- linux-3.1.4/include/sound/pcm.h 2011-11-11 15:19:27.000000000 -0500
62386+++ linux-3.1.4/include/sound/pcm.h 2011-11-16 18:39:08.000000000 -0500
62387@@ -81,6 +81,7 @@ struct snd_pcm_ops {
62388 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
62389 int (*ack)(struct snd_pcm_substream *substream);
62390 };
62391+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
62392
62393 /*
62394 *
62395diff -urNp linux-3.1.4/include/sound/sb16_csp.h linux-3.1.4/include/sound/sb16_csp.h
62396--- linux-3.1.4/include/sound/sb16_csp.h 2011-11-11 15:19:27.000000000 -0500
62397+++ linux-3.1.4/include/sound/sb16_csp.h 2011-11-16 18:39:08.000000000 -0500
62398@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
62399 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
62400 int (*csp_stop) (struct snd_sb_csp * p);
62401 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
62402-};
62403+} __no_const;
62404
62405 /*
62406 * CSP private data
62407diff -urNp linux-3.1.4/include/sound/soc.h linux-3.1.4/include/sound/soc.h
62408--- linux-3.1.4/include/sound/soc.h 2011-11-11 15:19:27.000000000 -0500
62409+++ linux-3.1.4/include/sound/soc.h 2011-11-16 18:39:08.000000000 -0500
62410@@ -676,7 +676,7 @@ struct snd_soc_platform_driver {
62411 /* platform IO - used for platform DAPM */
62412 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
62413 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
62414-};
62415+} __do_const;
62416
62417 struct snd_soc_platform {
62418 const char *name;
62419diff -urNp linux-3.1.4/include/sound/ymfpci.h linux-3.1.4/include/sound/ymfpci.h
62420--- linux-3.1.4/include/sound/ymfpci.h 2011-11-11 15:19:27.000000000 -0500
62421+++ linux-3.1.4/include/sound/ymfpci.h 2011-11-16 18:39:08.000000000 -0500
62422@@ -358,7 +358,7 @@ struct snd_ymfpci {
62423 spinlock_t reg_lock;
62424 spinlock_t voice_lock;
62425 wait_queue_head_t interrupt_sleep;
62426- atomic_t interrupt_sleep_count;
62427+ atomic_unchecked_t interrupt_sleep_count;
62428 struct snd_info_entry *proc_entry;
62429 const struct firmware *dsp_microcode;
62430 const struct firmware *controller_microcode;
62431diff -urNp linux-3.1.4/include/target/target_core_base.h linux-3.1.4/include/target/target_core_base.h
62432--- linux-3.1.4/include/target/target_core_base.h 2011-11-11 15:19:27.000000000 -0500
62433+++ linux-3.1.4/include/target/target_core_base.h 2011-11-16 18:39:08.000000000 -0500
62434@@ -356,7 +356,7 @@ struct t10_reservation_ops {
62435 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
62436 int (*t10_pr_register)(struct se_cmd *);
62437 int (*t10_pr_clear)(struct se_cmd *);
62438-};
62439+} __no_const;
62440
62441 struct t10_reservation {
62442 /* Reservation effects all target ports */
62443@@ -496,8 +496,8 @@ struct se_cmd {
62444 atomic_t t_task_cdbs_left;
62445 atomic_t t_task_cdbs_ex_left;
62446 atomic_t t_task_cdbs_timeout_left;
62447- atomic_t t_task_cdbs_sent;
62448- atomic_t t_transport_aborted;
62449+ atomic_unchecked_t t_task_cdbs_sent;
62450+ atomic_unchecked_t t_transport_aborted;
62451 atomic_t t_transport_active;
62452 atomic_t t_transport_complete;
62453 atomic_t t_transport_queue_active;
62454@@ -744,7 +744,7 @@ struct se_device {
62455 atomic_t active_cmds;
62456 atomic_t simple_cmds;
62457 atomic_t depth_left;
62458- atomic_t dev_ordered_id;
62459+ atomic_unchecked_t dev_ordered_id;
62460 atomic_t dev_tur_active;
62461 atomic_t execute_tasks;
62462 atomic_t dev_status_thr_count;
62463diff -urNp linux-3.1.4/include/trace/events/irq.h linux-3.1.4/include/trace/events/irq.h
62464--- linux-3.1.4/include/trace/events/irq.h 2011-11-11 15:19:27.000000000 -0500
62465+++ linux-3.1.4/include/trace/events/irq.h 2011-11-16 18:39:08.000000000 -0500
62466@@ -36,7 +36,7 @@ struct softirq_action;
62467 */
62468 TRACE_EVENT(irq_handler_entry,
62469
62470- TP_PROTO(int irq, struct irqaction *action),
62471+ TP_PROTO(int irq, const struct irqaction *action),
62472
62473 TP_ARGS(irq, action),
62474
62475@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
62476 */
62477 TRACE_EVENT(irq_handler_exit,
62478
62479- TP_PROTO(int irq, struct irqaction *action, int ret),
62480+ TP_PROTO(int irq, const struct irqaction *action, int ret),
62481
62482 TP_ARGS(irq, action, ret),
62483
62484diff -urNp linux-3.1.4/include/video/udlfb.h linux-3.1.4/include/video/udlfb.h
62485--- linux-3.1.4/include/video/udlfb.h 2011-11-11 15:19:27.000000000 -0500
62486+++ linux-3.1.4/include/video/udlfb.h 2011-11-16 18:39:08.000000000 -0500
62487@@ -51,10 +51,10 @@ struct dlfb_data {
62488 int base8;
62489 u32 pseudo_palette[256];
62490 /* blit-only rendering path metrics, exposed through sysfs */
62491- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62492- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
62493- atomic_t bytes_sent; /* to usb, after compression including overhead */
62494- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
62495+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62496+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
62497+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
62498+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
62499 };
62500
62501 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
62502diff -urNp linux-3.1.4/include/video/uvesafb.h linux-3.1.4/include/video/uvesafb.h
62503--- linux-3.1.4/include/video/uvesafb.h 2011-11-11 15:19:27.000000000 -0500
62504+++ linux-3.1.4/include/video/uvesafb.h 2011-11-16 18:39:08.000000000 -0500
62505@@ -177,6 +177,7 @@ struct uvesafb_par {
62506 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
62507 u8 pmi_setpal; /* PMI for palette changes */
62508 u16 *pmi_base; /* protected mode interface location */
62509+ u8 *pmi_code; /* protected mode code location */
62510 void *pmi_start;
62511 void *pmi_pal;
62512 u8 *vbe_state_orig; /*
62513diff -urNp linux-3.1.4/init/do_mounts.c linux-3.1.4/init/do_mounts.c
62514--- linux-3.1.4/init/do_mounts.c 2011-11-11 15:19:27.000000000 -0500
62515+++ linux-3.1.4/init/do_mounts.c 2011-11-16 18:39:08.000000000 -0500
62516@@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
62517
62518 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62519 {
62520- int err = sys_mount(name, "/root", fs, flags, data);
62521+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
62522 if (err)
62523 return err;
62524
62525- sys_chdir((const char __user __force *)"/root");
62526+ sys_chdir((const char __force_user*)"/root");
62527 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62528 printk(KERN_INFO
62529 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62530@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
62531 va_start(args, fmt);
62532 vsprintf(buf, fmt, args);
62533 va_end(args);
62534- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62535+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62536 if (fd >= 0) {
62537 sys_ioctl(fd, FDEJECT, 0);
62538 sys_close(fd);
62539 }
62540 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62541- fd = sys_open("/dev/console", O_RDWR, 0);
62542+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62543 if (fd >= 0) {
62544 sys_ioctl(fd, TCGETS, (long)&termios);
62545 termios.c_lflag &= ~ICANON;
62546 sys_ioctl(fd, TCSETSF, (long)&termios);
62547- sys_read(fd, &c, 1);
62548+ sys_read(fd, (char __user *)&c, 1);
62549 termios.c_lflag |= ICANON;
62550 sys_ioctl(fd, TCSETSF, (long)&termios);
62551 sys_close(fd);
62552@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
62553 mount_root();
62554 out:
62555 devtmpfs_mount("dev");
62556- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62557- sys_chroot((const char __user __force *)".");
62558+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62559+ sys_chroot((const char __force_user *)".");
62560 }
62561diff -urNp linux-3.1.4/init/do_mounts.h linux-3.1.4/init/do_mounts.h
62562--- linux-3.1.4/init/do_mounts.h 2011-11-11 15:19:27.000000000 -0500
62563+++ linux-3.1.4/init/do_mounts.h 2011-11-16 18:39:08.000000000 -0500
62564@@ -15,15 +15,15 @@ extern int root_mountflags;
62565
62566 static inline int create_dev(char *name, dev_t dev)
62567 {
62568- sys_unlink(name);
62569- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62570+ sys_unlink((char __force_user *)name);
62571+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62572 }
62573
62574 #if BITS_PER_LONG == 32
62575 static inline u32 bstat(char *name)
62576 {
62577 struct stat64 stat;
62578- if (sys_stat64(name, &stat) != 0)
62579+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62580 return 0;
62581 if (!S_ISBLK(stat.st_mode))
62582 return 0;
62583@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62584 static inline u32 bstat(char *name)
62585 {
62586 struct stat stat;
62587- if (sys_newstat(name, &stat) != 0)
62588+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62589 return 0;
62590 if (!S_ISBLK(stat.st_mode))
62591 return 0;
62592diff -urNp linux-3.1.4/init/do_mounts_initrd.c linux-3.1.4/init/do_mounts_initrd.c
62593--- linux-3.1.4/init/do_mounts_initrd.c 2011-11-11 15:19:27.000000000 -0500
62594+++ linux-3.1.4/init/do_mounts_initrd.c 2011-11-16 18:39:08.000000000 -0500
62595@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62596 create_dev("/dev/root.old", Root_RAM0);
62597 /* mount initrd on rootfs' /root */
62598 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62599- sys_mkdir("/old", 0700);
62600- root_fd = sys_open("/", 0, 0);
62601- old_fd = sys_open("/old", 0, 0);
62602+ sys_mkdir((const char __force_user *)"/old", 0700);
62603+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
62604+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62605 /* move initrd over / and chdir/chroot in initrd root */
62606- sys_chdir("/root");
62607- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62608- sys_chroot(".");
62609+ sys_chdir((const char __force_user *)"/root");
62610+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62611+ sys_chroot((const char __force_user *)".");
62612
62613 /*
62614 * In case that a resume from disk is carried out by linuxrc or one of
62615@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62616
62617 /* move initrd to rootfs' /old */
62618 sys_fchdir(old_fd);
62619- sys_mount("/", ".", NULL, MS_MOVE, NULL);
62620+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62621 /* switch root and cwd back to / of rootfs */
62622 sys_fchdir(root_fd);
62623- sys_chroot(".");
62624+ sys_chroot((const char __force_user *)".");
62625 sys_close(old_fd);
62626 sys_close(root_fd);
62627
62628 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62629- sys_chdir("/old");
62630+ sys_chdir((const char __force_user *)"/old");
62631 return;
62632 }
62633
62634@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62635 mount_root();
62636
62637 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62638- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62639+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62640 if (!error)
62641 printk("okay\n");
62642 else {
62643- int fd = sys_open("/dev/root.old", O_RDWR, 0);
62644+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62645 if (error == -ENOENT)
62646 printk("/initrd does not exist. Ignored.\n");
62647 else
62648 printk("failed\n");
62649 printk(KERN_NOTICE "Unmounting old root\n");
62650- sys_umount("/old", MNT_DETACH);
62651+ sys_umount((char __force_user *)"/old", MNT_DETACH);
62652 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62653 if (fd < 0) {
62654 error = fd;
62655@@ -116,11 +116,11 @@ int __init initrd_load(void)
62656 * mounted in the normal path.
62657 */
62658 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62659- sys_unlink("/initrd.image");
62660+ sys_unlink((const char __force_user *)"/initrd.image");
62661 handle_initrd();
62662 return 1;
62663 }
62664 }
62665- sys_unlink("/initrd.image");
62666+ sys_unlink((const char __force_user *)"/initrd.image");
62667 return 0;
62668 }
62669diff -urNp linux-3.1.4/init/do_mounts_md.c linux-3.1.4/init/do_mounts_md.c
62670--- linux-3.1.4/init/do_mounts_md.c 2011-11-11 15:19:27.000000000 -0500
62671+++ linux-3.1.4/init/do_mounts_md.c 2011-11-16 18:39:08.000000000 -0500
62672@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62673 partitioned ? "_d" : "", minor,
62674 md_setup_args[ent].device_names);
62675
62676- fd = sys_open(name, 0, 0);
62677+ fd = sys_open((char __force_user *)name, 0, 0);
62678 if (fd < 0) {
62679 printk(KERN_ERR "md: open failed - cannot start "
62680 "array %s\n", name);
62681@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62682 * array without it
62683 */
62684 sys_close(fd);
62685- fd = sys_open(name, 0, 0);
62686+ fd = sys_open((char __force_user *)name, 0, 0);
62687 sys_ioctl(fd, BLKRRPART, 0);
62688 }
62689 sys_close(fd);
62690@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62691
62692 wait_for_device_probe();
62693
62694- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62695+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62696 if (fd >= 0) {
62697 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62698 sys_close(fd);
62699diff -urNp linux-3.1.4/init/initramfs.c linux-3.1.4/init/initramfs.c
62700--- linux-3.1.4/init/initramfs.c 2011-11-11 15:19:27.000000000 -0500
62701+++ linux-3.1.4/init/initramfs.c 2011-11-16 18:39:08.000000000 -0500
62702@@ -74,7 +74,7 @@ static void __init free_hash(void)
62703 }
62704 }
62705
62706-static long __init do_utime(char __user *filename, time_t mtime)
62707+static long __init do_utime(__force char __user *filename, time_t mtime)
62708 {
62709 struct timespec t[2];
62710
62711@@ -109,7 +109,7 @@ static void __init dir_utime(void)
62712 struct dir_entry *de, *tmp;
62713 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62714 list_del(&de->list);
62715- do_utime(de->name, de->mtime);
62716+ do_utime((char __force_user *)de->name, de->mtime);
62717 kfree(de->name);
62718 kfree(de);
62719 }
62720@@ -271,7 +271,7 @@ static int __init maybe_link(void)
62721 if (nlink >= 2) {
62722 char *old = find_link(major, minor, ino, mode, collected);
62723 if (old)
62724- return (sys_link(old, collected) < 0) ? -1 : 1;
62725+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62726 }
62727 return 0;
62728 }
62729@@ -280,11 +280,11 @@ static void __init clean_path(char *path
62730 {
62731 struct stat st;
62732
62733- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62734+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62735 if (S_ISDIR(st.st_mode))
62736- sys_rmdir(path);
62737+ sys_rmdir((char __force_user *)path);
62738 else
62739- sys_unlink(path);
62740+ sys_unlink((char __force_user *)path);
62741 }
62742 }
62743
62744@@ -305,7 +305,7 @@ static int __init do_name(void)
62745 int openflags = O_WRONLY|O_CREAT;
62746 if (ml != 1)
62747 openflags |= O_TRUNC;
62748- wfd = sys_open(collected, openflags, mode);
62749+ wfd = sys_open((char __force_user *)collected, openflags, mode);
62750
62751 if (wfd >= 0) {
62752 sys_fchown(wfd, uid, gid);
62753@@ -317,17 +317,17 @@ static int __init do_name(void)
62754 }
62755 }
62756 } else if (S_ISDIR(mode)) {
62757- sys_mkdir(collected, mode);
62758- sys_chown(collected, uid, gid);
62759- sys_chmod(collected, mode);
62760+ sys_mkdir((char __force_user *)collected, mode);
62761+ sys_chown((char __force_user *)collected, uid, gid);
62762+ sys_chmod((char __force_user *)collected, mode);
62763 dir_add(collected, mtime);
62764 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62765 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62766 if (maybe_link() == 0) {
62767- sys_mknod(collected, mode, rdev);
62768- sys_chown(collected, uid, gid);
62769- sys_chmod(collected, mode);
62770- do_utime(collected, mtime);
62771+ sys_mknod((char __force_user *)collected, mode, rdev);
62772+ sys_chown((char __force_user *)collected, uid, gid);
62773+ sys_chmod((char __force_user *)collected, mode);
62774+ do_utime((char __force_user *)collected, mtime);
62775 }
62776 }
62777 return 0;
62778@@ -336,15 +336,15 @@ static int __init do_name(void)
62779 static int __init do_copy(void)
62780 {
62781 if (count >= body_len) {
62782- sys_write(wfd, victim, body_len);
62783+ sys_write(wfd, (char __force_user *)victim, body_len);
62784 sys_close(wfd);
62785- do_utime(vcollected, mtime);
62786+ do_utime((char __force_user *)vcollected, mtime);
62787 kfree(vcollected);
62788 eat(body_len);
62789 state = SkipIt;
62790 return 0;
62791 } else {
62792- sys_write(wfd, victim, count);
62793+ sys_write(wfd, (char __force_user *)victim, count);
62794 body_len -= count;
62795 eat(count);
62796 return 1;
62797@@ -355,9 +355,9 @@ static int __init do_symlink(void)
62798 {
62799 collected[N_ALIGN(name_len) + body_len] = '\0';
62800 clean_path(collected, 0);
62801- sys_symlink(collected + N_ALIGN(name_len), collected);
62802- sys_lchown(collected, uid, gid);
62803- do_utime(collected, mtime);
62804+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62805+ sys_lchown((char __force_user *)collected, uid, gid);
62806+ do_utime((char __force_user *)collected, mtime);
62807 state = SkipIt;
62808 next_state = Reset;
62809 return 0;
62810diff -urNp linux-3.1.4/init/Kconfig linux-3.1.4/init/Kconfig
62811--- linux-3.1.4/init/Kconfig 2011-11-11 15:19:27.000000000 -0500
62812+++ linux-3.1.4/init/Kconfig 2011-11-16 18:39:08.000000000 -0500
62813@@ -1202,7 +1202,7 @@ config SLUB_DEBUG
62814
62815 config COMPAT_BRK
62816 bool "Disable heap randomization"
62817- default y
62818+ default n
62819 help
62820 Randomizing heap placement makes heap exploits harder, but it
62821 also breaks ancient binaries (including anything libc5 based).
62822diff -urNp linux-3.1.4/init/main.c linux-3.1.4/init/main.c
62823--- linux-3.1.4/init/main.c 2011-11-11 15:19:27.000000000 -0500
62824+++ linux-3.1.4/init/main.c 2011-11-16 18:40:44.000000000 -0500
62825@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
62826 extern void tc_init(void);
62827 #endif
62828
62829+extern void grsecurity_init(void);
62830+
62831 /*
62832 * Debug helper: via this flag we know that we are in 'early bootup code'
62833 * where only the boot processor is running with IRQ disabled. This means
62834@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
62835
62836 __setup("reset_devices", set_reset_devices);
62837
62838+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62839+extern char pax_enter_kernel_user[];
62840+extern char pax_exit_kernel_user[];
62841+extern pgdval_t clone_pgd_mask;
62842+#endif
62843+
62844+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62845+static int __init setup_pax_nouderef(char *str)
62846+{
62847+#ifdef CONFIG_X86_32
62848+ unsigned int cpu;
62849+ struct desc_struct *gdt;
62850+
62851+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
62852+ gdt = get_cpu_gdt_table(cpu);
62853+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62854+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62855+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62856+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62857+ }
62858+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62859+#else
62860+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62861+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62862+ clone_pgd_mask = ~(pgdval_t)0UL;
62863+#endif
62864+
62865+ return 0;
62866+}
62867+early_param("pax_nouderef", setup_pax_nouderef);
62868+#endif
62869+
62870+#ifdef CONFIG_PAX_SOFTMODE
62871+int pax_softmode;
62872+
62873+static int __init setup_pax_softmode(char *str)
62874+{
62875+ get_option(&str, &pax_softmode);
62876+ return 1;
62877+}
62878+__setup("pax_softmode=", setup_pax_softmode);
62879+#endif
62880+
62881 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62882 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62883 static const char *panic_later, *panic_param;
62884@@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(ini
62885 {
62886 int count = preempt_count();
62887 int ret;
62888+ const char *msg1 = "", *msg2 = "";
62889
62890 if (initcall_debug)
62891 ret = do_one_initcall_debug(fn);
62892@@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(ini
62893 sprintf(msgbuf, "error code %d ", ret);
62894
62895 if (preempt_count() != count) {
62896- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62897+ msg1 = " preemption imbalance";
62898 preempt_count() = count;
62899 }
62900 if (irqs_disabled()) {
62901- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62902+ msg2 = " disabled interrupts";
62903 local_irq_enable();
62904 }
62905- if (msgbuf[0]) {
62906- printk("initcall %pF returned with %s\n", fn, msgbuf);
62907+ if (msgbuf[0] || *msg1 || *msg2) {
62908+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62909 }
62910
62911 return ret;
62912@@ -817,7 +863,7 @@ static int __init kernel_init(void * unu
62913 do_basic_setup();
62914
62915 /* Open the /dev/console on the rootfs, this should never fail */
62916- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62917+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62918 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62919
62920 (void) sys_dup(0);
62921@@ -830,11 +876,13 @@ static int __init kernel_init(void * unu
62922 if (!ramdisk_execute_command)
62923 ramdisk_execute_command = "/init";
62924
62925- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62926+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62927 ramdisk_execute_command = NULL;
62928 prepare_namespace();
62929 }
62930
62931+ grsecurity_init();
62932+
62933 /*
62934 * Ok, we have completed the initial bootup, and
62935 * we're essentially up and running. Get rid of the
62936diff -urNp linux-3.1.4/ipc/mqueue.c linux-3.1.4/ipc/mqueue.c
62937--- linux-3.1.4/ipc/mqueue.c 2011-11-11 15:19:27.000000000 -0500
62938+++ linux-3.1.4/ipc/mqueue.c 2011-11-16 18:40:44.000000000 -0500
62939@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(st
62940 mq_bytes = (mq_msg_tblsz +
62941 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62942
62943+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62944 spin_lock(&mq_lock);
62945 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62946 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62947diff -urNp linux-3.1.4/ipc/msg.c linux-3.1.4/ipc/msg.c
62948--- linux-3.1.4/ipc/msg.c 2011-11-11 15:19:27.000000000 -0500
62949+++ linux-3.1.4/ipc/msg.c 2011-11-16 18:39:08.000000000 -0500
62950@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
62951 return security_msg_queue_associate(msq, msgflg);
62952 }
62953
62954+static struct ipc_ops msg_ops = {
62955+ .getnew = newque,
62956+ .associate = msg_security,
62957+ .more_checks = NULL
62958+};
62959+
62960 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62961 {
62962 struct ipc_namespace *ns;
62963- struct ipc_ops msg_ops;
62964 struct ipc_params msg_params;
62965
62966 ns = current->nsproxy->ipc_ns;
62967
62968- msg_ops.getnew = newque;
62969- msg_ops.associate = msg_security;
62970- msg_ops.more_checks = NULL;
62971-
62972 msg_params.key = key;
62973 msg_params.flg = msgflg;
62974
62975diff -urNp linux-3.1.4/ipc/sem.c linux-3.1.4/ipc/sem.c
62976--- linux-3.1.4/ipc/sem.c 2011-11-11 15:19:27.000000000 -0500
62977+++ linux-3.1.4/ipc/sem.c 2011-11-16 18:40:44.000000000 -0500
62978@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
62979 return 0;
62980 }
62981
62982+static struct ipc_ops sem_ops = {
62983+ .getnew = newary,
62984+ .associate = sem_security,
62985+ .more_checks = sem_more_checks
62986+};
62987+
62988 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62989 {
62990 struct ipc_namespace *ns;
62991- struct ipc_ops sem_ops;
62992 struct ipc_params sem_params;
62993
62994 ns = current->nsproxy->ipc_ns;
62995@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62996 if (nsems < 0 || nsems > ns->sc_semmsl)
62997 return -EINVAL;
62998
62999- sem_ops.getnew = newary;
63000- sem_ops.associate = sem_security;
63001- sem_ops.more_checks = sem_more_checks;
63002-
63003 sem_params.key = key;
63004 sem_params.flg = semflg;
63005 sem_params.u.nsems = nsems;
63006@@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namesp
63007 int nsems;
63008 struct list_head tasks;
63009
63010+ pax_track_stack();
63011+
63012 sma = sem_lock_check(ns, semid);
63013 if (IS_ERR(sma))
63014 return PTR_ERR(sma);
63015@@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
63016 struct ipc_namespace *ns;
63017 struct list_head tasks;
63018
63019+ pax_track_stack();
63020+
63021 ns = current->nsproxy->ipc_ns;
63022
63023 if (nsops < 1 || semid < 0)
63024diff -urNp linux-3.1.4/ipc/shm.c linux-3.1.4/ipc/shm.c
63025--- linux-3.1.4/ipc/shm.c 2011-11-11 15:19:27.000000000 -0500
63026+++ linux-3.1.4/ipc/shm.c 2011-11-16 18:40:44.000000000 -0500
63027@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
63028 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
63029 #endif
63030
63031+#ifdef CONFIG_GRKERNSEC
63032+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63033+ const time_t shm_createtime, const uid_t cuid,
63034+ const int shmid);
63035+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63036+ const time_t shm_createtime);
63037+#endif
63038+
63039 void shm_init_ns(struct ipc_namespace *ns)
63040 {
63041 ns->shm_ctlmax = SHMMAX;
63042@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *
63043 shp->shm_lprid = 0;
63044 shp->shm_atim = shp->shm_dtim = 0;
63045 shp->shm_ctim = get_seconds();
63046+#ifdef CONFIG_GRKERNSEC
63047+ {
63048+ struct timespec timeval;
63049+ do_posix_clock_monotonic_gettime(&timeval);
63050+
63051+ shp->shm_createtime = timeval.tv_sec;
63052+ }
63053+#endif
63054 shp->shm_segsz = size;
63055 shp->shm_nattch = 0;
63056 shp->shm_file = file;
63057@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct
63058 return 0;
63059 }
63060
63061+static struct ipc_ops shm_ops = {
63062+ .getnew = newseg,
63063+ .associate = shm_security,
63064+ .more_checks = shm_more_checks
63065+};
63066+
63067 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
63068 {
63069 struct ipc_namespace *ns;
63070- struct ipc_ops shm_ops;
63071 struct ipc_params shm_params;
63072
63073 ns = current->nsproxy->ipc_ns;
63074
63075- shm_ops.getnew = newseg;
63076- shm_ops.associate = shm_security;
63077- shm_ops.more_checks = shm_more_checks;
63078-
63079 shm_params.key = key;
63080 shm_params.flg = shmflg;
63081 shm_params.u.size = size;
63082@@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
63083 case SHM_LOCK:
63084 case SHM_UNLOCK:
63085 {
63086- struct file *uninitialized_var(shm_file);
63087-
63088 lru_add_drain_all(); /* drain pagevecs to lru lists */
63089
63090 shp = shm_lock_check(ns, shmid);
63091@@ -1004,9 +1019,21 @@ long do_shmat(int shmid, char __user *sh
63092 if (err)
63093 goto out_unlock;
63094
63095+#ifdef CONFIG_GRKERNSEC
63096+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
63097+ shp->shm_perm.cuid, shmid) ||
63098+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
63099+ err = -EACCES;
63100+ goto out_unlock;
63101+ }
63102+#endif
63103+
63104 path = shp->shm_file->f_path;
63105 path_get(&path);
63106 shp->shm_nattch++;
63107+#ifdef CONFIG_GRKERNSEC
63108+ shp->shm_lapid = current->pid;
63109+#endif
63110 size = i_size_read(path.dentry->d_inode);
63111 shm_unlock(shp);
63112
63113diff -urNp linux-3.1.4/kernel/acct.c linux-3.1.4/kernel/acct.c
63114--- linux-3.1.4/kernel/acct.c 2011-11-11 15:19:27.000000000 -0500
63115+++ linux-3.1.4/kernel/acct.c 2011-11-16 18:39:08.000000000 -0500
63116@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
63117 */
63118 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
63119 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
63120- file->f_op->write(file, (char *)&ac,
63121+ file->f_op->write(file, (char __force_user *)&ac,
63122 sizeof(acct_t), &file->f_pos);
63123 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
63124 set_fs(fs);
63125diff -urNp linux-3.1.4/kernel/audit.c linux-3.1.4/kernel/audit.c
63126--- linux-3.1.4/kernel/audit.c 2011-11-11 15:19:27.000000000 -0500
63127+++ linux-3.1.4/kernel/audit.c 2011-11-16 18:39:08.000000000 -0500
63128@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
63129 3) suppressed due to audit_rate_limit
63130 4) suppressed due to audit_backlog_limit
63131 */
63132-static atomic_t audit_lost = ATOMIC_INIT(0);
63133+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
63134
63135 /* The netlink socket. */
63136 static struct sock *audit_sock;
63137@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
63138 unsigned long now;
63139 int print;
63140
63141- atomic_inc(&audit_lost);
63142+ atomic_inc_unchecked(&audit_lost);
63143
63144 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
63145
63146@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
63147 printk(KERN_WARNING
63148 "audit: audit_lost=%d audit_rate_limit=%d "
63149 "audit_backlog_limit=%d\n",
63150- atomic_read(&audit_lost),
63151+ atomic_read_unchecked(&audit_lost),
63152 audit_rate_limit,
63153 audit_backlog_limit);
63154 audit_panic(message);
63155@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_b
63156 status_set.pid = audit_pid;
63157 status_set.rate_limit = audit_rate_limit;
63158 status_set.backlog_limit = audit_backlog_limit;
63159- status_set.lost = atomic_read(&audit_lost);
63160+ status_set.lost = atomic_read_unchecked(&audit_lost);
63161 status_set.backlog = skb_queue_len(&audit_skb_queue);
63162 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
63163 &status_set, sizeof(status_set));
63164diff -urNp linux-3.1.4/kernel/auditsc.c linux-3.1.4/kernel/auditsc.c
63165--- linux-3.1.4/kernel/auditsc.c 2011-11-11 15:19:27.000000000 -0500
63166+++ linux-3.1.4/kernel/auditsc.c 2011-11-16 18:39:08.000000000 -0500
63167@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
63168 }
63169
63170 /* global counter which is incremented every time something logs in */
63171-static atomic_t session_id = ATOMIC_INIT(0);
63172+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
63173
63174 /**
63175 * audit_set_loginuid - set a task's audit_context loginuid
63176@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
63177 */
63178 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
63179 {
63180- unsigned int sessionid = atomic_inc_return(&session_id);
63181+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
63182 struct audit_context *context = task->audit_context;
63183
63184 if (context && context->in_syscall) {
63185diff -urNp linux-3.1.4/kernel/capability.c linux-3.1.4/kernel/capability.c
63186--- linux-3.1.4/kernel/capability.c 2011-11-11 15:19:27.000000000 -0500
63187+++ linux-3.1.4/kernel/capability.c 2011-11-16 18:40:44.000000000 -0500
63188@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
63189 * before modification is attempted and the application
63190 * fails.
63191 */
63192+ if (tocopy > ARRAY_SIZE(kdata))
63193+ return -EFAULT;
63194+
63195 if (copy_to_user(dataptr, kdata, tocopy
63196 * sizeof(struct __user_cap_data_struct))) {
63197 return -EFAULT;
63198@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
63199 BUG();
63200 }
63201
63202- if (security_capable(ns, current_cred(), cap) == 0) {
63203+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
63204 current->flags |= PF_SUPERPRIV;
63205 return true;
63206 }
63207@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
63208 }
63209 EXPORT_SYMBOL(ns_capable);
63210
63211+bool ns_capable_nolog(struct user_namespace *ns, int cap)
63212+{
63213+ if (unlikely(!cap_valid(cap))) {
63214+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
63215+ BUG();
63216+ }
63217+
63218+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
63219+ current->flags |= PF_SUPERPRIV;
63220+ return true;
63221+ }
63222+ return false;
63223+}
63224+EXPORT_SYMBOL(ns_capable_nolog);
63225+
63226+bool capable_nolog(int cap)
63227+{
63228+ return ns_capable_nolog(&init_user_ns, cap);
63229+}
63230+EXPORT_SYMBOL(capable_nolog);
63231+
63232 /**
63233 * task_ns_capable - Determine whether current task has a superior
63234 * capability targeted at a specific task's user namespace.
63235@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
63236 }
63237 EXPORT_SYMBOL(task_ns_capable);
63238
63239+bool task_ns_capable_nolog(struct task_struct *t, int cap)
63240+{
63241+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
63242+}
63243+EXPORT_SYMBOL(task_ns_capable_nolog);
63244+
63245 /**
63246 * nsown_capable - Check superior capability to one's own user_ns
63247 * @cap: The capability in question
63248diff -urNp linux-3.1.4/kernel/cgroup.c linux-3.1.4/kernel/cgroup.c
63249--- linux-3.1.4/kernel/cgroup.c 2011-11-11 15:19:27.000000000 -0500
63250+++ linux-3.1.4/kernel/cgroup.c 2011-11-16 18:40:44.000000000 -0500
63251@@ -595,6 +595,8 @@ static struct css_set *find_css_set(
63252 struct hlist_head *hhead;
63253 struct cg_cgroup_link *link;
63254
63255+ pax_track_stack();
63256+
63257 /* First see if we already have a cgroup group that matches
63258 * the desired set */
63259 read_lock(&css_set_lock);
63260diff -urNp linux-3.1.4/kernel/compat.c linux-3.1.4/kernel/compat.c
63261--- linux-3.1.4/kernel/compat.c 2011-11-11 15:19:27.000000000 -0500
63262+++ linux-3.1.4/kernel/compat.c 2011-11-16 18:40:44.000000000 -0500
63263@@ -13,6 +13,7 @@
63264
63265 #include <linux/linkage.h>
63266 #include <linux/compat.h>
63267+#include <linux/module.h>
63268 #include <linux/errno.h>
63269 #include <linux/time.h>
63270 #include <linux/signal.h>
63271@@ -167,7 +168,7 @@ static long compat_nanosleep_restart(str
63272 mm_segment_t oldfs;
63273 long ret;
63274
63275- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
63276+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
63277 oldfs = get_fs();
63278 set_fs(KERNEL_DS);
63279 ret = hrtimer_nanosleep_restart(restart);
63280@@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(str
63281 oldfs = get_fs();
63282 set_fs(KERNEL_DS);
63283 ret = hrtimer_nanosleep(&tu,
63284- rmtp ? (struct timespec __user *)&rmt : NULL,
63285+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
63286 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
63287 set_fs(oldfs);
63288
63289@@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(co
63290 mm_segment_t old_fs = get_fs();
63291
63292 set_fs(KERNEL_DS);
63293- ret = sys_sigpending((old_sigset_t __user *) &s);
63294+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
63295 set_fs(old_fs);
63296 if (ret == 0)
63297 ret = put_user(s, set);
63298@@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(i
63299 old_fs = get_fs();
63300 set_fs(KERNEL_DS);
63301 ret = sys_sigprocmask(how,
63302- set ? (old_sigset_t __user *) &s : NULL,
63303- oset ? (old_sigset_t __user *) &s : NULL);
63304+ set ? (old_sigset_t __force_user *) &s : NULL,
63305+ oset ? (old_sigset_t __force_user *) &s : NULL);
63306 set_fs(old_fs);
63307 if (ret == 0)
63308 if (oset)
63309@@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit
63310 mm_segment_t old_fs = get_fs();
63311
63312 set_fs(KERNEL_DS);
63313- ret = sys_old_getrlimit(resource, &r);
63314+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
63315 set_fs(old_fs);
63316
63317 if (!ret) {
63318@@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int
63319 mm_segment_t old_fs = get_fs();
63320
63321 set_fs(KERNEL_DS);
63322- ret = sys_getrusage(who, (struct rusage __user *) &r);
63323+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
63324 set_fs(old_fs);
63325
63326 if (ret)
63327@@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compa
63328 set_fs (KERNEL_DS);
63329 ret = sys_wait4(pid,
63330 (stat_addr ?
63331- (unsigned int __user *) &status : NULL),
63332- options, (struct rusage __user *) &r);
63333+ (unsigned int __force_user *) &status : NULL),
63334+ options, (struct rusage __force_user *) &r);
63335 set_fs (old_fs);
63336
63337 if (ret > 0) {
63338@@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int wh
63339 memset(&info, 0, sizeof(info));
63340
63341 set_fs(KERNEL_DS);
63342- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
63343- uru ? (struct rusage __user *)&ru : NULL);
63344+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
63345+ uru ? (struct rusage __force_user *)&ru : NULL);
63346 set_fs(old_fs);
63347
63348 if ((ret < 0) || (info.si_signo == 0))
63349@@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t ti
63350 oldfs = get_fs();
63351 set_fs(KERNEL_DS);
63352 err = sys_timer_settime(timer_id, flags,
63353- (struct itimerspec __user *) &newts,
63354- (struct itimerspec __user *) &oldts);
63355+ (struct itimerspec __force_user *) &newts,
63356+ (struct itimerspec __force_user *) &oldts);
63357 set_fs(oldfs);
63358 if (!err && old && put_compat_itimerspec(old, &oldts))
63359 return -EFAULT;
63360@@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t ti
63361 oldfs = get_fs();
63362 set_fs(KERNEL_DS);
63363 err = sys_timer_gettime(timer_id,
63364- (struct itimerspec __user *) &ts);
63365+ (struct itimerspec __force_user *) &ts);
63366 set_fs(oldfs);
63367 if (!err && put_compat_itimerspec(setting, &ts))
63368 return -EFAULT;
63369@@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t
63370 oldfs = get_fs();
63371 set_fs(KERNEL_DS);
63372 err = sys_clock_settime(which_clock,
63373- (struct timespec __user *) &ts);
63374+ (struct timespec __force_user *) &ts);
63375 set_fs(oldfs);
63376 return err;
63377 }
63378@@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t
63379 oldfs = get_fs();
63380 set_fs(KERNEL_DS);
63381 err = sys_clock_gettime(which_clock,
63382- (struct timespec __user *) &ts);
63383+ (struct timespec __force_user *) &ts);
63384 set_fs(oldfs);
63385 if (!err && put_compat_timespec(&ts, tp))
63386 return -EFAULT;
63387@@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t
63388
63389 oldfs = get_fs();
63390 set_fs(KERNEL_DS);
63391- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
63392+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
63393 set_fs(oldfs);
63394
63395 err = compat_put_timex(utp, &txc);
63396@@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t w
63397 oldfs = get_fs();
63398 set_fs(KERNEL_DS);
63399 err = sys_clock_getres(which_clock,
63400- (struct timespec __user *) &ts);
63401+ (struct timespec __force_user *) &ts);
63402 set_fs(oldfs);
63403 if (!err && tp && put_compat_timespec(&ts, tp))
63404 return -EFAULT;
63405@@ -729,9 +730,9 @@ static long compat_clock_nanosleep_resta
63406 long err;
63407 mm_segment_t oldfs;
63408 struct timespec tu;
63409- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
63410+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
63411
63412- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
63413+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
63414 oldfs = get_fs();
63415 set_fs(KERNEL_DS);
63416 err = clock_nanosleep_restart(restart);
63417@@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_
63418 oldfs = get_fs();
63419 set_fs(KERNEL_DS);
63420 err = sys_clock_nanosleep(which_clock, flags,
63421- (struct timespec __user *) &in,
63422- (struct timespec __user *) &out);
63423+ (struct timespec __force_user *) &in,
63424+ (struct timespec __force_user *) &out);
63425 set_fs(oldfs);
63426
63427 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
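
Editorial note: the kernel/compat.c hunks above consistently swap __user for __force_user on casts taken while the address limit is raised with set_fs(KERNEL_DS), so that sparse accepts a kernel pointer being handed to a syscall that expects a user pointer. A minimal userspace sketch of what those annotations mean to sparse; the macro definitions mirror the kernel's __CHECKER__ idiom but are redefined locally here, so treat them as illustrative rather than the kernel's own headers:

/* sketch.c -- builds with gcc as-is; run `sparse sketch.c` to see the checks */
#ifdef __CHECKER__                     /* defined only while sparse is running */
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif

struct rusage_stub { long dummy; };    /* stand-in, not the real struct rusage */

/* pretend syscall: declared to take a user-space pointer */
static long sys_getrusage_stub(struct rusage_stub __user *ru)
{
    (void)ru;                          /* real code would copy_to_user() here */
    return 0;
}

int main(void)
{
    struct rusage_stub r;

    /* Without __force, sparse warns that the cast adds an address space.
     * With it, the deliberate kernel-pointer-as-user-pointer cast is accepted,
     * which is what the __force_user casts in the hunks above spell out. */
    return (int)sys_getrusage_stub((__force struct rusage_stub __user *)&r);
}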
63428diff -urNp linux-3.1.4/kernel/configs.c linux-3.1.4/kernel/configs.c
63429--- linux-3.1.4/kernel/configs.c 2011-11-11 15:19:27.000000000 -0500
63430+++ linux-3.1.4/kernel/configs.c 2011-11-16 18:40:44.000000000 -0500
63431@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
63432 struct proc_dir_entry *entry;
63433
63434 /* create the current config file */
63435+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
63436+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
63437+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
63438+ &ikconfig_file_ops);
63439+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63440+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
63441+ &ikconfig_file_ops);
63442+#endif
63443+#else
63444 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
63445 &ikconfig_file_ops);
63446+#endif
63447+
63448 if (!entry)
63449 return -ENOMEM;
63450
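
Editorial note: the kernel/configs.c hunk narrows who may read /proc/config.gz by picking different mode bits per GRKERNSEC option. For reference, the chosen permission sets resolve to the octal modes below (small userspace check; S_IRUGO is a kernel-only macro, so its userspace equivalent is spelled out):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    printf("S_IRUSR            = %04o\n", S_IRUSR);                        /* 0400: root only   */
    printf("S_IRUSR | S_IRGRP  = %04o\n", S_IRUSR | S_IRGRP);              /* 0440: root + group */
    printf("S_IRUGO equivalent = %04o\n", S_IRUSR | S_IRGRP | S_IROTH);    /* 0444: everyone     */
    return 0;
}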
63451diff -urNp linux-3.1.4/kernel/cred.c linux-3.1.4/kernel/cred.c
63452--- linux-3.1.4/kernel/cred.c 2011-11-11 15:19:27.000000000 -0500
63453+++ linux-3.1.4/kernel/cred.c 2011-11-16 18:40:44.000000000 -0500
63454@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
63455 */
63456 void __put_cred(struct cred *cred)
63457 {
63458+ pax_track_stack();
63459+
63460 kdebug("__put_cred(%p{%d,%d})", cred,
63461 atomic_read(&cred->usage),
63462 read_cred_subscribers(cred));
63463@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
63464 {
63465 struct cred *cred;
63466
63467+ pax_track_stack();
63468+
63469 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
63470 atomic_read(&tsk->cred->usage),
63471 read_cred_subscribers(tsk->cred));
63472@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
63473 {
63474 const struct cred *cred;
63475
63476+ pax_track_stack();
63477+
63478 rcu_read_lock();
63479
63480 do {
63481@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
63482 {
63483 struct cred *new;
63484
63485+ pax_track_stack();
63486+
63487 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
63488 if (!new)
63489 return NULL;
63490@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
63491 const struct cred *old;
63492 struct cred *new;
63493
63494+ pax_track_stack();
63495+
63496 validate_process_creds();
63497
63498 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
63499@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
63500 struct thread_group_cred *tgcred = NULL;
63501 struct cred *new;
63502
63503+ pax_track_stack();
63504+
63505 #ifdef CONFIG_KEYS
63506 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
63507 if (!tgcred)
63508@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
63509 struct cred *new;
63510 int ret;
63511
63512+ pax_track_stack();
63513+
63514 if (
63515 #ifdef CONFIG_KEYS
63516 !p->cred->thread_keyring &&
63517@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
63518 struct task_struct *task = current;
63519 const struct cred *old = task->real_cred;
63520
63521+ pax_track_stack();
63522+
63523 kdebug("commit_creds(%p{%d,%d})", new,
63524 atomic_read(&new->usage),
63525 read_cred_subscribers(new));
63526@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
63527
63528 get_cred(new); /* we will require a ref for the subj creds too */
63529
63530+ gr_set_role_label(task, new->uid, new->gid);
63531+
63532 /* dumpability changes */
63533 if (old->euid != new->euid ||
63534 old->egid != new->egid ||
63535@@ -549,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
63536 */
63537 void abort_creds(struct cred *new)
63538 {
63539+ pax_track_stack();
63540+
63541 kdebug("abort_creds(%p{%d,%d})", new,
63542 atomic_read(&new->usage),
63543 read_cred_subscribers(new));
63544@@ -572,6 +592,8 @@ const struct cred *override_creds(const
63545 {
63546 const struct cred *old = current->cred;
63547
63548+ pax_track_stack();
63549+
63550 kdebug("override_creds(%p{%d,%d})", new,
63551 atomic_read(&new->usage),
63552 read_cred_subscribers(new));
63553@@ -601,6 +623,8 @@ void revert_creds(const struct cred *old
63554 {
63555 const struct cred *override = current->cred;
63556
63557+ pax_track_stack();
63558+
63559 kdebug("revert_creds(%p{%d,%d})", old,
63560 atomic_read(&old->usage),
63561 read_cred_subscribers(old));
63562@@ -647,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
63563 const struct cred *old;
63564 struct cred *new;
63565
63566+ pax_track_stack();
63567+
63568 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
63569 if (!new)
63570 return NULL;
63571@@ -701,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
63572 */
63573 int set_security_override(struct cred *new, u32 secid)
63574 {
63575+ pax_track_stack();
63576+
63577 return security_kernel_act_as(new, secid);
63578 }
63579 EXPORT_SYMBOL(set_security_override);
63580@@ -720,6 +748,8 @@ int set_security_override_from_ctx(struc
63581 u32 secid;
63582 int ret;
63583
63584+ pax_track_stack();
63585+
63586 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
63587 if (ret < 0)
63588 return ret;
63589diff -urNp linux-3.1.4/kernel/debug/debug_core.c linux-3.1.4/kernel/debug/debug_core.c
63590--- linux-3.1.4/kernel/debug/debug_core.c 2011-11-11 15:19:27.000000000 -0500
63591+++ linux-3.1.4/kernel/debug/debug_core.c 2011-11-16 18:39:08.000000000 -0500
63592@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
63593 */
63594 static atomic_t masters_in_kgdb;
63595 static atomic_t slaves_in_kgdb;
63596-static atomic_t kgdb_break_tasklet_var;
63597+static atomic_unchecked_t kgdb_break_tasklet_var;
63598 atomic_t kgdb_setting_breakpoint;
63599
63600 struct task_struct *kgdb_usethread;
63601@@ -129,7 +129,7 @@ int kgdb_single_step;
63602 static pid_t kgdb_sstep_pid;
63603
63604 /* to keep track of the CPU which is doing the single stepping*/
63605-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63606+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63607
63608 /*
63609 * If you are debugging a problem where roundup (the collection of
63610@@ -542,7 +542,7 @@ return_normal:
63611 * kernel will only try for the value of sstep_tries before
63612 * giving up and continuing on.
63613 */
63614- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63615+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63616 (kgdb_info[cpu].task &&
63617 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63618 atomic_set(&kgdb_active, -1);
63619@@ -636,8 +636,8 @@ cpu_master_loop:
63620 }
63621
63622 kgdb_restore:
63623- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63624- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63625+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63626+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63627 if (kgdb_info[sstep_cpu].task)
63628 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63629 else
63630@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
63631 static void kgdb_tasklet_bpt(unsigned long ing)
63632 {
63633 kgdb_breakpoint();
63634- atomic_set(&kgdb_break_tasklet_var, 0);
63635+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63636 }
63637
63638 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63639
63640 void kgdb_schedule_breakpoint(void)
63641 {
63642- if (atomic_read(&kgdb_break_tasklet_var) ||
63643+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63644 atomic_read(&kgdb_active) != -1 ||
63645 atomic_read(&kgdb_setting_breakpoint))
63646 return;
63647- atomic_inc(&kgdb_break_tasklet_var);
63648+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
63649 tasklet_schedule(&kgdb_tasklet_breakpoint);
63650 }
63651 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
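
Editorial note: the debug_core hunks (and several later ones, e.g. in kernel/events/core.c and kernel/lockdep.c) convert counters whose wrap-around is harmless to atomic_unchecked_t, the variant that PaX's reference-count overflow protection leaves alone. A rough userspace analogue of the distinction, using a compiler overflow builtin instead of the real kernel machinery; the function names are made up for illustration:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* "checked": refuse to let a signed counter wrap -- a loose stand-in for what
 * PaX REFCOUNT enforces on ordinary atomic_t counters */
static int checked_inc(int *v)
{
    int next;
    if (__builtin_add_overflow(*v, 1, &next))
        abort();                       /* the kernel traps instead of wrapping */
    return *v = next;
}

/* "unchecked": plain increment, wrap-around tolerated by design -- what the
 * atomic_*_unchecked() calls in the hunks above opt into */
static int unchecked_inc(int *v)
{
    return ++*v;
}

int main(void)
{
    int id = INT_MAX - 1;

    printf("unchecked: %d\n", unchecked_inc(&id));  /* prints INT_MAX */
    checked_inc(&id);                               /* overflow detected: abort()s */
    printf("not reached\n");
    return 0;
}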
63652diff -urNp linux-3.1.4/kernel/debug/kdb/kdb_main.c linux-3.1.4/kernel/debug/kdb/kdb_main.c
63653--- linux-3.1.4/kernel/debug/kdb/kdb_main.c 2011-11-11 15:19:27.000000000 -0500
63654+++ linux-3.1.4/kernel/debug/kdb/kdb_main.c 2011-11-16 18:39:08.000000000 -0500
63655@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
63656 list_for_each_entry(mod, kdb_modules, list) {
63657
63658 kdb_printf("%-20s%8u 0x%p ", mod->name,
63659- mod->core_size, (void *)mod);
63660+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
63661 #ifdef CONFIG_MODULE_UNLOAD
63662 kdb_printf("%4d ", module_refcount(mod));
63663 #endif
63664@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
63665 kdb_printf(" (Loading)");
63666 else
63667 kdb_printf(" (Live)");
63668- kdb_printf(" 0x%p", mod->module_core);
63669+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63670
63671 #ifdef CONFIG_MODULE_UNLOAD
63672 {
63673diff -urNp linux-3.1.4/kernel/events/core.c linux-3.1.4/kernel/events/core.c
63674--- linux-3.1.4/kernel/events/core.c 2011-11-11 15:19:27.000000000 -0500
63675+++ linux-3.1.4/kernel/events/core.c 2011-11-16 18:39:08.000000000 -0500
63676@@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_
63677 return 0;
63678 }
63679
63680-static atomic64_t perf_event_id;
63681+static atomic64_unchecked_t perf_event_id;
63682
63683 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63684 enum event_type_t event_type);
63685@@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info
63686
63687 static inline u64 perf_event_count(struct perf_event *event)
63688 {
63689- return local64_read(&event->count) + atomic64_read(&event->child_count);
63690+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63691 }
63692
63693 static u64 perf_event_read(struct perf_event *event)
63694@@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_ev
63695 mutex_lock(&event->child_mutex);
63696 total += perf_event_read(event);
63697 *enabled += event->total_time_enabled +
63698- atomic64_read(&event->child_total_time_enabled);
63699+ atomic64_read_unchecked(&event->child_total_time_enabled);
63700 *running += event->total_time_running +
63701- atomic64_read(&event->child_total_time_running);
63702+ atomic64_read_unchecked(&event->child_total_time_running);
63703
63704 list_for_each_entry(child, &event->child_list, child_list) {
63705 total += perf_event_read(child);
63706@@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct p
63707 userpg->offset -= local64_read(&event->hw.prev_count);
63708
63709 userpg->time_enabled = enabled +
63710- atomic64_read(&event->child_total_time_enabled);
63711+ atomic64_read_unchecked(&event->child_total_time_enabled);
63712
63713 userpg->time_running = running +
63714- atomic64_read(&event->child_total_time_running);
63715+ atomic64_read_unchecked(&event->child_total_time_running);
63716
63717 barrier();
63718 ++userpg->lock;
63719@@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct
63720 values[n++] = perf_event_count(event);
63721 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63722 values[n++] = enabled +
63723- atomic64_read(&event->child_total_time_enabled);
63724+ atomic64_read_unchecked(&event->child_total_time_enabled);
63725 }
63726 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63727 values[n++] = running +
63728- atomic64_read(&event->child_total_time_running);
63729+ atomic64_read_unchecked(&event->child_total_time_running);
63730 }
63731 if (read_format & PERF_FORMAT_ID)
63732 values[n++] = primary_event_id(event);
63733@@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct
63734 * need to add enough zero bytes after the string to handle
63735 * the 64bit alignment we do later.
63736 */
63737- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63738+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
63739 if (!buf) {
63740 name = strncpy(tmp, "//enomem", sizeof(tmp));
63741 goto got_name;
63742 }
63743- name = d_path(&file->f_path, buf, PATH_MAX);
63744+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63745 if (IS_ERR(name)) {
63746 name = strncpy(tmp, "//toolong", sizeof(tmp));
63747 goto got_name;
63748@@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr
63749 event->parent = parent_event;
63750
63751 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63752- event->id = atomic64_inc_return(&perf_event_id);
63753+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
63754
63755 event->state = PERF_EVENT_STATE_INACTIVE;
63756
63757@@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf
63758 /*
63759 * Add back the child's count to the parent's count:
63760 */
63761- atomic64_add(child_val, &parent_event->child_count);
63762- atomic64_add(child_event->total_time_enabled,
63763+ atomic64_add_unchecked(child_val, &parent_event->child_count);
63764+ atomic64_add_unchecked(child_event->total_time_enabled,
63765 &parent_event->child_total_time_enabled);
63766- atomic64_add(child_event->total_time_running,
63767+ atomic64_add_unchecked(child_event->total_time_running,
63768 &parent_event->child_total_time_running);
63769
63770 /*
63771diff -urNp linux-3.1.4/kernel/exit.c linux-3.1.4/kernel/exit.c
63772--- linux-3.1.4/kernel/exit.c 2011-11-11 15:19:27.000000000 -0500
63773+++ linux-3.1.4/kernel/exit.c 2011-11-16 19:33:48.000000000 -0500
63774@@ -57,6 +57,10 @@
63775 #include <asm/pgtable.h>
63776 #include <asm/mmu_context.h>
63777
63778+#ifdef CONFIG_GRKERNSEC
63779+extern rwlock_t grsec_exec_file_lock;
63780+#endif
63781+
63782 static void exit_mm(struct task_struct * tsk);
63783
63784 static void __unhash_process(struct task_struct *p, bool group_dead)
63785@@ -168,6 +172,10 @@ void release_task(struct task_struct * p
63786 struct task_struct *leader;
63787 int zap_leader;
63788 repeat:
63789+#ifdef CONFIG_NET
63790+ gr_del_task_from_ip_table(p);
63791+#endif
63792+
63793 /* don't need to get the RCU readlock here - the process is dead and
63794 * can't be modifying its own credentials. But shut RCU-lockdep up */
63795 rcu_read_lock();
63796@@ -324,11 +332,22 @@ static void reparent_to_kthreadd(void)
63797 {
63798 write_lock_irq(&tasklist_lock);
63799
63800+#ifdef CONFIG_GRKERNSEC
63801+ write_lock(&grsec_exec_file_lock);
63802+ if (current->exec_file) {
63803+ fput(current->exec_file);
63804+ current->exec_file = NULL;
63805+ }
63806+ write_unlock(&grsec_exec_file_lock);
63807+#endif
63808+
63809 ptrace_unlink(current);
63810 /* Reparent to init */
63811 current->real_parent = current->parent = kthreadd_task;
63812 list_move_tail(&current->sibling, &current->real_parent->children);
63813
63814+ gr_set_kernel_label(current);
63815+
63816 /* Set the exit signal to SIGCHLD so we signal init on exit */
63817 current->exit_signal = SIGCHLD;
63818
63819@@ -380,7 +399,7 @@ int allow_signal(int sig)
63820 * know it'll be handled, so that they don't get converted to
63821 * SIGKILL or just silently dropped.
63822 */
63823- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63824+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63825 recalc_sigpending();
63826 spin_unlock_irq(&current->sighand->siglock);
63827 return 0;
63828@@ -416,6 +435,17 @@ void daemonize(const char *name, ...)
63829 vsnprintf(current->comm, sizeof(current->comm), name, args);
63830 va_end(args);
63831
63832+#ifdef CONFIG_GRKERNSEC
63833+ write_lock(&grsec_exec_file_lock);
63834+ if (current->exec_file) {
63835+ fput(current->exec_file);
63836+ current->exec_file = NULL;
63837+ }
63838+ write_unlock(&grsec_exec_file_lock);
63839+#endif
63840+
63841+ gr_set_kernel_label(current);
63842+
63843 /*
63844 * If we were started as result of loading a module, close all of the
63845 * user space pages. We don't need them, and if we didn't close them
63846@@ -895,6 +925,8 @@ NORET_TYPE void do_exit(long code)
63847 struct task_struct *tsk = current;
63848 int group_dead;
63849
63850+ set_fs(USER_DS);
63851+
63852 profile_task_exit(tsk);
63853
63854 WARN_ON(blk_needs_flush_plug(tsk));
63855@@ -911,7 +943,6 @@ NORET_TYPE void do_exit(long code)
63856 * mm_release()->clear_child_tid() from writing to a user-controlled
63857 * kernel address.
63858 */
63859- set_fs(USER_DS);
63860
63861 ptrace_event(PTRACE_EVENT_EXIT, code);
63862
63863@@ -973,6 +1004,9 @@ NORET_TYPE void do_exit(long code)
63864 tsk->exit_code = code;
63865 taskstats_exit(tsk, group_dead);
63866
63867+ gr_acl_handle_psacct(tsk, code);
63868+ gr_acl_handle_exit();
63869+
63870 exit_mm(tsk);
63871
63872 if (group_dead)
63873diff -urNp linux-3.1.4/kernel/fork.c linux-3.1.4/kernel/fork.c
63874--- linux-3.1.4/kernel/fork.c 2011-11-11 15:19:27.000000000 -0500
63875+++ linux-3.1.4/kernel/fork.c 2011-11-16 19:36:31.000000000 -0500
63876@@ -285,7 +285,7 @@ static struct task_struct *dup_task_stru
63877 *stackend = STACK_END_MAGIC; /* for overflow detection */
63878
63879 #ifdef CONFIG_CC_STACKPROTECTOR
63880- tsk->stack_canary = get_random_int();
63881+ tsk->stack_canary = pax_get_random_long();
63882 #endif
63883
63884 /*
63885@@ -309,13 +309,77 @@ out:
63886 }
63887
63888 #ifdef CONFIG_MMU
63889+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63890+{
63891+ struct vm_area_struct *tmp;
63892+ unsigned long charge;
63893+ struct mempolicy *pol;
63894+ struct file *file;
63895+
63896+ charge = 0;
63897+ if (mpnt->vm_flags & VM_ACCOUNT) {
63898+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63899+ if (security_vm_enough_memory(len))
63900+ goto fail_nomem;
63901+ charge = len;
63902+ }
63903+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63904+ if (!tmp)
63905+ goto fail_nomem;
63906+ *tmp = *mpnt;
63907+ tmp->vm_mm = mm;
63908+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
63909+ pol = mpol_dup(vma_policy(mpnt));
63910+ if (IS_ERR(pol))
63911+ goto fail_nomem_policy;
63912+ vma_set_policy(tmp, pol);
63913+ if (anon_vma_fork(tmp, mpnt))
63914+ goto fail_nomem_anon_vma_fork;
63915+ tmp->vm_flags &= ~VM_LOCKED;
63916+ tmp->vm_next = tmp->vm_prev = NULL;
63917+ tmp->vm_mirror = NULL;
63918+ file = tmp->vm_file;
63919+ if (file) {
63920+ struct inode *inode = file->f_path.dentry->d_inode;
63921+ struct address_space *mapping = file->f_mapping;
63922+
63923+ get_file(file);
63924+ if (tmp->vm_flags & VM_DENYWRITE)
63925+ atomic_dec(&inode->i_writecount);
63926+ mutex_lock(&mapping->i_mmap_mutex);
63927+ if (tmp->vm_flags & VM_SHARED)
63928+ mapping->i_mmap_writable++;
63929+ flush_dcache_mmap_lock(mapping);
63930+ /* insert tmp into the share list, just after mpnt */
63931+ vma_prio_tree_add(tmp, mpnt);
63932+ flush_dcache_mmap_unlock(mapping);
63933+ mutex_unlock(&mapping->i_mmap_mutex);
63934+ }
63935+
63936+ /*
63937+ * Clear hugetlb-related page reserves for children. This only
63938+ * affects MAP_PRIVATE mappings. Faults generated by the child
63939+ * are not guaranteed to succeed, even if read-only
63940+ */
63941+ if (is_vm_hugetlb_page(tmp))
63942+ reset_vma_resv_huge_pages(tmp);
63943+
63944+ return tmp;
63945+
63946+fail_nomem_anon_vma_fork:
63947+ mpol_put(pol);
63948+fail_nomem_policy:
63949+ kmem_cache_free(vm_area_cachep, tmp);
63950+fail_nomem:
63951+ vm_unacct_memory(charge);
63952+ return NULL;
63953+}
63954+
63955 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63956 {
63957 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63958 struct rb_node **rb_link, *rb_parent;
63959 int retval;
63960- unsigned long charge;
63961- struct mempolicy *pol;
63962
63963 down_write(&oldmm->mmap_sem);
63964 flush_cache_dup_mm(oldmm);
63965@@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm
63966 mm->locked_vm = 0;
63967 mm->mmap = NULL;
63968 mm->mmap_cache = NULL;
63969- mm->free_area_cache = oldmm->mmap_base;
63970- mm->cached_hole_size = ~0UL;
63971+ mm->free_area_cache = oldmm->free_area_cache;
63972+ mm->cached_hole_size = oldmm->cached_hole_size;
63973 mm->map_count = 0;
63974 cpumask_clear(mm_cpumask(mm));
63975 mm->mm_rb = RB_ROOT;
63976@@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm
63977
63978 prev = NULL;
63979 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63980- struct file *file;
63981-
63982 if (mpnt->vm_flags & VM_DONTCOPY) {
63983 long pages = vma_pages(mpnt);
63984 mm->total_vm -= pages;
63985@@ -353,55 +415,13 @@ static int dup_mmap(struct mm_struct *mm
63986 -pages);
63987 continue;
63988 }
63989- charge = 0;
63990- if (mpnt->vm_flags & VM_ACCOUNT) {
63991- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63992- if (security_vm_enough_memory(len))
63993- goto fail_nomem;
63994- charge = len;
63995- }
63996- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63997- if (!tmp)
63998- goto fail_nomem;
63999- *tmp = *mpnt;
64000- INIT_LIST_HEAD(&tmp->anon_vma_chain);
64001- pol = mpol_dup(vma_policy(mpnt));
64002- retval = PTR_ERR(pol);
64003- if (IS_ERR(pol))
64004- goto fail_nomem_policy;
64005- vma_set_policy(tmp, pol);
64006- tmp->vm_mm = mm;
64007- if (anon_vma_fork(tmp, mpnt))
64008- goto fail_nomem_anon_vma_fork;
64009- tmp->vm_flags &= ~VM_LOCKED;
64010- tmp->vm_next = tmp->vm_prev = NULL;
64011- file = tmp->vm_file;
64012- if (file) {
64013- struct inode *inode = file->f_path.dentry->d_inode;
64014- struct address_space *mapping = file->f_mapping;
64015-
64016- get_file(file);
64017- if (tmp->vm_flags & VM_DENYWRITE)
64018- atomic_dec(&inode->i_writecount);
64019- mutex_lock(&mapping->i_mmap_mutex);
64020- if (tmp->vm_flags & VM_SHARED)
64021- mapping->i_mmap_writable++;
64022- flush_dcache_mmap_lock(mapping);
64023- /* insert tmp into the share list, just after mpnt */
64024- vma_prio_tree_add(tmp, mpnt);
64025- flush_dcache_mmap_unlock(mapping);
64026- mutex_unlock(&mapping->i_mmap_mutex);
64027+ tmp = dup_vma(mm, mpnt);
64028+ if (!tmp) {
64029+ retval = -ENOMEM;
64030+ goto out;
64031 }
64032
64033 /*
64034- * Clear hugetlb-related page reserves for children. This only
64035- * affects MAP_PRIVATE mappings. Faults generated by the child
64036- * are not guaranteed to succeed, even if read-only
64037- */
64038- if (is_vm_hugetlb_page(tmp))
64039- reset_vma_resv_huge_pages(tmp);
64040-
64041- /*
64042 * Link in the new vma and copy the page table entries.
64043 */
64044 *pprev = tmp;
64045@@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm
64046 if (retval)
64047 goto out;
64048 }
64049+
64050+#ifdef CONFIG_PAX_SEGMEXEC
64051+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
64052+ struct vm_area_struct *mpnt_m;
64053+
64054+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
64055+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
64056+
64057+ if (!mpnt->vm_mirror)
64058+ continue;
64059+
64060+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
64061+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
64062+ mpnt->vm_mirror = mpnt_m;
64063+ } else {
64064+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
64065+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
64066+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
64067+ mpnt->vm_mirror->vm_mirror = mpnt;
64068+ }
64069+ }
64070+ BUG_ON(mpnt_m);
64071+ }
64072+#endif
64073+
64074 /* a new mm has just been created */
64075 arch_dup_mmap(oldmm, mm);
64076 retval = 0;
64077@@ -430,14 +475,6 @@ out:
64078 flush_tlb_mm(oldmm);
64079 up_write(&oldmm->mmap_sem);
64080 return retval;
64081-fail_nomem_anon_vma_fork:
64082- mpol_put(pol);
64083-fail_nomem_policy:
64084- kmem_cache_free(vm_area_cachep, tmp);
64085-fail_nomem:
64086- retval = -ENOMEM;
64087- vm_unacct_memory(charge);
64088- goto out;
64089 }
64090
64091 static inline int mm_alloc_pgd(struct mm_struct *mm)
64092@@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_f
64093 spin_unlock(&fs->lock);
64094 return -EAGAIN;
64095 }
64096- fs->users++;
64097+ atomic_inc(&fs->users);
64098 spin_unlock(&fs->lock);
64099 return 0;
64100 }
64101 tsk->fs = copy_fs_struct(fs);
64102 if (!tsk->fs)
64103 return -ENOMEM;
64104+ gr_set_chroot_entries(tsk, &tsk->fs->root);
64105 return 0;
64106 }
64107
64108@@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(
64109 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
64110 #endif
64111 retval = -EAGAIN;
64112+
64113+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
64114+
64115 if (atomic_read(&p->real_cred->user->processes) >=
64116 task_rlimit(p, RLIMIT_NPROC)) {
64117 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
64118@@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(
64119 if (clone_flags & CLONE_THREAD)
64120 p->tgid = current->tgid;
64121
64122+ gr_copy_label(p);
64123+
64124 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
64125 /*
64126 * Clear TID on mm_release()?
64127@@ -1428,6 +1471,8 @@ bad_fork_cleanup_count:
64128 bad_fork_free:
64129 free_task(p);
64130 fork_out:
64131+ gr_log_forkfail(retval);
64132+
64133 return ERR_PTR(retval);
64134 }
64135
64136@@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags,
64137 if (clone_flags & CLONE_PARENT_SETTID)
64138 put_user(nr, parent_tidptr);
64139
64140+ gr_handle_brute_check();
64141+
64142 if (clone_flags & CLONE_VFORK) {
64143 p->vfork_done = &vfork;
64144 init_completion(&vfork);
64145@@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unsh
64146 return 0;
64147
64148 /* don't need lock here; in the worst case we'll do useless copy */
64149- if (fs->users == 1)
64150+ if (atomic_read(&fs->users) == 1)
64151 return 0;
64152
64153 *new_fsp = copy_fs_struct(fs);
64154@@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
64155 fs = current->fs;
64156 spin_lock(&fs->lock);
64157 current->fs = new_fs;
64158- if (--fs->users)
64159+ gr_set_chroot_entries(current, &current->fs->root);
64160+ if (atomic_dec_return(&fs->users))
64161 new_fs = NULL;
64162 else
64163 new_fs = fs;
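
Editorial note: the copy_fs()/unshare_fs()/sys_unshare() hunks above change fs_struct.users from a plain int into an atomic counter, used through atomic_inc(), atomic_read() and atomic_dec_return(). A userspace sketch of those three operations in C11 atomics, not kernel code:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_int users;
    atomic_init(&users, 1);                        /* a fresh fs_struct starts at 1 */

    atomic_fetch_add(&users, 1);                   /* atomic_inc(&fs->users)        */
    printf("users = %d\n", atomic_load(&users));   /* atomic_read(): 2              */

    /* atomic_dec_return(): the new value after the decrement */
    int left = atomic_fetch_sub(&users, 1) - 1;
    printf("after unshare: %d (%s)\n", left,
           left ? "fs_struct still shared" : "last user, free it");
    return 0;
}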
64164diff -urNp linux-3.1.4/kernel/futex.c linux-3.1.4/kernel/futex.c
64165--- linux-3.1.4/kernel/futex.c 2011-11-11 15:19:27.000000000 -0500
64166+++ linux-3.1.4/kernel/futex.c 2011-11-16 18:40:44.000000000 -0500
64167@@ -54,6 +54,7 @@
64168 #include <linux/mount.h>
64169 #include <linux/pagemap.h>
64170 #include <linux/syscalls.h>
64171+#include <linux/ptrace.h>
64172 #include <linux/signal.h>
64173 #include <linux/module.h>
64174 #include <linux/magic.h>
64175@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
64176 struct page *page, *page_head;
64177 int err, ro = 0;
64178
64179+#ifdef CONFIG_PAX_SEGMEXEC
64180+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
64181+ return -EFAULT;
64182+#endif
64183+
64184 /*
64185 * The futex address must be "naturally" aligned.
64186 */
64187@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
64188 struct futex_q q = futex_q_init;
64189 int ret;
64190
64191+ pax_track_stack();
64192+
64193 if (!bitset)
64194 return -EINVAL;
64195 q.bitset = bitset;
64196@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
64197 struct futex_q q = futex_q_init;
64198 int res, ret;
64199
64200+ pax_track_stack();
64201+
64202 if (!bitset)
64203 return -EINVAL;
64204
64205@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
64206 {
64207 struct robust_list_head __user *head;
64208 unsigned long ret;
64209+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
64210 const struct cred *cred = current_cred(), *pcred;
64211+#endif
64212
64213 if (!futex_cmpxchg_enabled)
64214 return -ENOSYS;
64215@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
64216 if (!p)
64217 goto err_unlock;
64218 ret = -EPERM;
64219+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64220+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
64221+ goto err_unlock;
64222+#else
64223 pcred = __task_cred(p);
64224 /* If victim is in different user_ns, then uids are not
64225 comparable, so we must have CAP_SYS_PTRACE */
64226@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
64227 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
64228 goto err_unlock;
64229 ok:
64230+#endif
64231 head = p->robust_list;
64232 rcu_read_unlock();
64233 }
64234@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
64235 {
64236 u32 curval;
64237 int i;
64238+ mm_segment_t oldfs;
64239
64240 /*
64241 * This will fail and we want it. Some arch implementations do
64242@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
64243 * implementation, the non-functional ones will return
64244 * -ENOSYS.
64245 */
64246+ oldfs = get_fs();
64247+ set_fs(USER_DS);
64248 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
64249 futex_cmpxchg_enabled = 1;
64250+ set_fs(oldfs);
64251
64252 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
64253 plist_head_init(&futex_queues[i].chain);
64254diff -urNp linux-3.1.4/kernel/futex_compat.c linux-3.1.4/kernel/futex_compat.c
64255--- linux-3.1.4/kernel/futex_compat.c 2011-11-11 15:19:27.000000000 -0500
64256+++ linux-3.1.4/kernel/futex_compat.c 2011-11-16 18:40:44.000000000 -0500
64257@@ -10,6 +10,7 @@
64258 #include <linux/compat.h>
64259 #include <linux/nsproxy.h>
64260 #include <linux/futex.h>
64261+#include <linux/ptrace.h>
64262
64263 #include <asm/uaccess.h>
64264
64265@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
64266 {
64267 struct compat_robust_list_head __user *head;
64268 unsigned long ret;
64269- const struct cred *cred = current_cred(), *pcred;
64270+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
64271+ const struct cred *cred = current_cred();
64272+ const struct cred *pcred;
64273+#endif
64274
64275 if (!futex_cmpxchg_enabled)
64276 return -ENOSYS;
64277@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
64278 if (!p)
64279 goto err_unlock;
64280 ret = -EPERM;
64281+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64282+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
64283+ goto err_unlock;
64284+#else
64285 pcred = __task_cred(p);
64286 /* If victim is in different user_ns, then uids are not
64287 comparable, so we must have CAP_SYS_PTRACE */
64288@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
64289 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
64290 goto err_unlock;
64291 ok:
64292+#endif
64293 head = p->compat_robust_list;
64294 rcu_read_unlock();
64295 }
64296diff -urNp linux-3.1.4/kernel/gcov/base.c linux-3.1.4/kernel/gcov/base.c
64297--- linux-3.1.4/kernel/gcov/base.c 2011-11-11 15:19:27.000000000 -0500
64298+++ linux-3.1.4/kernel/gcov/base.c 2011-11-16 18:39:08.000000000 -0500
64299@@ -102,11 +102,6 @@ void gcov_enable_events(void)
64300 }
64301
64302 #ifdef CONFIG_MODULES
64303-static inline int within(void *addr, void *start, unsigned long size)
64304-{
64305- return ((addr >= start) && (addr < start + size));
64306-}
64307-
64308 /* Update list and generate events when modules are unloaded. */
64309 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64310 void *data)
64311@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
64312 prev = NULL;
64313 /* Remove entries located in module from linked list. */
64314 for (info = gcov_info_head; info; info = info->next) {
64315- if (within(info, mod->module_core, mod->core_size)) {
64316+ if (within_module_core_rw((unsigned long)info, mod)) {
64317 if (prev)
64318 prev->next = info->next;
64319 else
64320diff -urNp linux-3.1.4/kernel/hrtimer.c linux-3.1.4/kernel/hrtimer.c
64321--- linux-3.1.4/kernel/hrtimer.c 2011-11-11 15:19:27.000000000 -0500
64322+++ linux-3.1.4/kernel/hrtimer.c 2011-11-16 18:39:08.000000000 -0500
64323@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
64324 local_irq_restore(flags);
64325 }
64326
64327-static void run_hrtimer_softirq(struct softirq_action *h)
64328+static void run_hrtimer_softirq(void)
64329 {
64330 hrtimer_peek_ahead_timers();
64331 }
64332diff -urNp linux-3.1.4/kernel/jump_label.c linux-3.1.4/kernel/jump_label.c
64333--- linux-3.1.4/kernel/jump_label.c 2011-11-11 15:19:27.000000000 -0500
64334+++ linux-3.1.4/kernel/jump_label.c 2011-11-16 18:39:08.000000000 -0500
64335@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
64336
64337 size = (((unsigned long)stop - (unsigned long)start)
64338 / sizeof(struct jump_entry));
64339+ pax_open_kernel();
64340 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
64341+ pax_close_kernel();
64342 }
64343
64344 static void jump_label_update(struct jump_label_key *key, int enable);
64345@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
64346 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
64347 struct jump_entry *iter;
64348
64349+ pax_open_kernel();
64350 for (iter = iter_start; iter < iter_stop; iter++) {
64351 if (within_module_init(iter->code, mod))
64352 iter->code = 0;
64353 }
64354+ pax_close_kernel();
64355 }
64356
64357 static int
64358diff -urNp linux-3.1.4/kernel/kallsyms.c linux-3.1.4/kernel/kallsyms.c
64359--- linux-3.1.4/kernel/kallsyms.c 2011-11-11 15:19:27.000000000 -0500
64360+++ linux-3.1.4/kernel/kallsyms.c 2011-11-16 18:40:44.000000000 -0500
64361@@ -11,6 +11,9 @@
64362 * Changed the compression method from stem compression to "table lookup"
64363 * compression (see scripts/kallsyms.c for a more complete description)
64364 */
64365+#ifdef CONFIG_GRKERNSEC_HIDESYM
64366+#define __INCLUDED_BY_HIDESYM 1
64367+#endif
64368 #include <linux/kallsyms.h>
64369 #include <linux/module.h>
64370 #include <linux/init.h>
64371@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
64372
64373 static inline int is_kernel_inittext(unsigned long addr)
64374 {
64375+ if (system_state != SYSTEM_BOOTING)
64376+ return 0;
64377+
64378 if (addr >= (unsigned long)_sinittext
64379 && addr <= (unsigned long)_einittext)
64380 return 1;
64381 return 0;
64382 }
64383
64384+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64385+#ifdef CONFIG_MODULES
64386+static inline int is_module_text(unsigned long addr)
64387+{
64388+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
64389+ return 1;
64390+
64391+ addr = ktla_ktva(addr);
64392+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
64393+}
64394+#else
64395+static inline int is_module_text(unsigned long addr)
64396+{
64397+ return 0;
64398+}
64399+#endif
64400+#endif
64401+
64402 static inline int is_kernel_text(unsigned long addr)
64403 {
64404 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
64405@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
64406
64407 static inline int is_kernel(unsigned long addr)
64408 {
64409+
64410+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64411+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
64412+ return 1;
64413+
64414+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
64415+#else
64416 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
64417+#endif
64418+
64419 return 1;
64420 return in_gate_area_no_mm(addr);
64421 }
64422
64423 static int is_ksym_addr(unsigned long addr)
64424 {
64425+
64426+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64427+ if (is_module_text(addr))
64428+ return 0;
64429+#endif
64430+
64431 if (all_var)
64432 return is_kernel(addr);
64433
64434@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
64435
64436 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
64437 {
64438- iter->name[0] = '\0';
64439 iter->nameoff = get_symbol_offset(new_pos);
64440 iter->pos = new_pos;
64441 }
64442@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
64443 {
64444 struct kallsym_iter *iter = m->private;
64445
64446+#ifdef CONFIG_GRKERNSEC_HIDESYM
64447+ if (current_uid())
64448+ return 0;
64449+#endif
64450+
64451 /* Some debugging symbols have no name. Ignore them. */
64452 if (!iter->name[0])
64453 return 0;
64454@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
64455 struct kallsym_iter *iter;
64456 int ret;
64457
64458- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
64459+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
64460 if (!iter)
64461 return -ENOMEM;
64462 reset_iter(iter, 0);
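
Editorial note: in the kallsyms.c hunk, reset_iter() no longer clears iter->name[0], so kallsyms_open() switches the allocation from kmalloc to kzalloc to keep the "no name yet" check in s_show() reliable on a fresh iterator. The same idiom in userspace terms is malloc versus calloc; the struct below is a stand-in, not the kernel's kallsym_iter:

#include <stdio.h>
#include <stdlib.h>

struct sym_iter {                /* stand-in for the kernel's kallsym_iter */
    unsigned long pos;
    char name[128];
};

int main(void)
{
    /* malloc leaves name[] indeterminate; a "!name[0]" guard could misfire */
    struct sym_iter *a = malloc(sizeof(*a));
    /* calloc (kzalloc in the kernel) guarantees name[0] == '\0' from the start */
    struct sym_iter *b = calloc(1, sizeof(*b));

    if (!a || !b)
        return 1;
    printf("zeroed iterator reports an empty name: %s\n", b->name[0] ? "no" : "yes");
    free(a);
    free(b);
    return 0;
}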
64463diff -urNp linux-3.1.4/kernel/kexec.c linux-3.1.4/kernel/kexec.c
64464--- linux-3.1.4/kernel/kexec.c 2011-11-11 15:19:27.000000000 -0500
64465+++ linux-3.1.4/kernel/kexec.c 2011-11-16 18:39:08.000000000 -0500
64466@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
64467 unsigned long flags)
64468 {
64469 struct compat_kexec_segment in;
64470- struct kexec_segment out, __user *ksegments;
64471+ struct kexec_segment out;
64472+ struct kexec_segment __user *ksegments;
64473 unsigned long i, result;
64474
64475 /* Don't allow clients that don't understand the native
64476diff -urNp linux-3.1.4/kernel/kmod.c linux-3.1.4/kernel/kmod.c
64477--- linux-3.1.4/kernel/kmod.c 2011-11-11 15:19:27.000000000 -0500
64478+++ linux-3.1.4/kernel/kmod.c 2011-11-16 18:40:44.000000000 -0500
64479@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
64480 * If module auto-loading support is disabled then this function
64481 * becomes a no-operation.
64482 */
64483-int __request_module(bool wait, const char *fmt, ...)
64484+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
64485 {
64486- va_list args;
64487 char module_name[MODULE_NAME_LEN];
64488 unsigned int max_modprobes;
64489 int ret;
64490- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
64491+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
64492 static char *envp[] = { "HOME=/",
64493 "TERM=linux",
64494 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
64495@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
64496 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
64497 static int kmod_loop_msg;
64498
64499- va_start(args, fmt);
64500- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
64501- va_end(args);
64502+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
64503 if (ret >= MODULE_NAME_LEN)
64504 return -ENAMETOOLONG;
64505
64506@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
64507 if (ret)
64508 return ret;
64509
64510+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64511+ if (!current_uid()) {
64512+ /* hack to workaround consolekit/udisks stupidity */
64513+ read_lock(&tasklist_lock);
64514+ if (!strcmp(current->comm, "mount") &&
64515+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
64516+ read_unlock(&tasklist_lock);
64517+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
64518+ return -EPERM;
64519+ }
64520+ read_unlock(&tasklist_lock);
64521+ }
64522+#endif
64523+
64524 /* If modprobe needs a service that is in a module, we get a recursive
64525 * loop. Limit the number of running kmod threads to max_threads/2 or
64526 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
64527@@ -133,6 +144,47 @@ int __request_module(bool wait, const ch
64528 atomic_dec(&kmod_concurrent);
64529 return ret;
64530 }
64531+
64532+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
64533+{
64534+ va_list args;
64535+ int ret;
64536+
64537+ va_start(args, fmt);
64538+ ret = ____request_module(wait, module_param, fmt, args);
64539+ va_end(args);
64540+
64541+ return ret;
64542+}
64543+
64544+int __request_module(bool wait, const char *fmt, ...)
64545+{
64546+ va_list args;
64547+ int ret;
64548+
64549+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64550+ if (current_uid()) {
64551+ char module_param[MODULE_NAME_LEN];
64552+
64553+ memset(module_param, 0, sizeof(module_param));
64554+
64555+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
64556+
64557+ va_start(args, fmt);
64558+ ret = ____request_module(wait, module_param, fmt, args);
64559+ va_end(args);
64560+
64561+ return ret;
64562+ }
64563+#endif
64564+
64565+ va_start(args, fmt);
64566+ ret = ____request_module(wait, NULL, fmt, args);
64567+ va_end(args);
64568+
64569+ return ret;
64570+}
64571+
64572 EXPORT_SYMBOL(__request_module);
64573 #endif /* CONFIG_MODULES */
64574
64575@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
64576 *
64577 * Thus the __user pointer cast is valid here.
64578 */
64579- sys_wait4(pid, (int __user *)&ret, 0, NULL);
64580+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
64581
64582 /*
64583 * If ret is 0, either ____call_usermodehelper failed and the
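
Editorial note: with MODHARDEN, the reworked __request_module() above tags module auto-load requests from non-root processes by appending an extra modprobe argument of the form grsec_modharden_normal<uid>_. A userspace sketch of how that argv ends up assembled; the modprobe path, the "-q"/"--" flags and the snprintf format mirror the hunk, while the buffer size and uid are illustrative:

#include <stdio.h>
#include <string.h>

#define MODULE_NAME_LEN 64   /* the kernel uses 64 - sizeof(unsigned long); 64 is close enough here */

int main(void)
{
    unsigned int uid = 1000;                          /* pretend current_uid() */
    char module_name[MODULE_NAME_LEN] = "cramfs";     /* pretend requested module */
    char module_param[MODULE_NAME_LEN];

    memset(module_param, 0, sizeof(module_param));
    snprintf(module_param, sizeof(module_param) - 1,
             "grsec_modharden_normal%u_", uid);

    char *argv[] = { "/sbin/modprobe", "-q", "--",
                     module_name, module_param, NULL };

    for (int i = 0; argv[i]; i++)
        printf("argv[%d] = %s\n", i, argv[i]);
    return 0;
}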
64584diff -urNp linux-3.1.4/kernel/kprobes.c linux-3.1.4/kernel/kprobes.c
64585--- linux-3.1.4/kernel/kprobes.c 2011-11-11 15:19:27.000000000 -0500
64586+++ linux-3.1.4/kernel/kprobes.c 2011-11-16 18:39:08.000000000 -0500
64587@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
64588 * kernel image and loaded module images reside. This is required
64589 * so x86_64 can correctly handle the %rip-relative fixups.
64590 */
64591- kip->insns = module_alloc(PAGE_SIZE);
64592+ kip->insns = module_alloc_exec(PAGE_SIZE);
64593 if (!kip->insns) {
64594 kfree(kip);
64595 return NULL;
64596@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
64597 */
64598 if (!list_is_singular(&kip->list)) {
64599 list_del(&kip->list);
64600- module_free(NULL, kip->insns);
64601+ module_free_exec(NULL, kip->insns);
64602 kfree(kip);
64603 }
64604 return 1;
64605@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
64606 {
64607 int i, err = 0;
64608 unsigned long offset = 0, size = 0;
64609- char *modname, namebuf[128];
64610+ char *modname, namebuf[KSYM_NAME_LEN];
64611 const char *symbol_name;
64612 void *addr;
64613 struct kprobe_blackpoint *kb;
64614@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(st
64615 const char *sym = NULL;
64616 unsigned int i = *(loff_t *) v;
64617 unsigned long offset = 0;
64618- char *modname, namebuf[128];
64619+ char *modname, namebuf[KSYM_NAME_LEN];
64620
64621 head = &kprobe_table[i];
64622 preempt_disable();
64623diff -urNp linux-3.1.4/kernel/lockdep.c linux-3.1.4/kernel/lockdep.c
64624--- linux-3.1.4/kernel/lockdep.c 2011-11-11 15:19:27.000000000 -0500
64625+++ linux-3.1.4/kernel/lockdep.c 2011-11-16 18:39:08.000000000 -0500
64626@@ -583,6 +583,10 @@ static int static_obj(void *obj)
64627 end = (unsigned long) &_end,
64628 addr = (unsigned long) obj;
64629
64630+#ifdef CONFIG_PAX_KERNEXEC
64631+ start = ktla_ktva(start);
64632+#endif
64633+
64634 /*
64635 * static variable?
64636 */
64637@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
64638 if (!static_obj(lock->key)) {
64639 debug_locks_off();
64640 printk("INFO: trying to register non-static key.\n");
64641+ printk("lock:%pS key:%pS.\n", lock, lock->key);
64642 printk("the code is fine but needs lockdep annotation.\n");
64643 printk("turning off the locking correctness validator.\n");
64644 dump_stack();
64645@@ -2948,7 +2953,7 @@ static int __lock_acquire(struct lockdep
64646 if (!class)
64647 return 0;
64648 }
64649- atomic_inc((atomic_t *)&class->ops);
64650+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64651 if (very_verbose(class)) {
64652 printk("\nacquire class [%p] %s", class->key, class->name);
64653 if (class->name_version > 1)
64654diff -urNp linux-3.1.4/kernel/lockdep_proc.c linux-3.1.4/kernel/lockdep_proc.c
64655--- linux-3.1.4/kernel/lockdep_proc.c 2011-11-11 15:19:27.000000000 -0500
64656+++ linux-3.1.4/kernel/lockdep_proc.c 2011-11-16 18:39:08.000000000 -0500
64657@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
64658
64659 static void print_name(struct seq_file *m, struct lock_class *class)
64660 {
64661- char str[128];
64662+ char str[KSYM_NAME_LEN];
64663 const char *name = class->name;
64664
64665 if (!name) {
64666diff -urNp linux-3.1.4/kernel/module.c linux-3.1.4/kernel/module.c
64667--- linux-3.1.4/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
64668+++ linux-3.1.4/kernel/module.c 2011-12-02 17:38:47.000000000 -0500
64669@@ -58,6 +58,7 @@
64670 #include <linux/jump_label.h>
64671 #include <linux/pfn.h>
64672 #include <linux/bsearch.h>
64673+#include <linux/grsecurity.h>
64674
64675 #define CREATE_TRACE_POINTS
64676 #include <trace/events/module.h>
64677@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
64678
64679 /* Bounds of module allocation, for speeding __module_address.
64680 * Protected by module_mutex. */
64681-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64682+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64683+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64684
64685 int register_module_notifier(struct notifier_block * nb)
64686 {
64687@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
64688 return true;
64689
64690 list_for_each_entry_rcu(mod, &modules, list) {
64691- struct symsearch arr[] = {
64692+ struct symsearch modarr[] = {
64693 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64694 NOT_GPL_ONLY, false },
64695 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64696@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
64697 #endif
64698 };
64699
64700- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64701+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64702 return true;
64703 }
64704 return false;
64705@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
64706 static int percpu_modalloc(struct module *mod,
64707 unsigned long size, unsigned long align)
64708 {
64709- if (align > PAGE_SIZE) {
64710+ if (align-1 >= PAGE_SIZE) {
64711 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64712 mod->name, align, PAGE_SIZE);
64713 align = PAGE_SIZE;
64714@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64715 */
64716 #ifdef CONFIG_SYSFS
64717
64718-#ifdef CONFIG_KALLSYMS
64719+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64720 static inline bool sect_empty(const Elf_Shdr *sect)
64721 {
64722 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64723@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base
64724
64725 static void unset_module_core_ro_nx(struct module *mod)
64726 {
64727- set_page_attributes(mod->module_core + mod->core_text_size,
64728- mod->module_core + mod->core_size,
64729+ set_page_attributes(mod->module_core_rw,
64730+ mod->module_core_rw + mod->core_size_rw,
64731 set_memory_x);
64732- set_page_attributes(mod->module_core,
64733- mod->module_core + mod->core_ro_size,
64734+ set_page_attributes(mod->module_core_rx,
64735+ mod->module_core_rx + mod->core_size_rx,
64736 set_memory_rw);
64737 }
64738
64739 static void unset_module_init_ro_nx(struct module *mod)
64740 {
64741- set_page_attributes(mod->module_init + mod->init_text_size,
64742- mod->module_init + mod->init_size,
64743+ set_page_attributes(mod->module_init_rw,
64744+ mod->module_init_rw + mod->init_size_rw,
64745 set_memory_x);
64746- set_page_attributes(mod->module_init,
64747- mod->module_init + mod->init_ro_size,
64748+ set_page_attributes(mod->module_init_rx,
64749+ mod->module_init_rx + mod->init_size_rx,
64750 set_memory_rw);
64751 }
64752
64753@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64754
64755 mutex_lock(&module_mutex);
64756 list_for_each_entry_rcu(mod, &modules, list) {
64757- if ((mod->module_core) && (mod->core_text_size)) {
64758- set_page_attributes(mod->module_core,
64759- mod->module_core + mod->core_text_size,
64760+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64761+ set_page_attributes(mod->module_core_rx,
64762+ mod->module_core_rx + mod->core_size_rx,
64763 set_memory_rw);
64764 }
64765- if ((mod->module_init) && (mod->init_text_size)) {
64766- set_page_attributes(mod->module_init,
64767- mod->module_init + mod->init_text_size,
64768+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64769+ set_page_attributes(mod->module_init_rx,
64770+ mod->module_init_rx + mod->init_size_rx,
64771 set_memory_rw);
64772 }
64773 }
64774@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64775
64776 mutex_lock(&module_mutex);
64777 list_for_each_entry_rcu(mod, &modules, list) {
64778- if ((mod->module_core) && (mod->core_text_size)) {
64779- set_page_attributes(mod->module_core,
64780- mod->module_core + mod->core_text_size,
64781+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64782+ set_page_attributes(mod->module_core_rx,
64783+ mod->module_core_rx + mod->core_size_rx,
64784 set_memory_ro);
64785 }
64786- if ((mod->module_init) && (mod->init_text_size)) {
64787- set_page_attributes(mod->module_init,
64788- mod->module_init + mod->init_text_size,
64789+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64790+ set_page_attributes(mod->module_init_rx,
64791+ mod->module_init_rx + mod->init_size_rx,
64792 set_memory_ro);
64793 }
64794 }
64795@@ -1748,16 +1750,19 @@ static void free_module(struct module *m
64796
64797 /* This may be NULL, but that's OK */
64798 unset_module_init_ro_nx(mod);
64799- module_free(mod, mod->module_init);
64800+ module_free(mod, mod->module_init_rw);
64801+ module_free_exec(mod, mod->module_init_rx);
64802 kfree(mod->args);
64803 percpu_modfree(mod);
64804
64805 /* Free lock-classes: */
64806- lockdep_free_key_range(mod->module_core, mod->core_size);
64807+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64808+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64809
64810 /* Finally, free the core (containing the module structure) */
64811 unset_module_core_ro_nx(mod);
64812- module_free(mod, mod->module_core);
64813+ module_free_exec(mod, mod->module_core_rx);
64814+ module_free(mod, mod->module_core_rw);
64815
64816 #ifdef CONFIG_MPU
64817 update_protections(current->mm);
64818@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct modul
64819 unsigned int i;
64820 int ret = 0;
64821 const struct kernel_symbol *ksym;
64822+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64823+ int is_fs_load = 0;
64824+ int register_filesystem_found = 0;
64825+ char *p;
64826+
64827+ p = strstr(mod->args, "grsec_modharden_fs");
64828+ if (p) {
64829+ char *endptr = p + strlen("grsec_modharden_fs");
64830+ /* copy \0 as well */
64831+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64832+ is_fs_load = 1;
64833+ }
64834+#endif
64835
64836 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64837 const char *name = info->strtab + sym[i].st_name;
64838
64839+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64840+ /* it's a real shame this will never get ripped and copied
64841+ upstream! ;(
64842+ */
64843+ if (is_fs_load && !strcmp(name, "register_filesystem"))
64844+ register_filesystem_found = 1;
64845+#endif
64846+
64847 switch (sym[i].st_shndx) {
64848 case SHN_COMMON:
64849 /* We compiled with -fno-common. These are not
64850@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct modul
64851 ksym = resolve_symbol_wait(mod, info, name);
64852 /* Ok if resolved. */
64853 if (ksym && !IS_ERR(ksym)) {
64854+ pax_open_kernel();
64855 sym[i].st_value = ksym->value;
64856+ pax_close_kernel();
64857 break;
64858 }
64859
64860@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct modul
64861 secbase = (unsigned long)mod_percpu(mod);
64862 else
64863 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64864+ pax_open_kernel();
64865 sym[i].st_value += secbase;
64866+ pax_close_kernel();
64867 break;
64868 }
64869 }
64870
64871+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64872+ if (is_fs_load && !register_filesystem_found) {
64873+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64874+ ret = -EPERM;
64875+ }
64876+#endif
64877+
64878 return ret;
64879 }
64880
64881@@ -1977,22 +2014,12 @@ static void layout_sections(struct modul
64882 || s->sh_entsize != ~0UL
64883 || strstarts(sname, ".init"))
64884 continue;
64885- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64886+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64887+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64888+ else
64889+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64890 DEBUGP("\t%s\n", name);
64891 }
64892- switch (m) {
64893- case 0: /* executable */
64894- mod->core_size = debug_align(mod->core_size);
64895- mod->core_text_size = mod->core_size;
64896- break;
64897- case 1: /* RO: text and ro-data */
64898- mod->core_size = debug_align(mod->core_size);
64899- mod->core_ro_size = mod->core_size;
64900- break;
64901- case 3: /* whole core */
64902- mod->core_size = debug_align(mod->core_size);
64903- break;
64904- }
64905 }
64906
64907 DEBUGP("Init section allocation order:\n");
64908@@ -2006,23 +2033,13 @@ static void layout_sections(struct modul
64909 || s->sh_entsize != ~0UL
64910 || !strstarts(sname, ".init"))
64911 continue;
64912- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64913- | INIT_OFFSET_MASK);
64914+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64915+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64916+ else
64917+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64918+ s->sh_entsize |= INIT_OFFSET_MASK;
64919 DEBUGP("\t%s\n", sname);
64920 }
64921- switch (m) {
64922- case 0: /* executable */
64923- mod->init_size = debug_align(mod->init_size);
64924- mod->init_text_size = mod->init_size;
64925- break;
64926- case 1: /* RO: text and ro-data */
64927- mod->init_size = debug_align(mod->init_size);
64928- mod->init_ro_size = mod->init_size;
64929- break;
64930- case 3: /* whole init */
64931- mod->init_size = debug_align(mod->init_size);
64932- break;
64933- }
64934 }
64935 }
64936
64937@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module
64938
64939 /* Put symbol section at end of init part of module. */
64940 symsect->sh_flags |= SHF_ALLOC;
64941- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64942+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64943 info->index.sym) | INIT_OFFSET_MASK;
64944 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64945
64946@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module
64947 }
64948
64949 /* Append room for core symbols at end of core part. */
64950- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64951- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64952+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64953+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64954
64955 /* Put string table section at end of init part of module. */
64956 strsect->sh_flags |= SHF_ALLOC;
64957- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64958+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64959 info->index.str) | INIT_OFFSET_MASK;
64960 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64961
64962 /* Append room for core symbols' strings at end of core part. */
64963- info->stroffs = mod->core_size;
64964+ info->stroffs = mod->core_size_rx;
64965 __set_bit(0, info->strmap);
64966- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64967+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64968 }
64969
64970 static void add_kallsyms(struct module *mod, const struct load_info *info)
64971@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *
64972 /* Make sure we get permanent strtab: don't use info->strtab. */
64973 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64974
64975+ pax_open_kernel();
64976+
64977 /* Set types up while we still have access to sections. */
64978 for (i = 0; i < mod->num_symtab; i++)
64979 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64980
64981- mod->core_symtab = dst = mod->module_core + info->symoffs;
64982+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64983 src = mod->symtab;
64984 *dst = *src;
64985 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64986@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *
64987 }
64988 mod->core_num_syms = ndst;
64989
64990- mod->core_strtab = s = mod->module_core + info->stroffs;
64991+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64992 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64993 if (test_bit(i, info->strmap))
64994 *++s = mod->strtab[i];
64995+
64996+ pax_close_kernel();
64997 }
64998 #else
64999 static inline void layout_symtab(struct module *mod, struct load_info *info)
65000@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long
65001 return size == 0 ? NULL : vmalloc_exec(size);
65002 }
65003
65004-static void *module_alloc_update_bounds(unsigned long size)
65005+static void *module_alloc_update_bounds_rw(unsigned long size)
65006 {
65007 void *ret = module_alloc(size);
65008
65009 if (ret) {
65010 mutex_lock(&module_mutex);
65011 /* Update module bounds. */
65012- if ((unsigned long)ret < module_addr_min)
65013- module_addr_min = (unsigned long)ret;
65014- if ((unsigned long)ret + size > module_addr_max)
65015- module_addr_max = (unsigned long)ret + size;
65016+ if ((unsigned long)ret < module_addr_min_rw)
65017+ module_addr_min_rw = (unsigned long)ret;
65018+ if ((unsigned long)ret + size > module_addr_max_rw)
65019+ module_addr_max_rw = (unsigned long)ret + size;
65020+ mutex_unlock(&module_mutex);
65021+ }
65022+ return ret;
65023+}
65024+
65025+static void *module_alloc_update_bounds_rx(unsigned long size)
65026+{
65027+ void *ret = module_alloc_exec(size);
65028+
65029+ if (ret) {
65030+ mutex_lock(&module_mutex);
65031+ /* Update module bounds. */
65032+ if ((unsigned long)ret < module_addr_min_rx)
65033+ module_addr_min_rx = (unsigned long)ret;
65034+ if ((unsigned long)ret + size > module_addr_max_rx)
65035+ module_addr_max_rx = (unsigned long)ret + size;
65036 mutex_unlock(&module_mutex);
65037 }
65038 return ret;
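
The hunk above splits module_alloc_update_bounds() into _rw and _rx variants: the former keeps using module_alloc(), the latter uses module_alloc_exec(), and each maintains its own module_addr_min/max pair so that later address checks can be made per pool. A minimal userspace sketch of that bounds bookkeeping, with plain malloc() standing in for both kernel allocators and the module_mutex locking omitted:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static uintptr_t min_rw = UINTPTR_MAX, max_rw;   /* bounds of the RW pool */
static uintptr_t min_rx = UINTPTR_MAX, max_rx;   /* bounds of the RX pool */

/* Allocate from one pool and widen that pool's [min, max) range. */
static void *alloc_update_bounds(size_t size, uintptr_t *min, uintptr_t *max)
{
    void *ret = malloc(size);    /* stand-in for module_alloc()/module_alloc_exec() */

    if (ret) {
        if ((uintptr_t)ret < *min)
            *min = (uintptr_t)ret;
        if ((uintptr_t)ret + size > *max)
            *max = (uintptr_t)ret + size;
    }
    return ret;
}

int main(void)
{
    void *rw = alloc_update_bounds(4096, &min_rw, &max_rw);
    void *rx = alloc_update_bounds(4096, &min_rx, &max_rx);

    printf("rw pool: [%#lx, %#lx)\n", (unsigned long)min_rw, (unsigned long)max_rw);
    printf("rx pool: [%#lx, %#lx)\n", (unsigned long)min_rx, (unsigned long)max_rx);
    free(rw);
    free(rx);
    return 0;
}
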
65039@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(st
65040 static int check_modinfo(struct module *mod, struct load_info *info)
65041 {
65042 const char *modmagic = get_modinfo(info, "vermagic");
65043+ const char *license = get_modinfo(info, "license");
65044 int err;
65045
65046+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
65047+ if (!license || !license_is_gpl_compatible(license))
65048+ return -ENOEXEC;
65049+#endif
65050+
65051 /* This is allowed: modprobe --force will invalidate it. */
65052 if (!modmagic) {
65053 err = try_to_force_load(mod, "bad vermagic");
65054@@ -2495,7 +2538,7 @@ static int check_modinfo(struct module *
65055 }
65056
65057 /* Set up license info based on the info section */
65058- set_license(mod, get_modinfo(info, "license"));
65059+ set_license(mod, license);
65060
65061 return 0;
65062 }
65063@@ -2589,7 +2632,7 @@ static int move_module(struct module *mo
65064 void *ptr;
65065
65066 /* Do the allocs. */
65067- ptr = module_alloc_update_bounds(mod->core_size);
65068+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
65069 /*
65070 * The pointer to this block is stored in the module structure
65071 * which is inside the block. Just mark it as not being a
65072@@ -2599,23 +2642,50 @@ static int move_module(struct module *mo
65073 if (!ptr)
65074 return -ENOMEM;
65075
65076- memset(ptr, 0, mod->core_size);
65077- mod->module_core = ptr;
65078+ memset(ptr, 0, mod->core_size_rw);
65079+ mod->module_core_rw = ptr;
65080
65081- ptr = module_alloc_update_bounds(mod->init_size);
65082+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
65083 /*
65084 * The pointer to this block is stored in the module structure
65085 * which is inside the block. This block doesn't need to be
65086 * scanned as it contains data and code that will be freed
65087 * after the module is initialized.
65088 */
65089- kmemleak_ignore(ptr);
65090- if (!ptr && mod->init_size) {
65091- module_free(mod, mod->module_core);
65092+ kmemleak_not_leak(ptr);
65093+ if (!ptr && mod->init_size_rw) {
65094+ module_free(mod, mod->module_core_rw);
65095+ return -ENOMEM;
65096+ }
65097+ memset(ptr, 0, mod->init_size_rw);
65098+ mod->module_init_rw = ptr;
65099+
65100+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
65101+ kmemleak_not_leak(ptr);
65102+ if (!ptr) {
65103+ module_free(mod, mod->module_init_rw);
65104+ module_free(mod, mod->module_core_rw);
65105 return -ENOMEM;
65106 }
65107- memset(ptr, 0, mod->init_size);
65108- mod->module_init = ptr;
65109+
65110+ pax_open_kernel();
65111+ memset(ptr, 0, mod->core_size_rx);
65112+ pax_close_kernel();
65113+ mod->module_core_rx = ptr;
65114+
65115+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
65116+ kmemleak_not_leak(ptr);
65117+ if (!ptr && mod->init_size_rx) {
65118+ module_free_exec(mod, mod->module_core_rx);
65119+ module_free(mod, mod->module_init_rw);
65120+ module_free(mod, mod->module_core_rw);
65121+ return -ENOMEM;
65122+ }
65123+
65124+ pax_open_kernel();
65125+ memset(ptr, 0, mod->init_size_rx);
65126+ pax_close_kernel();
65127+ mod->module_init_rx = ptr;
65128
65129 /* Transfer each section which specifies SHF_ALLOC */
65130 DEBUGP("final section addresses:\n");
65131@@ -2626,16 +2696,45 @@ static int move_module(struct module *mo
65132 if (!(shdr->sh_flags & SHF_ALLOC))
65133 continue;
65134
65135- if (shdr->sh_entsize & INIT_OFFSET_MASK)
65136- dest = mod->module_init
65137- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
65138- else
65139- dest = mod->module_core + shdr->sh_entsize;
65140+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
65141+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
65142+ dest = mod->module_init_rw
65143+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
65144+ else
65145+ dest = mod->module_init_rx
65146+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
65147+ } else {
65148+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
65149+ dest = mod->module_core_rw + shdr->sh_entsize;
65150+ else
65151+ dest = mod->module_core_rx + shdr->sh_entsize;
65152+ }
65153+
65154+ if (shdr->sh_type != SHT_NOBITS) {
65155+
65156+#ifdef CONFIG_PAX_KERNEXEC
65157+#ifdef CONFIG_X86_64
65158+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
65159+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
65160+#endif
65161+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
65162+ pax_open_kernel();
65163+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
65164+ pax_close_kernel();
65165+ } else
65166+#endif
65167
65168- if (shdr->sh_type != SHT_NOBITS)
65169 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
65170+ }
65171 /* Update sh_addr to point to copy in image. */
65172- shdr->sh_addr = (unsigned long)dest;
65173+
65174+#ifdef CONFIG_PAX_KERNEXEC
65175+ if (shdr->sh_flags & SHF_EXECINSTR)
65176+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
65177+ else
65178+#endif
65179+
65180+ shdr->sh_addr = (unsigned long)dest;
65181 DEBUGP("\t0x%lx %s\n",
65182 shdr->sh_addr, info->secstrings + shdr->sh_name);
65183 }
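
In move_module() each SHF_ALLOC section is now routed to one of four destinations -- core vs. init selected by the INIT_OFFSET_MASK bit, rw vs. rx by SHF_WRITE -- and copies into the read-only areas are bracketed by pax_open_kernel()/pax_close_kernel(). The routing decision itself is only a flag test; a compact sketch of it, using the standard SHF_* constants from <elf.h> and string labels in place of the real memory regions:

#include <stdio.h>
#include <elf.h>    /* SHF_WRITE, SHF_ALLOC, SHF_EXECINSTR */

struct regions {
    const char *core_rw, *core_rx, *init_rw, *init_rx;
};

/* Pick the destination region the way the patched move_module() does:
 * init vs. core comes from the caller, rw vs. rx from SHF_WRITE. */
static const char *pick_dest(const struct regions *r, unsigned long sh_flags, int is_init)
{
    int writable = (sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC);

    if (is_init)
        return writable ? r->init_rw : r->init_rx;
    return writable ? r->core_rw : r->core_rx;
}

int main(void)
{
    struct regions r = { "core_rw", "core_rx", "init_rw", "init_rx" };

    printf(".text      -> %s\n", pick_dest(&r, SHF_ALLOC | SHF_EXECINSTR, 0));
    printf(".data      -> %s\n", pick_dest(&r, SHF_ALLOC | SHF_WRITE, 0));
    printf(".init.text -> %s\n", pick_dest(&r, SHF_ALLOC | SHF_EXECINSTR, 1));
    return 0;
}
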
65184@@ -2686,12 +2785,12 @@ static void flush_module_icache(const st
65185 * Do it before processing of module parameters, so the module
65186 * can provide parameter accessor functions of its own.
65187 */
65188- if (mod->module_init)
65189- flush_icache_range((unsigned long)mod->module_init,
65190- (unsigned long)mod->module_init
65191- + mod->init_size);
65192- flush_icache_range((unsigned long)mod->module_core,
65193- (unsigned long)mod->module_core + mod->core_size);
65194+ if (mod->module_init_rx)
65195+ flush_icache_range((unsigned long)mod->module_init_rx,
65196+ (unsigned long)mod->module_init_rx
65197+ + mod->init_size_rx);
65198+ flush_icache_range((unsigned long)mod->module_core_rx,
65199+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
65200
65201 set_fs(old_fs);
65202 }
65203@@ -2771,8 +2870,10 @@ static void module_deallocate(struct mod
65204 {
65205 kfree(info->strmap);
65206 percpu_modfree(mod);
65207- module_free(mod, mod->module_init);
65208- module_free(mod, mod->module_core);
65209+ module_free_exec(mod, mod->module_init_rx);
65210+ module_free_exec(mod, mod->module_core_rx);
65211+ module_free(mod, mod->module_init_rw);
65212+ module_free(mod, mod->module_core_rw);
65213 }
65214
65215 int __weak module_finalize(const Elf_Ehdr *hdr,
65216@@ -2836,9 +2937,38 @@ static struct module *load_module(void _
65217 if (err)
65218 goto free_unload;
65219
65220+ /* Now copy in args */
65221+ mod->args = strndup_user(uargs, ~0UL >> 1);
65222+ if (IS_ERR(mod->args)) {
65223+ err = PTR_ERR(mod->args);
65224+ goto free_unload;
65225+ }
65226+
65227 /* Set up MODINFO_ATTR fields */
65228 setup_modinfo(mod, &info);
65229
65230+#ifdef CONFIG_GRKERNSEC_MODHARDEN
65231+ {
65232+ char *p, *p2;
65233+
65234+ if (strstr(mod->args, "grsec_modharden_netdev")) {
65235+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
65236+ err = -EPERM;
65237+ goto free_modinfo;
65238+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
65239+ p += strlen("grsec_modharden_normal");
65240+ p2 = strstr(p, "_");
65241+ if (p2) {
65242+ *p2 = '\0';
65243+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
65244+ *p2 = '_';
65245+ }
65246+ err = -EPERM;
65247+ goto free_modinfo;
65248+ }
65249+ }
65250+#endif
65251+
65252 /* Fix up syms, so that st_value is a pointer to location. */
65253 err = simplify_symbols(mod, &info);
65254 if (err < 0)
65255@@ -2854,13 +2984,6 @@ static struct module *load_module(void _
65256
65257 flush_module_icache(mod);
65258
65259- /* Now copy in args */
65260- mod->args = strndup_user(uargs, ~0UL >> 1);
65261- if (IS_ERR(mod->args)) {
65262- err = PTR_ERR(mod->args);
65263- goto free_arch_cleanup;
65264- }
65265-
65266 /* Mark state as coming so strong_try_module_get() ignores us. */
65267 mod->state = MODULE_STATE_COMING;
65268
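
The GRKERNSEC_MODHARDEN block above scans the module arguments for markers appended by grsecurity's request_module() side: "grsec_modharden_netdev" rejects the auto-load outright, while "grsec_modharden_normal<uid>_" carries the requesting uid, which is temporarily NUL-terminated out of the string for the log message and then restored. A userspace approximation of that parsing, with a hard-coded string standing in for mod->args:

#include <stdio.h>
#include <string.h>

/* Return 0 if the load would be allowed, -1 if denied (printing why). */
static int modharden_check(char *args, const char *name)
{
    char *p, *p2;

    if (strstr(args, "grsec_modharden_netdev")) {
        printf("denied netdev auto-load of %.64s\n", name);
        return -1;
    }
    if ((p = strstr(args, "grsec_modharden_normal"))) {
        p += strlen("grsec_modharden_normal");
        p2 = strstr(p, "_");
        if (p2) {
            *p2 = '\0';                 /* temporarily terminate at the uid */
            printf("denied auto-load of %.64s by uid %.9s\n", name, p);
            *p2 = '_';                  /* restore the original string */
        }
        return -1;
    }
    return 0;
}

int main(void)
{
    char args[] = "grsec_modharden_normal1000_ someopt=1";

    return modharden_check(args, "example_mod") ? 1 : 0;
}
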
65269@@ -2920,11 +3043,10 @@ static struct module *load_module(void _
65270 unlock:
65271 mutex_unlock(&module_mutex);
65272 synchronize_sched();
65273- kfree(mod->args);
65274- free_arch_cleanup:
65275 module_arch_cleanup(mod);
65276 free_modinfo:
65277 free_modinfo(mod);
65278+ kfree(mod->args);
65279 free_unload:
65280 module_unload_free(mod);
65281 free_module:
65282@@ -2965,16 +3087,16 @@ SYSCALL_DEFINE3(init_module, void __user
65283 MODULE_STATE_COMING, mod);
65284
65285 /* Set RO and NX regions for core */
65286- set_section_ro_nx(mod->module_core,
65287- mod->core_text_size,
65288- mod->core_ro_size,
65289- mod->core_size);
65290+ set_section_ro_nx(mod->module_core_rx,
65291+ mod->core_size_rx,
65292+ mod->core_size_rx,
65293+ mod->core_size_rx);
65294
65295 /* Set RO and NX regions for init */
65296- set_section_ro_nx(mod->module_init,
65297- mod->init_text_size,
65298- mod->init_ro_size,
65299- mod->init_size);
65300+ set_section_ro_nx(mod->module_init_rx,
65301+ mod->init_size_rx,
65302+ mod->init_size_rx,
65303+ mod->init_size_rx);
65304
65305 do_mod_ctors(mod);
65306 /* Start the module */
65307@@ -3020,11 +3142,12 @@ SYSCALL_DEFINE3(init_module, void __user
65308 mod->strtab = mod->core_strtab;
65309 #endif
65310 unset_module_init_ro_nx(mod);
65311- module_free(mod, mod->module_init);
65312- mod->module_init = NULL;
65313- mod->init_size = 0;
65314- mod->init_ro_size = 0;
65315- mod->init_text_size = 0;
65316+ module_free(mod, mod->module_init_rw);
65317+ module_free_exec(mod, mod->module_init_rx);
65318+ mod->module_init_rw = NULL;
65319+ mod->module_init_rx = NULL;
65320+ mod->init_size_rw = 0;
65321+ mod->init_size_rx = 0;
65322 mutex_unlock(&module_mutex);
65323
65324 return 0;
65325@@ -3055,10 +3178,16 @@ static const char *get_ksymbol(struct mo
65326 unsigned long nextval;
65327
65328 /* At worse, next value is at end of module */
65329- if (within_module_init(addr, mod))
65330- nextval = (unsigned long)mod->module_init+mod->init_text_size;
65331+ if (within_module_init_rx(addr, mod))
65332+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
65333+ else if (within_module_init_rw(addr, mod))
65334+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
65335+ else if (within_module_core_rx(addr, mod))
65336+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
65337+ else if (within_module_core_rw(addr, mod))
65338+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
65339 else
65340- nextval = (unsigned long)mod->module_core+mod->core_text_size;
65341+ return NULL;
65342
65343 /* Scan for closest preceding symbol, and next symbol. (ELF
65344 starts real symbols at 1). */
65345@@ -3304,7 +3433,7 @@ static int m_show(struct seq_file *m, vo
65346 char buf[8];
65347
65348 seq_printf(m, "%s %u",
65349- mod->name, mod->init_size + mod->core_size);
65350+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
65351 print_unload_info(m, mod);
65352
65353 /* Informative for users. */
65354@@ -3313,7 +3442,7 @@ static int m_show(struct seq_file *m, vo
65355 mod->state == MODULE_STATE_COMING ? "Loading":
65356 "Live");
65357 /* Used by oprofile and other similar tools. */
65358- seq_printf(m, " 0x%pK", mod->module_core);
65359+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
65360
65361 /* Taints info */
65362 if (mod->taints)
65363@@ -3349,7 +3478,17 @@ static const struct file_operations proc
65364
65365 static int __init proc_modules_init(void)
65366 {
65367+#ifndef CONFIG_GRKERNSEC_HIDESYM
65368+#ifdef CONFIG_GRKERNSEC_PROC_USER
65369+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65370+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65371+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
65372+#else
65373 proc_create("modules", 0, NULL, &proc_modules_operations);
65374+#endif
65375+#else
65376+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65377+#endif
65378 return 0;
65379 }
65380 module_init(proc_modules_init);
65381@@ -3408,12 +3547,12 @@ struct module *__module_address(unsigned
65382 {
65383 struct module *mod;
65384
65385- if (addr < module_addr_min || addr > module_addr_max)
65386+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
65387+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
65388 return NULL;
65389
65390 list_for_each_entry_rcu(mod, &modules, list)
65391- if (within_module_core(addr, mod)
65392- || within_module_init(addr, mod))
65393+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
65394 return mod;
65395 return NULL;
65396 }
65397@@ -3447,11 +3586,20 @@ bool is_module_text_address(unsigned lon
65398 */
65399 struct module *__module_text_address(unsigned long addr)
65400 {
65401- struct module *mod = __module_address(addr);
65402+ struct module *mod;
65403+
65404+#ifdef CONFIG_X86_32
65405+ addr = ktla_ktva(addr);
65406+#endif
65407+
65408+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
65409+ return NULL;
65410+
65411+ mod = __module_address(addr);
65412+
65413 if (mod) {
65414 /* Make sure it's within the text section. */
65415- if (!within(addr, mod->module_init, mod->init_text_size)
65416- && !within(addr, mod->module_core, mod->core_text_size))
65417+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
65418 mod = NULL;
65419 }
65420 return mod;
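
With core and init each split into rx and rw areas, the single within() checks in get_ksymbol() and __module_text_address() become four per-region predicates (the within_module_{core,init}_{rx,rw} helpers this patch adds to include/linux/module.h). Their logic is a plain half-open range test; a sketch of how such predicates can look, assuming a structure that carries the four base/size pairs introduced above:

#include <stdio.h>
#include <stdint.h>

struct toy_module {
    void *core_rx, *core_rw, *init_rx, *init_rw;
    unsigned long core_size_rx, core_size_rw, init_size_rx, init_size_rw;
};

/* Half-open range test: base <= addr < base + size. */
static int within(uintptr_t addr, const void *base, unsigned long size)
{
    return addr >= (uintptr_t)base && addr < (uintptr_t)base + size;
}

static int within_module_core_rx(uintptr_t a, const struct toy_module *m)
{ return within(a, m->core_rx, m->core_size_rx); }
static int within_module_core_rw(uintptr_t a, const struct toy_module *m)
{ return within(a, m->core_rw, m->core_size_rw); }
static int within_module_init_rx(uintptr_t a, const struct toy_module *m)
{ return within(a, m->init_rx, m->init_size_rx); }
static int within_module_init_rw(uintptr_t a, const struct toy_module *m)
{ return within(a, m->init_rw, m->init_size_rw); }

int main(void)
{
    static char rx[64], rw[64];
    struct toy_module m = { rx, rw, NULL, NULL, sizeof(rx), sizeof(rw), 0, 0 };
    uintptr_t addr = (uintptr_t)&rx[10];

    printf("core_rx %d core_rw %d init_rx %d init_rw %d\n",
           within_module_core_rx(addr, &m), within_module_core_rw(addr, &m),
           within_module_init_rx(addr, &m), within_module_init_rw(addr, &m));
    return 0;
}
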
65421diff -urNp linux-3.1.4/kernel/mutex.c linux-3.1.4/kernel/mutex.c
65422--- linux-3.1.4/kernel/mutex.c 2011-11-11 15:19:27.000000000 -0500
65423+++ linux-3.1.4/kernel/mutex.c 2011-11-16 18:39:08.000000000 -0500
65424@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
65425 spin_lock_mutex(&lock->wait_lock, flags);
65426
65427 debug_mutex_lock_common(lock, &waiter);
65428- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
65429+ debug_mutex_add_waiter(lock, &waiter, task);
65430
65431 /* add waiting tasks to the end of the waitqueue (FIFO): */
65432 list_add_tail(&waiter.list, &lock->wait_list);
65433@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
65434 * TASK_UNINTERRUPTIBLE case.)
65435 */
65436 if (unlikely(signal_pending_state(state, task))) {
65437- mutex_remove_waiter(lock, &waiter,
65438- task_thread_info(task));
65439+ mutex_remove_waiter(lock, &waiter, task);
65440 mutex_release(&lock->dep_map, 1, ip);
65441 spin_unlock_mutex(&lock->wait_lock, flags);
65442
65443@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
65444 done:
65445 lock_acquired(&lock->dep_map, ip);
65446 /* got the lock - rejoice! */
65447- mutex_remove_waiter(lock, &waiter, current_thread_info());
65448+ mutex_remove_waiter(lock, &waiter, task);
65449 mutex_set_owner(lock);
65450
65451 /* set it to 0 if there are no waiters left: */
65452diff -urNp linux-3.1.4/kernel/mutex-debug.c linux-3.1.4/kernel/mutex-debug.c
65453--- linux-3.1.4/kernel/mutex-debug.c 2011-11-11 15:19:27.000000000 -0500
65454+++ linux-3.1.4/kernel/mutex-debug.c 2011-11-16 18:39:08.000000000 -0500
65455@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
65456 }
65457
65458 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65459- struct thread_info *ti)
65460+ struct task_struct *task)
65461 {
65462 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
65463
65464 /* Mark the current thread as blocked on the lock: */
65465- ti->task->blocked_on = waiter;
65466+ task->blocked_on = waiter;
65467 }
65468
65469 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65470- struct thread_info *ti)
65471+ struct task_struct *task)
65472 {
65473 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
65474- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
65475- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
65476- ti->task->blocked_on = NULL;
65477+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
65478+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
65479+ task->blocked_on = NULL;
65480
65481 list_del_init(&waiter->list);
65482 waiter->task = NULL;
65483diff -urNp linux-3.1.4/kernel/mutex-debug.h linux-3.1.4/kernel/mutex-debug.h
65484--- linux-3.1.4/kernel/mutex-debug.h 2011-11-11 15:19:27.000000000 -0500
65485+++ linux-3.1.4/kernel/mutex-debug.h 2011-11-16 18:39:08.000000000 -0500
65486@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
65487 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
65488 extern void debug_mutex_add_waiter(struct mutex *lock,
65489 struct mutex_waiter *waiter,
65490- struct thread_info *ti);
65491+ struct task_struct *task);
65492 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65493- struct thread_info *ti);
65494+ struct task_struct *task);
65495 extern void debug_mutex_unlock(struct mutex *lock);
65496 extern void debug_mutex_init(struct mutex *lock, const char *name,
65497 struct lock_class_key *key);
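
The mutex hunks change debug_mutex_add_waiter()/mutex_remove_waiter() to take the task_struct directly instead of a thread_info whose ->task was then dereferenced, and the callers in __mutex_lock_common() pass the task they already hold. The refactor pattern in miniature -- pass the object itself rather than a wrapper that points at it:

#include <stdio.h>

struct task { const char *comm; const void *blocked_on; };
struct thread_info { struct task *task; };

/* Old style: take the wrapper and immediately reach through it. */
static void add_waiter_old(struct thread_info *ti, const void *waiter)
{
    ti->task->blocked_on = waiter;
}

/* New style: take the task directly; one less dereference, no wrapper needed. */
static void add_waiter_new(struct task *task, const void *waiter)
{
    task->blocked_on = waiter;
}

int main(void)
{
    struct task t = { "worker", NULL };
    struct thread_info ti = { &t };
    int waiter;

    add_waiter_old(&ti, &waiter);
    add_waiter_new(&t, &waiter);
    printf("%s blocked_on set: %d\n", t.comm, t.blocked_on == &waiter);
    return 0;
}
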
65498diff -urNp linux-3.1.4/kernel/padata.c linux-3.1.4/kernel/padata.c
65499--- linux-3.1.4/kernel/padata.c 2011-11-11 15:19:27.000000000 -0500
65500+++ linux-3.1.4/kernel/padata.c 2011-11-16 18:39:08.000000000 -0500
65501@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
65502 padata->pd = pd;
65503 padata->cb_cpu = cb_cpu;
65504
65505- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
65506- atomic_set(&pd->seq_nr, -1);
65507+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
65508+ atomic_set_unchecked(&pd->seq_nr, -1);
65509
65510- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
65511+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
65512
65513 target_cpu = padata_cpu_hash(padata);
65514 queue = per_cpu_ptr(pd->pqueue, target_cpu);
65515@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
65516 padata_init_pqueues(pd);
65517 padata_init_squeues(pd);
65518 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
65519- atomic_set(&pd->seq_nr, -1);
65520+ atomic_set_unchecked(&pd->seq_nr, -1);
65521 atomic_set(&pd->reorder_objects, 0);
65522 atomic_set(&pd->refcnt, 0);
65523 pd->pinst = pinst;
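
padata's seq_nr is a counter that is expected to wrap (it is explicitly reset when it reaches max_seq_nr), so the patch moves it to the atomic_*_unchecked() API, the PaX variant that behaves like a plain atomic counter and is exempt from REFCOUNT overflow detection. A minimal C11 sketch of such an intentionally wrapping sequence counter, using <stdatomic.h> in place of the kernel primitives:

#include <stdio.h>
#include <stdatomic.h>

/* A sequence counter that is allowed to wrap around; overflow is not an
 * error here, which is why the kernel side uses the *_unchecked variants. */
static atomic_uint seq_nr;

static unsigned int next_seq(unsigned int max_seq)
{
    unsigned int expected = max_seq;

    /* When the counter hits max_seq, reset it to (unsigned)-1 so the
     * increment below wraps it back to 0, as padata_do_parallel() does. */
    atomic_compare_exchange_strong(&seq_nr, &expected, (unsigned int)-1);
    return atomic_fetch_add(&seq_nr, 1u) + 1u;
}

int main(void)
{
    for (int i = 0; i < 6; i++)
        printf("seq %u\n", next_seq(3));
    return 0;
}
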
65524diff -urNp linux-3.1.4/kernel/panic.c linux-3.1.4/kernel/panic.c
65525--- linux-3.1.4/kernel/panic.c 2011-11-11 15:19:27.000000000 -0500
65526+++ linux-3.1.4/kernel/panic.c 2011-11-16 18:40:44.000000000 -0500
65527@@ -371,7 +371,7 @@ static void warn_slowpath_common(const c
65528 const char *board;
65529
65530 printk(KERN_WARNING "------------[ cut here ]------------\n");
65531- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
65532+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
65533 board = dmi_get_system_info(DMI_PRODUCT_NAME);
65534 if (board)
65535 printk(KERN_WARNING "Hardware name: %s\n", board);
65536@@ -426,7 +426,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
65537 */
65538 void __stack_chk_fail(void)
65539 {
65540- panic("stack-protector: Kernel stack is corrupted in: %p\n",
65541+ dump_stack();
65542+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
65543 __builtin_return_address(0));
65544 }
65545 EXPORT_SYMBOL(__stack_chk_fail);
65546diff -urNp linux-3.1.4/kernel/pid.c linux-3.1.4/kernel/pid.c
65547--- linux-3.1.4/kernel/pid.c 2011-11-11 15:19:27.000000000 -0500
65548+++ linux-3.1.4/kernel/pid.c 2011-11-16 18:40:44.000000000 -0500
65549@@ -33,6 +33,7 @@
65550 #include <linux/rculist.h>
65551 #include <linux/bootmem.h>
65552 #include <linux/hash.h>
65553+#include <linux/security.h>
65554 #include <linux/pid_namespace.h>
65555 #include <linux/init_task.h>
65556 #include <linux/syscalls.h>
65557@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
65558
65559 int pid_max = PID_MAX_DEFAULT;
65560
65561-#define RESERVED_PIDS 300
65562+#define RESERVED_PIDS 500
65563
65564 int pid_max_min = RESERVED_PIDS + 1;
65565 int pid_max_max = PID_MAX_LIMIT;
65566@@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task);
65567 */
65568 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65569 {
65570+ struct task_struct *task;
65571+
65572 rcu_lockdep_assert(rcu_read_lock_held());
65573- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65574+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65575+
65576+ if (gr_pid_is_chrooted(task))
65577+ return NULL;
65578+
65579+ return task;
65580 }
65581
65582 struct task_struct *find_task_by_vpid(pid_t vnr)
65583@@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pi
65584 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65585 }
65586
65587+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65588+{
65589+ rcu_lockdep_assert(rcu_read_lock_held());
65590+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65591+}
65592+
65593 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65594 {
65595 struct pid *pid;
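
find_task_by_pid_ns() now filters its result through gr_pid_is_chrooted(), so tasks outside the caller's chroot appear not to exist, and the new find_task_by_vpid_unrestricted() keeps the unfiltered behaviour for callers that need it. The shape is a raw lookup wrapped by a visibility policy; a toy version, with is_hidden() standing in for gr_pid_is_chrooted():

#include <stdio.h>
#include <stddef.h>

struct task { int pid; int hidden; };

static struct task table[] = { {100, 0}, {200, 1}, {300, 0} };

/* Analogous to pid_task(find_pid_ns(...), PIDTYPE_PID). */
static struct task *raw_lookup(int pid)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].pid == pid)
            return &table[i];
    return NULL;
}

static int is_hidden(const struct task *t)       /* stand-in for gr_pid_is_chrooted() */
{
    return t && t->hidden;
}

/* Restricted lookup: results the policy hides are reported as "no such task". */
static struct task *find_task(int pid)
{
    struct task *t = raw_lookup(pid);
    return is_hidden(t) ? NULL : t;
}

/* Unrestricted lookup for the few internal callers that must see everything. */
static struct task *find_task_unrestricted(int pid)
{
    return raw_lookup(pid);
}

int main(void)
{
    printf("find_task(200): %p\n", (void *)find_task(200));
    printf("find_task_unrestricted(200): %p\n", (void *)find_task_unrestricted(200));
    return 0;
}
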
65596diff -urNp linux-3.1.4/kernel/posix-cpu-timers.c linux-3.1.4/kernel/posix-cpu-timers.c
65597--- linux-3.1.4/kernel/posix-cpu-timers.c 2011-11-11 15:19:27.000000000 -0500
65598+++ linux-3.1.4/kernel/posix-cpu-timers.c 2011-11-16 18:40:44.000000000 -0500
65599@@ -6,6 +6,7 @@
65600 #include <linux/posix-timers.h>
65601 #include <linux/errno.h>
65602 #include <linux/math64.h>
65603+#include <linux/security.h>
65604 #include <asm/uaccess.h>
65605 #include <linux/kernel_stat.h>
65606 #include <trace/events/timer.h>
65607@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65608
65609 static __init int init_posix_cpu_timers(void)
65610 {
65611- struct k_clock process = {
65612+ static struct k_clock process = {
65613 .clock_getres = process_cpu_clock_getres,
65614 .clock_get = process_cpu_clock_get,
65615 .timer_create = process_cpu_timer_create,
65616 .nsleep = process_cpu_nsleep,
65617 .nsleep_restart = process_cpu_nsleep_restart,
65618 };
65619- struct k_clock thread = {
65620+ static struct k_clock thread = {
65621 .clock_getres = thread_cpu_clock_getres,
65622 .clock_get = thread_cpu_clock_get,
65623 .timer_create = thread_cpu_timer_create,
65624diff -urNp linux-3.1.4/kernel/posix-timers.c linux-3.1.4/kernel/posix-timers.c
65625--- linux-3.1.4/kernel/posix-timers.c 2011-11-11 15:19:27.000000000 -0500
65626+++ linux-3.1.4/kernel/posix-timers.c 2011-11-16 18:40:44.000000000 -0500
65627@@ -43,6 +43,7 @@
65628 #include <linux/idr.h>
65629 #include <linux/posix-clock.h>
65630 #include <linux/posix-timers.h>
65631+#include <linux/grsecurity.h>
65632 #include <linux/syscalls.h>
65633 #include <linux/wait.h>
65634 #include <linux/workqueue.h>
65635@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65636 * which we beg off on and pass to do_sys_settimeofday().
65637 */
65638
65639-static struct k_clock posix_clocks[MAX_CLOCKS];
65640+static struct k_clock *posix_clocks[MAX_CLOCKS];
65641
65642 /*
65643 * These ones are defined below.
65644@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
65645 */
65646 static __init int init_posix_timers(void)
65647 {
65648- struct k_clock clock_realtime = {
65649+ static struct k_clock clock_realtime = {
65650 .clock_getres = hrtimer_get_res,
65651 .clock_get = posix_clock_realtime_get,
65652 .clock_set = posix_clock_realtime_set,
65653@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
65654 .timer_get = common_timer_get,
65655 .timer_del = common_timer_del,
65656 };
65657- struct k_clock clock_monotonic = {
65658+ static struct k_clock clock_monotonic = {
65659 .clock_getres = hrtimer_get_res,
65660 .clock_get = posix_ktime_get_ts,
65661 .nsleep = common_nsleep,
65662@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
65663 .timer_get = common_timer_get,
65664 .timer_del = common_timer_del,
65665 };
65666- struct k_clock clock_monotonic_raw = {
65667+ static struct k_clock clock_monotonic_raw = {
65668 .clock_getres = hrtimer_get_res,
65669 .clock_get = posix_get_monotonic_raw,
65670 };
65671- struct k_clock clock_realtime_coarse = {
65672+ static struct k_clock clock_realtime_coarse = {
65673 .clock_getres = posix_get_coarse_res,
65674 .clock_get = posix_get_realtime_coarse,
65675 };
65676- struct k_clock clock_monotonic_coarse = {
65677+ static struct k_clock clock_monotonic_coarse = {
65678 .clock_getres = posix_get_coarse_res,
65679 .clock_get = posix_get_monotonic_coarse,
65680 };
65681- struct k_clock clock_boottime = {
65682+ static struct k_clock clock_boottime = {
65683 .clock_getres = hrtimer_get_res,
65684 .clock_get = posix_get_boottime,
65685 .nsleep = common_nsleep,
65686@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
65687 .timer_del = common_timer_del,
65688 };
65689
65690+ pax_track_stack();
65691+
65692 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
65693 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
65694 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
65695@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
65696 return;
65697 }
65698
65699- posix_clocks[clock_id] = *new_clock;
65700+ posix_clocks[clock_id] = new_clock;
65701 }
65702 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65703
65704@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
65705 return (id & CLOCKFD_MASK) == CLOCKFD ?
65706 &clock_posix_dynamic : &clock_posix_cpu;
65707
65708- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65709+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65710 return NULL;
65711- return &posix_clocks[id];
65712+ return posix_clocks[id];
65713 }
65714
65715 static int common_timer_create(struct k_itimer *new_timer)
65716@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
65717 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65718 return -EFAULT;
65719
65720+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
65721+ have their clock_set fptr set to a nosettime dummy function.
65722+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
65723+ call common_clock_set, which calls do_sys_settimeofday, which
65724+ we hook.
65725+ */
65726+
65727 return kc->clock_set(which_clock, &new_tp);
65728 }
65729
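
init_posix_timers() builds its k_clock descriptors in local variables, and posix_timers_register_clock() used to copy them into posix_clocks[]; the patch makes the descriptors static and stores pointers instead, which lets the table hold long-lived objects that can later be made read-only, but obliges clockid_to_kclock() to treat a NULL slot as "not registered". A small sketch of pointer-based registration with that extra check:

#include <stdio.h>
#include <stddef.h>

struct k_clock_toy {
    const char *name;
    int (*getres)(void);
};

#define MAX_CLOCKS_TOY 4
static struct k_clock_toy *clocks[MAX_CLOCKS_TOY];   /* table of pointers, not copies */

static int dummy_getres(void) { return 1; }

static void register_clock(int id, struct k_clock_toy *clk)
{
    if (id >= 0 && id < MAX_CLOCKS_TOY)
        clocks[id] = clk;        /* the object must outlive registration: make it static */
}

static struct k_clock_toy *clockid_to_kclock(int id)
{
    /* Pointer storage means an empty slot is NULL, so check that first. */
    if (id < 0 || id >= MAX_CLOCKS_TOY || !clocks[id] || !clocks[id]->getres)
        return NULL;
    return clocks[id];
}

int main(void)
{
    static struct k_clock_toy realtime = { "realtime", dummy_getres };  /* static, as in the patch */

    register_clock(0, &realtime);
    printf("clock 0: %s\n", clockid_to_kclock(0) ? clockid_to_kclock(0)->name : "none");
    printf("clock 1: %s\n", clockid_to_kclock(1) ? clockid_to_kclock(1)->name : "none");
    return 0;
}
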
65730diff -urNp linux-3.1.4/kernel/power/poweroff.c linux-3.1.4/kernel/power/poweroff.c
65731--- linux-3.1.4/kernel/power/poweroff.c 2011-11-11 15:19:27.000000000 -0500
65732+++ linux-3.1.4/kernel/power/poweroff.c 2011-11-16 18:39:08.000000000 -0500
65733@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
65734 .enable_mask = SYSRQ_ENABLE_BOOT,
65735 };
65736
65737-static int pm_sysrq_init(void)
65738+static int __init pm_sysrq_init(void)
65739 {
65740 register_sysrq_key('o', &sysrq_poweroff_op);
65741 return 0;
65742diff -urNp linux-3.1.4/kernel/power/process.c linux-3.1.4/kernel/power/process.c
65743--- linux-3.1.4/kernel/power/process.c 2011-11-11 15:19:27.000000000 -0500
65744+++ linux-3.1.4/kernel/power/process.c 2011-11-16 18:39:08.000000000 -0500
65745@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
65746 u64 elapsed_csecs64;
65747 unsigned int elapsed_csecs;
65748 bool wakeup = false;
65749+ bool timedout = false;
65750
65751 do_gettimeofday(&start);
65752
65753@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
65754
65755 while (true) {
65756 todo = 0;
65757+ if (time_after(jiffies, end_time))
65758+ timedout = true;
65759 read_lock(&tasklist_lock);
65760 do_each_thread(g, p) {
65761 if (frozen(p) || !freezable(p))
65762@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
65763 * try_to_stop() after schedule() in ptrace/signal
65764 * stop sees TIF_FREEZE.
65765 */
65766- if (!task_is_stopped_or_traced(p) &&
65767- !freezer_should_skip(p))
65768+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65769 todo++;
65770+ if (timedout) {
65771+ printk(KERN_ERR "Task refusing to freeze:\n");
65772+ sched_show_task(p);
65773+ }
65774+ }
65775 } while_each_thread(g, p);
65776 read_unlock(&tasklist_lock);
65777
65778@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
65779 todo += wq_busy;
65780 }
65781
65782- if (!todo || time_after(jiffies, end_time))
65783+ if (!todo || timedout)
65784 break;
65785
65786 if (pm_wakeup_pending()) {
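
try_to_freeze_tasks() used to evaluate the deadline only at the bottom of the loop; the patch samples it at the top into a timedout flag so that, on the pass where the budget has run out, every task still refusing to freeze is reported via sched_show_task() before the loop exits. The control-flow change in isolation, with a one-second budget and a stub standing in for the real freezer state:

#include <stdio.h>
#include <time.h>
#include <stdbool.h>

static bool still_busy(void)
{
    return true;                         /* pretend one task never manages to freeze */
}

int main(void)
{
    time_t end_time = time(NULL) + 1;    /* small time budget, like the freezer's timeout */
    bool timedout = false;
    int todo;
    long pass = 0;

    while (true) {
        todo = 0;
        if (time(NULL) > end_time)       /* sample the deadline at the top of the pass */
            timedout = true;

        if (still_busy()) {
            todo++;
            if (timedout)                /* report the offender on the final, timed-out pass */
                printf("task refusing to freeze (pass %ld)\n", pass);
        }
        pass++;

        if (!todo || timedout)           /* the old code re-evaluated time_after() here instead */
            break;
    }
    printf("%s after %ld passes\n", timedout ? "timed out" : "done", pass);
    return 0;
}
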
65787diff -urNp linux-3.1.4/kernel/printk.c linux-3.1.4/kernel/printk.c
65788--- linux-3.1.4/kernel/printk.c 2011-11-11 15:19:27.000000000 -0500
65789+++ linux-3.1.4/kernel/printk.c 2011-11-16 19:38:11.000000000 -0500
65790@@ -313,6 +313,11 @@ static int check_syslog_permissions(int
65791 if (from_file && type != SYSLOG_ACTION_OPEN)
65792 return 0;
65793
65794+#ifdef CONFIG_GRKERNSEC_DMESG
65795+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65796+ return -EPERM;
65797+#endif
65798+
65799 if (syslog_action_restricted(type)) {
65800 if (capable(CAP_SYSLOG))
65801 return 0;
65802diff -urNp linux-3.1.4/kernel/profile.c linux-3.1.4/kernel/profile.c
65803--- linux-3.1.4/kernel/profile.c 2011-11-11 15:19:27.000000000 -0500
65804+++ linux-3.1.4/kernel/profile.c 2011-11-16 18:39:08.000000000 -0500
65805@@ -39,7 +39,7 @@ struct profile_hit {
65806 /* Oprofile timer tick hook */
65807 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65808
65809-static atomic_t *prof_buffer;
65810+static atomic_unchecked_t *prof_buffer;
65811 static unsigned long prof_len, prof_shift;
65812
65813 int prof_on __read_mostly;
65814@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65815 hits[i].pc = 0;
65816 continue;
65817 }
65818- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65819+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65820 hits[i].hits = hits[i].pc = 0;
65821 }
65822 }
65823@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
65824 * Add the current hit(s) and flush the write-queue out
65825 * to the global buffer:
65826 */
65827- atomic_add(nr_hits, &prof_buffer[pc]);
65828+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65829 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65830- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65831+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65832 hits[i].pc = hits[i].hits = 0;
65833 }
65834 out:
65835@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
65836 {
65837 unsigned long pc;
65838 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65839- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65840+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65841 }
65842 #endif /* !CONFIG_SMP */
65843
65844@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
65845 return -EFAULT;
65846 buf++; p++; count--; read++;
65847 }
65848- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65849+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65850 if (copy_to_user(buf, (void *)pnt, count))
65851 return -EFAULT;
65852 read += count;
65853@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
65854 }
65855 #endif
65856 profile_discard_flip_buffers();
65857- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65858+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65859 return count;
65860 }
65861
65862diff -urNp linux-3.1.4/kernel/ptrace.c linux-3.1.4/kernel/ptrace.c
65863--- linux-3.1.4/kernel/ptrace.c 2011-11-11 15:19:27.000000000 -0500
65864+++ linux-3.1.4/kernel/ptrace.c 2011-11-16 19:50:22.000000000 -0500
65865@@ -161,7 +161,8 @@ int ptrace_check_attach(struct task_stru
65866 return ret;
65867 }
65868
65869-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65870+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65871+ unsigned int log)
65872 {
65873 const struct cred *cred = current_cred(), *tcred;
65874
65875@@ -187,7 +188,8 @@ int __ptrace_may_access(struct task_stru
65876 cred->gid == tcred->sgid &&
65877 cred->gid == tcred->gid))
65878 goto ok;
65879- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65880+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65881+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65882 goto ok;
65883 rcu_read_unlock();
65884 return -EPERM;
65885@@ -196,7 +198,9 @@ ok:
65886 smp_rmb();
65887 if (task->mm)
65888 dumpable = get_dumpable(task->mm);
65889- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65890+ if (!dumpable &&
65891+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65892+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65893 return -EPERM;
65894
65895 return security_ptrace_access_check(task, mode);
65896@@ -206,7 +210,16 @@ bool ptrace_may_access(struct task_struc
65897 {
65898 int err;
65899 task_lock(task);
65900- err = __ptrace_may_access(task, mode);
65901+ err = __ptrace_may_access(task, mode, 0);
65902+ task_unlock(task);
65903+ return !err;
65904+}
65905+
65906+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65907+{
65908+ int err;
65909+ task_lock(task);
65910+ err = __ptrace_may_access(task, mode, 1);
65911 task_unlock(task);
65912 return !err;
65913 }
65914@@ -251,7 +264,7 @@ static int ptrace_attach(struct task_str
65915 goto out;
65916
65917 task_lock(task);
65918- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65919+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65920 task_unlock(task);
65921 if (retval)
65922 goto unlock_creds;
65923@@ -266,7 +279,7 @@ static int ptrace_attach(struct task_str
65924 task->ptrace = PT_PTRACED;
65925 if (seize)
65926 task->ptrace |= PT_SEIZED;
65927- if (task_ns_capable(task, CAP_SYS_PTRACE))
65928+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65929 task->ptrace |= PT_PTRACE_CAP;
65930
65931 __ptrace_link(task, current);
65932@@ -461,6 +474,8 @@ int ptrace_readdata(struct task_struct *
65933 {
65934 int copied = 0;
65935
65936+ pax_track_stack();
65937+
65938 while (len > 0) {
65939 char buf[128];
65940 int this_len, retval;
65941@@ -472,7 +487,7 @@ int ptrace_readdata(struct task_struct *
65942 break;
65943 return -EIO;
65944 }
65945- if (copy_to_user(dst, buf, retval))
65946+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65947 return -EFAULT;
65948 copied += retval;
65949 src += retval;
65950@@ -486,6 +501,8 @@ int ptrace_writedata(struct task_struct
65951 {
65952 int copied = 0;
65953
65954+ pax_track_stack();
65955+
65956 while (len > 0) {
65957 char buf[128];
65958 int this_len, retval;
65959@@ -669,10 +686,12 @@ int ptrace_request(struct task_struct *c
65960 bool seized = child->ptrace & PT_SEIZED;
65961 int ret = -EIO;
65962 siginfo_t siginfo, *si;
65963- void __user *datavp = (void __user *) data;
65964+ void __user *datavp = (__force void __user *) data;
65965 unsigned long __user *datalp = datavp;
65966 unsigned long flags;
65967
65968+ pax_track_stack();
65969+
65970 switch (request) {
65971 case PTRACE_PEEKTEXT:
65972 case PTRACE_PEEKDATA:
65973@@ -871,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65974 goto out;
65975 }
65976
65977+ if (gr_handle_ptrace(child, request)) {
65978+ ret = -EPERM;
65979+ goto out_put_task_struct;
65980+ }
65981+
65982 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65983 ret = ptrace_attach(child, request, data);
65984 /*
65985 * Some architectures need to do book-keeping after
65986 * a ptrace attach.
65987 */
65988- if (!ret)
65989+ if (!ret) {
65990 arch_ptrace_attach(child);
65991+ gr_audit_ptrace(child);
65992+ }
65993 goto out_put_task_struct;
65994 }
65995
65996@@ -904,7 +930,7 @@ int generic_ptrace_peekdata(struct task_
65997 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65998 if (copied != sizeof(tmp))
65999 return -EIO;
66000- return put_user(tmp, (unsigned long __user *)data);
66001+ return put_user(tmp, (__force unsigned long __user *)data);
66002 }
66003
66004 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
66005@@ -927,6 +953,8 @@ int compat_ptrace_request(struct task_st
66006 siginfo_t siginfo;
66007 int ret;
66008
66009+ pax_track_stack();
66010+
66011 switch (request) {
66012 case PTRACE_PEEKTEXT:
66013 case PTRACE_PEEKDATA:
66014@@ -1014,14 +1042,21 @@ asmlinkage long compat_sys_ptrace(compat
66015 goto out;
66016 }
66017
66018+ if (gr_handle_ptrace(child, request)) {
66019+ ret = -EPERM;
66020+ goto out_put_task_struct;
66021+ }
66022+
66023 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
66024 ret = ptrace_attach(child, request, data);
66025 /*
66026 * Some architectures need to do book-keeping after
66027 * a ptrace attach.
66028 */
66029- if (!ret)
66030+ if (!ret) {
66031 arch_ptrace_attach(child);
66032+ gr_audit_ptrace(child);
66033+ }
66034 goto out_put_task_struct;
66035 }
66036
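
__ptrace_may_access() becomes static and gains a log parameter: the probing wrapper ptrace_may_access() uses the non-logging capability checks, while an actual attach goes through the new ptrace_may_access_log() or the log==1 call in ptrace_attach(), so a denied CAP_SYS_PTRACE only reaches the audit log when tracing was really attempted. A sketch of that split, with the two capability checkers reduced to stubs:

#include <stdio.h>
#include <stdbool.h>

/* Stubs for capable() / capable_nolog(): same decision, different side effect. */
static bool capable_nolog(int cap)
{
    (void)cap;
    return false;                        /* pretend the caller lacks the capability */
}

static bool capable_logged(int cap)
{
    printf("audit: capability %d denied\n", cap);   /* only this path emits a log entry */
    return false;
}

static int may_access(bool same_cred, int cap, bool log)
{
    if (same_cred)
        return 0;
    if (log ? capable_logged(cap) : capable_nolog(cap))
        return 0;
    return -1;                           /* -EPERM in the kernel */
}

/* Probing helper: quiet. */
static bool may_access_probe(bool same_cred, int cap)
{
    return may_access(same_cred, cap, false) == 0;
}

/* Attach path: the denial is worth logging. */
static bool may_access_attach(bool same_cred, int cap)
{
    return may_access(same_cred, cap, true) == 0;
}

int main(void)
{
    printf("probe:  %d\n", may_access_probe(false, 19));
    printf("attach: %d\n", may_access_attach(false, 19));
    return 0;
}
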
66037diff -urNp linux-3.1.4/kernel/rcutorture.c linux-3.1.4/kernel/rcutorture.c
66038--- linux-3.1.4/kernel/rcutorture.c 2011-11-11 15:19:27.000000000 -0500
66039+++ linux-3.1.4/kernel/rcutorture.c 2011-11-16 18:39:08.000000000 -0500
66040@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
66041 { 0 };
66042 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
66043 { 0 };
66044-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
66045-static atomic_t n_rcu_torture_alloc;
66046-static atomic_t n_rcu_torture_alloc_fail;
66047-static atomic_t n_rcu_torture_free;
66048-static atomic_t n_rcu_torture_mberror;
66049-static atomic_t n_rcu_torture_error;
66050+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
66051+static atomic_unchecked_t n_rcu_torture_alloc;
66052+static atomic_unchecked_t n_rcu_torture_alloc_fail;
66053+static atomic_unchecked_t n_rcu_torture_free;
66054+static atomic_unchecked_t n_rcu_torture_mberror;
66055+static atomic_unchecked_t n_rcu_torture_error;
66056 static long n_rcu_torture_boost_ktrerror;
66057 static long n_rcu_torture_boost_rterror;
66058 static long n_rcu_torture_boost_failure;
66059@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
66060
66061 spin_lock_bh(&rcu_torture_lock);
66062 if (list_empty(&rcu_torture_freelist)) {
66063- atomic_inc(&n_rcu_torture_alloc_fail);
66064+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
66065 spin_unlock_bh(&rcu_torture_lock);
66066 return NULL;
66067 }
66068- atomic_inc(&n_rcu_torture_alloc);
66069+ atomic_inc_unchecked(&n_rcu_torture_alloc);
66070 p = rcu_torture_freelist.next;
66071 list_del_init(p);
66072 spin_unlock_bh(&rcu_torture_lock);
66073@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
66074 static void
66075 rcu_torture_free(struct rcu_torture *p)
66076 {
66077- atomic_inc(&n_rcu_torture_free);
66078+ atomic_inc_unchecked(&n_rcu_torture_free);
66079 spin_lock_bh(&rcu_torture_lock);
66080 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
66081 spin_unlock_bh(&rcu_torture_lock);
66082@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
66083 i = rp->rtort_pipe_count;
66084 if (i > RCU_TORTURE_PIPE_LEN)
66085 i = RCU_TORTURE_PIPE_LEN;
66086- atomic_inc(&rcu_torture_wcount[i]);
66087+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
66088 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
66089 rp->rtort_mbtest = 0;
66090 rcu_torture_free(rp);
66091@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
66092 i = rp->rtort_pipe_count;
66093 if (i > RCU_TORTURE_PIPE_LEN)
66094 i = RCU_TORTURE_PIPE_LEN;
66095- atomic_inc(&rcu_torture_wcount[i]);
66096+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
66097 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
66098 rp->rtort_mbtest = 0;
66099 list_del(&rp->rtort_free);
66100@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
66101 i = old_rp->rtort_pipe_count;
66102 if (i > RCU_TORTURE_PIPE_LEN)
66103 i = RCU_TORTURE_PIPE_LEN;
66104- atomic_inc(&rcu_torture_wcount[i]);
66105+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
66106 old_rp->rtort_pipe_count++;
66107 cur_ops->deferred_free(old_rp);
66108 }
66109@@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned l
66110 return;
66111 }
66112 if (p->rtort_mbtest == 0)
66113- atomic_inc(&n_rcu_torture_mberror);
66114+ atomic_inc_unchecked(&n_rcu_torture_mberror);
66115 spin_lock(&rand_lock);
66116 cur_ops->read_delay(&rand);
66117 n_rcu_torture_timers++;
66118@@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg)
66119 continue;
66120 }
66121 if (p->rtort_mbtest == 0)
66122- atomic_inc(&n_rcu_torture_mberror);
66123+ atomic_inc_unchecked(&n_rcu_torture_mberror);
66124 cur_ops->read_delay(&rand);
66125 preempt_disable();
66126 pipe_count = p->rtort_pipe_count;
66127@@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page)
66128 rcu_torture_current,
66129 rcu_torture_current_version,
66130 list_empty(&rcu_torture_freelist),
66131- atomic_read(&n_rcu_torture_alloc),
66132- atomic_read(&n_rcu_torture_alloc_fail),
66133- atomic_read(&n_rcu_torture_free),
66134- atomic_read(&n_rcu_torture_mberror),
66135+ atomic_read_unchecked(&n_rcu_torture_alloc),
66136+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
66137+ atomic_read_unchecked(&n_rcu_torture_free),
66138+ atomic_read_unchecked(&n_rcu_torture_mberror),
66139 n_rcu_torture_boost_ktrerror,
66140 n_rcu_torture_boost_rterror,
66141 n_rcu_torture_boost_failure,
66142 n_rcu_torture_boosts,
66143 n_rcu_torture_timers);
66144- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
66145+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
66146 n_rcu_torture_boost_ktrerror != 0 ||
66147 n_rcu_torture_boost_rterror != 0 ||
66148 n_rcu_torture_boost_failure != 0)
66149@@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page)
66150 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
66151 if (i > 1) {
66152 cnt += sprintf(&page[cnt], "!!! ");
66153- atomic_inc(&n_rcu_torture_error);
66154+ atomic_inc_unchecked(&n_rcu_torture_error);
66155 WARN_ON_ONCE(1);
66156 }
66157 cnt += sprintf(&page[cnt], "Reader Pipe: ");
66158@@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page)
66159 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
66160 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
66161 cnt += sprintf(&page[cnt], " %d",
66162- atomic_read(&rcu_torture_wcount[i]));
66163+ atomic_read_unchecked(&rcu_torture_wcount[i]));
66164 }
66165 cnt += sprintf(&page[cnt], "\n");
66166 if (cur_ops->stats)
66167@@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void)
66168
66169 if (cur_ops->cleanup)
66170 cur_ops->cleanup();
66171- if (atomic_read(&n_rcu_torture_error))
66172+ if (atomic_read_unchecked(&n_rcu_torture_error))
66173 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
66174 else
66175 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
66176@@ -1474,17 +1474,17 @@ rcu_torture_init(void)
66177
66178 rcu_torture_current = NULL;
66179 rcu_torture_current_version = 0;
66180- atomic_set(&n_rcu_torture_alloc, 0);
66181- atomic_set(&n_rcu_torture_alloc_fail, 0);
66182- atomic_set(&n_rcu_torture_free, 0);
66183- atomic_set(&n_rcu_torture_mberror, 0);
66184- atomic_set(&n_rcu_torture_error, 0);
66185+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
66186+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
66187+ atomic_set_unchecked(&n_rcu_torture_free, 0);
66188+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
66189+ atomic_set_unchecked(&n_rcu_torture_error, 0);
66190 n_rcu_torture_boost_ktrerror = 0;
66191 n_rcu_torture_boost_rterror = 0;
66192 n_rcu_torture_boost_failure = 0;
66193 n_rcu_torture_boosts = 0;
66194 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
66195- atomic_set(&rcu_torture_wcount[i], 0);
66196+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
66197 for_each_possible_cpu(cpu) {
66198 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
66199 per_cpu(rcu_torture_count, cpu)[i] = 0;
66200diff -urNp linux-3.1.4/kernel/rcutree.c linux-3.1.4/kernel/rcutree.c
66201--- linux-3.1.4/kernel/rcutree.c 2011-11-11 15:19:27.000000000 -0500
66202+++ linux-3.1.4/kernel/rcutree.c 2011-11-16 18:39:08.000000000 -0500
66203@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
66204 }
66205 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
66206 smp_mb__before_atomic_inc(); /* See above. */
66207- atomic_inc(&rdtp->dynticks);
66208+ atomic_inc_unchecked(&rdtp->dynticks);
66209 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
66210- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
66211+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
66212 local_irq_restore(flags);
66213
66214 /* If the interrupt queued a callback, get out of dyntick mode. */
66215@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
66216 return;
66217 }
66218 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
66219- atomic_inc(&rdtp->dynticks);
66220+ atomic_inc_unchecked(&rdtp->dynticks);
66221 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
66222 smp_mb__after_atomic_inc(); /* See above. */
66223- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
66224+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
66225 local_irq_restore(flags);
66226 }
66227
66228@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
66229 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
66230
66231 if (rdtp->dynticks_nmi_nesting == 0 &&
66232- (atomic_read(&rdtp->dynticks) & 0x1))
66233+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
66234 return;
66235 rdtp->dynticks_nmi_nesting++;
66236 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
66237- atomic_inc(&rdtp->dynticks);
66238+ atomic_inc_unchecked(&rdtp->dynticks);
66239 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
66240 smp_mb__after_atomic_inc(); /* See above. */
66241- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
66242+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
66243 }
66244
66245 /**
66246@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
66247 return;
66248 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
66249 smp_mb__before_atomic_inc(); /* See above. */
66250- atomic_inc(&rdtp->dynticks);
66251+ atomic_inc_unchecked(&rdtp->dynticks);
66252 smp_mb__after_atomic_inc(); /* Force delay to next write. */
66253- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
66254+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
66255 }
66256
66257 /**
66258@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
66259 */
66260 static int dyntick_save_progress_counter(struct rcu_data *rdp)
66261 {
66262- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
66263+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
66264 return 0;
66265 }
66266
66267@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
66268 unsigned long curr;
66269 unsigned long snap;
66270
66271- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
66272+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
66273 snap = (unsigned long)rdp->dynticks_snap;
66274
66275 /*
66276@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
66277 /*
66278 * Do softirq processing for the current CPU.
66279 */
66280-static void rcu_process_callbacks(struct softirq_action *unused)
66281+static void rcu_process_callbacks(void)
66282 {
66283 __rcu_process_callbacks(&rcu_sched_state,
66284 &__get_cpu_var(rcu_sched_data));
66285diff -urNp linux-3.1.4/kernel/rcutree.h linux-3.1.4/kernel/rcutree.h
66286--- linux-3.1.4/kernel/rcutree.h 2011-11-11 15:19:27.000000000 -0500
66287+++ linux-3.1.4/kernel/rcutree.h 2011-11-16 18:39:08.000000000 -0500
66288@@ -86,7 +86,7 @@
66289 struct rcu_dynticks {
66290 int dynticks_nesting; /* Track irq/process nesting level. */
66291 int dynticks_nmi_nesting; /* Track NMI nesting level. */
66292- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
66293+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
66294 };
66295
66296 /* RCU's kthread states for tracing. */
66297diff -urNp linux-3.1.4/kernel/rcutree_plugin.h linux-3.1.4/kernel/rcutree_plugin.h
66298--- linux-3.1.4/kernel/rcutree_plugin.h 2011-11-11 15:19:27.000000000 -0500
66299+++ linux-3.1.4/kernel/rcutree_plugin.h 2011-12-02 17:38:47.000000000 -0500
66300@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
66301
66302 /* Clean up and exit. */
66303 smp_mb(); /* ensure expedited GP seen before counter increment. */
66304- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
66305+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
66306 unlock_mb_ret:
66307 mutex_unlock(&sync_rcu_preempt_exp_mutex);
66308 mb_ret:
66309@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
66310
66311 #else /* #ifndef CONFIG_SMP */
66312
66313-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
66314-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
66315+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
66316+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
66317
66318 static int synchronize_sched_expedited_cpu_stop(void *data)
66319 {
66320@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
66321 int firstsnap, s, snap, trycount = 0;
66322
66323 /* Note that atomic_inc_return() implies full memory barrier. */
66324- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
66325+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
66326 get_online_cpus();
66327
66328 /*
66329@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
66330 }
66331
66332 /* Check to see if someone else did our work for us. */
66333- s = atomic_read(&sync_sched_expedited_done);
66334+ s = atomic_read_unchecked(&sync_sched_expedited_done);
66335 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
66336 smp_mb(); /* ensure test happens before caller kfree */
66337 return;
66338@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
66339 * grace period works for us.
66340 */
66341 get_online_cpus();
66342- snap = atomic_read(&sync_sched_expedited_started) - 1;
66343+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
66344 smp_mb(); /* ensure read is before try_stop_cpus(). */
66345 }
66346
66347@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
66348 * than we did beat us to the punch.
66349 */
66350 do {
66351- s = atomic_read(&sync_sched_expedited_done);
66352+ s = atomic_read_unchecked(&sync_sched_expedited_done);
66353 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
66354 smp_mb(); /* ensure test happens before caller kfree */
66355 break;
66356 }
66357- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
66358+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
66359
66360 put_online_cpus();
66361 }
66362@@ -1953,7 +1953,7 @@ int rcu_needs_cpu(int cpu)
66363 for_each_online_cpu(thatcpu) {
66364 if (thatcpu == cpu)
66365 continue;
66366- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
66367+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
66368 thatcpu).dynticks);
66369 smp_mb(); /* Order sampling of snap with end of grace period. */
66370 if ((snap & 0x1) != 0) {
66371diff -urNp linux-3.1.4/kernel/rcutree_trace.c linux-3.1.4/kernel/rcutree_trace.c
66372--- linux-3.1.4/kernel/rcutree_trace.c 2011-11-11 15:19:27.000000000 -0500
66373+++ linux-3.1.4/kernel/rcutree_trace.c 2011-12-02 17:38:47.000000000 -0500
66374@@ -74,7 +74,7 @@ static void print_one_rcu_data(struct se
66375 rdp->qs_pending);
66376 #ifdef CONFIG_NO_HZ
66377 seq_printf(m, " dt=%d/%d/%d df=%lu",
66378- atomic_read(&rdp->dynticks->dynticks),
66379+ atomic_read_unchecked(&rdp->dynticks->dynticks),
66380 rdp->dynticks->dynticks_nesting,
66381 rdp->dynticks->dynticks_nmi_nesting,
66382 rdp->dynticks_fqs);
66383@@ -148,7 +148,7 @@ static void print_one_rcu_data_csv(struc
66384 rdp->qs_pending);
66385 #ifdef CONFIG_NO_HZ
66386 seq_printf(m, ",%d,%d,%d,%lu",
66387- atomic_read(&rdp->dynticks->dynticks),
66388+ atomic_read_unchecked(&rdp->dynticks->dynticks),
66389 rdp->dynticks->dynticks_nesting,
66390 rdp->dynticks->dynticks_nmi_nesting,
66391 rdp->dynticks_fqs);
66392diff -urNp linux-3.1.4/kernel/relay.c linux-3.1.4/kernel/relay.c
66393--- linux-3.1.4/kernel/relay.c 2011-11-11 15:19:27.000000000 -0500
66394+++ linux-3.1.4/kernel/relay.c 2011-11-16 18:40:44.000000000 -0500
66395@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
66396 };
66397 ssize_t ret;
66398
66399+ pax_track_stack();
66400+
66401 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
66402 return 0;
66403 if (splice_grow_spd(pipe, &spd))
66404diff -urNp linux-3.1.4/kernel/resource.c linux-3.1.4/kernel/resource.c
66405--- linux-3.1.4/kernel/resource.c 2011-11-11 15:19:27.000000000 -0500
66406+++ linux-3.1.4/kernel/resource.c 2011-11-16 18:40:44.000000000 -0500
66407@@ -141,8 +141,18 @@ static const struct file_operations proc
66408
66409 static int __init ioresources_init(void)
66410 {
66411+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66412+#ifdef CONFIG_GRKERNSEC_PROC_USER
66413+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
66414+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
66415+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66416+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
66417+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
66418+#endif
66419+#else
66420 proc_create("ioports", 0, NULL, &proc_ioports_operations);
66421 proc_create("iomem", 0, NULL, &proc_iomem_operations);
66422+#endif
66423 return 0;
66424 }
66425 __initcall(ioresources_init);
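
ioresources_init() -- like proc_modules_init() earlier in this patch -- now picks the /proc entry mode at compile time: owner-only under GRKERNSEC_PROC_USER, owner-plus-group under GRKERNSEC_PROC_USERGROUP, otherwise the old mode 0, which procfs has traditionally treated as world-readable for files. The same selection outside the kernel is just a conditional mode constant; a small sketch using POSIX octal modes:

#include <stdio.h>
#include <sys/stat.h>

/* Compile-time policy knobs, analogous to the GRKERNSEC_PROC_* options. */
/* #define HARDEN_PROC_USER */
/* #define HARDEN_PROC_USERGROUP */

static mode_t proc_entry_mode(void)
{
#if defined(HARDEN_PROC_USER)
    return S_IRUSR;                      /* 0400: owner only */
#elif defined(HARDEN_PROC_USERGROUP)
    return S_IRUSR | S_IRGRP;            /* 0440: owner and group */
#else
    return S_IRUSR | S_IRGRP | S_IROTH;  /* 0444: world readable, the old default */
#endif
}

int main(void)
{
    printf("would create /proc entries with mode %04o\n", (unsigned)proc_entry_mode());
    return 0;
}
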
66426diff -urNp linux-3.1.4/kernel/rtmutex-tester.c linux-3.1.4/kernel/rtmutex-tester.c
66427--- linux-3.1.4/kernel/rtmutex-tester.c 2011-11-11 15:19:27.000000000 -0500
66428+++ linux-3.1.4/kernel/rtmutex-tester.c 2011-11-16 18:39:08.000000000 -0500
66429@@ -20,7 +20,7 @@
66430 #define MAX_RT_TEST_MUTEXES 8
66431
66432 static spinlock_t rttest_lock;
66433-static atomic_t rttest_event;
66434+static atomic_unchecked_t rttest_event;
66435
66436 struct test_thread_data {
66437 int opcode;
66438@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
66439
66440 case RTTEST_LOCKCONT:
66441 td->mutexes[td->opdata] = 1;
66442- td->event = atomic_add_return(1, &rttest_event);
66443+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66444 return 0;
66445
66446 case RTTEST_RESET:
66447@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
66448 return 0;
66449
66450 case RTTEST_RESETEVENT:
66451- atomic_set(&rttest_event, 0);
66452+ atomic_set_unchecked(&rttest_event, 0);
66453 return 0;
66454
66455 default:
66456@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
66457 return ret;
66458
66459 td->mutexes[id] = 1;
66460- td->event = atomic_add_return(1, &rttest_event);
66461+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66462 rt_mutex_lock(&mutexes[id]);
66463- td->event = atomic_add_return(1, &rttest_event);
66464+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66465 td->mutexes[id] = 4;
66466 return 0;
66467
66468@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
66469 return ret;
66470
66471 td->mutexes[id] = 1;
66472- td->event = atomic_add_return(1, &rttest_event);
66473+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66474 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
66475- td->event = atomic_add_return(1, &rttest_event);
66476+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66477 td->mutexes[id] = ret ? 0 : 4;
66478 return ret ? -EINTR : 0;
66479
66480@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
66481 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
66482 return ret;
66483
66484- td->event = atomic_add_return(1, &rttest_event);
66485+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66486 rt_mutex_unlock(&mutexes[id]);
66487- td->event = atomic_add_return(1, &rttest_event);
66488+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66489 td->mutexes[id] = 0;
66490 return 0;
66491
66492@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
66493 break;
66494
66495 td->mutexes[dat] = 2;
66496- td->event = atomic_add_return(1, &rttest_event);
66497+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66498 break;
66499
66500 default:
66501@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
66502 return;
66503
66504 td->mutexes[dat] = 3;
66505- td->event = atomic_add_return(1, &rttest_event);
66506+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66507 break;
66508
66509 case RTTEST_LOCKNOWAIT:
66510@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
66511 return;
66512
66513 td->mutexes[dat] = 1;
66514- td->event = atomic_add_return(1, &rttest_event);
66515+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66516 return;
66517
66518 default:
66519diff -urNp linux-3.1.4/kernel/sched_autogroup.c linux-3.1.4/kernel/sched_autogroup.c
66520--- linux-3.1.4/kernel/sched_autogroup.c 2011-11-11 15:19:27.000000000 -0500
66521+++ linux-3.1.4/kernel/sched_autogroup.c 2011-11-16 18:39:08.000000000 -0500
66522@@ -7,7 +7,7 @@
66523
66524 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
66525 static struct autogroup autogroup_default;
66526-static atomic_t autogroup_seq_nr;
66527+static atomic_unchecked_t autogroup_seq_nr;
66528
66529 static void __init autogroup_init(struct task_struct *init_task)
66530 {
66531@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
66532
66533 kref_init(&ag->kref);
66534 init_rwsem(&ag->lock);
66535- ag->id = atomic_inc_return(&autogroup_seq_nr);
66536+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66537 ag->tg = tg;
66538 #ifdef CONFIG_RT_GROUP_SCHED
66539 /*
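rttest_event and autogroup_seq_nr in the two hunks above are pure event/sequence counters, so the patch moves them to the *_unchecked atomic variants, which under PaX's REFCOUNT hardening skip the overflow detection applied to ordinary atomic_t. A hedged userspace analogue using C11 atomics; checked_inc() and unchecked_inc() are invented names, not kernel API:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* "Checked" increment: report and abort when the counter just wrapped,
 * roughly what the hardened atomic_t aims for on reference counts. */
static int checked_inc(atomic_int *v)
{
	int old = atomic_fetch_add(v, 1);

	if (old == INT_MAX) {
		fprintf(stderr, "counter overflow detected\n");
		abort();
	}
	return old + 1;
}

/* "Unchecked" increment: plain wrapping fetch-add, fine for statistics
 * and sequence numbers such as rttest_event or autogroup_seq_nr. */
static int unchecked_inc(atomic_int *v)
{
	return atomic_fetch_add(v, 1) + 1;
}

int main(void)
{
	atomic_int refs = 1, events = 0;

	printf("refs=%d events=%d\n", checked_inc(&refs), unchecked_inc(&events));
	return 0;
}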
66540diff -urNp linux-3.1.4/kernel/sched.c linux-3.1.4/kernel/sched.c
66541--- linux-3.1.4/kernel/sched.c 2011-11-11 15:19:27.000000000 -0500
66542+++ linux-3.1.4/kernel/sched.c 2011-11-16 18:40:44.000000000 -0500
66543@@ -4264,6 +4264,8 @@ static void __sched __schedule(void)
66544 struct rq *rq;
66545 int cpu;
66546
66547+ pax_track_stack();
66548+
66549 need_resched:
66550 preempt_disable();
66551 cpu = smp_processor_id();
66552@@ -4950,6 +4952,8 @@ int can_nice(const struct task_struct *p
66553 /* convert nice value [19,-20] to rlimit style value [1,40] */
66554 int nice_rlim = 20 - nice;
66555
66556+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
66557+
66558 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
66559 capable(CAP_SYS_NICE));
66560 }
66561@@ -4983,7 +4987,8 @@ SYSCALL_DEFINE1(nice, int, increment)
66562 if (nice > 19)
66563 nice = 19;
66564
66565- if (increment < 0 && !can_nice(current, nice))
66566+ if (increment < 0 && (!can_nice(current, nice) ||
66567+ gr_handle_chroot_nice()))
66568 return -EPERM;
66569
66570 retval = security_task_setnice(current, nice);
66571@@ -5127,6 +5132,7 @@ recheck:
66572 unsigned long rlim_rtprio =
66573 task_rlimit(p, RLIMIT_RTPRIO);
66574
66575+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
66576 /* can't set/change the rt policy */
66577 if (policy != p->policy && !rlim_rtprio)
66578 return -EPERM;
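The can_nice() hunk above only inserts a gr_learn_resource() call ahead of the existing limit check; the check itself maps a nice value in [19,-20] onto the rlimit-style range [1,40]. Restating that arithmetic as a standalone sketch (nice_to_rlimit_style() is an invented name):

#include <stdio.h>

static int nice_to_rlimit_style(int nice)	/* nice in [-20, 19] */
{
	return 20 - nice;			/* result in [1, 40] */
}

int main(void)
{
	printf("nice -20 -> %d, nice 0 -> %d, nice 19 -> %d\n",
	       nice_to_rlimit_style(-20),
	       nice_to_rlimit_style(0),
	       nice_to_rlimit_style(19));
	return 0;
}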
66579diff -urNp linux-3.1.4/kernel/sched_fair.c linux-3.1.4/kernel/sched_fair.c
66580--- linux-3.1.4/kernel/sched_fair.c 2011-11-11 15:19:27.000000000 -0500
66581+++ linux-3.1.4/kernel/sched_fair.c 2011-11-16 18:39:08.000000000 -0500
66582@@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_c
66583 * run_rebalance_domains is triggered when needed from the scheduler tick.
66584 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66585 */
66586-static void run_rebalance_domains(struct softirq_action *h)
66587+static void run_rebalance_domains(void)
66588 {
66589 int this_cpu = smp_processor_id();
66590 struct rq *this_rq = cpu_rq(this_cpu);
66591diff -urNp linux-3.1.4/kernel/signal.c linux-3.1.4/kernel/signal.c
66592--- linux-3.1.4/kernel/signal.c 2011-11-11 15:19:27.000000000 -0500
66593+++ linux-3.1.4/kernel/signal.c 2011-11-16 19:30:04.000000000 -0500
66594@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
66595
66596 int print_fatal_signals __read_mostly;
66597
66598-static void __user *sig_handler(struct task_struct *t, int sig)
66599+static __sighandler_t sig_handler(struct task_struct *t, int sig)
66600 {
66601 return t->sighand->action[sig - 1].sa.sa_handler;
66602 }
66603
66604-static int sig_handler_ignored(void __user *handler, int sig)
66605+static int sig_handler_ignored(__sighandler_t handler, int sig)
66606 {
66607 /* Is it explicitly or implicitly ignored? */
66608 return handler == SIG_IGN ||
66609@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
66610 static int sig_task_ignored(struct task_struct *t, int sig,
66611 int from_ancestor_ns)
66612 {
66613- void __user *handler;
66614+ __sighandler_t handler;
66615
66616 handler = sig_handler(t, sig);
66617
66618@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_st
66619 atomic_inc(&user->sigpending);
66620 rcu_read_unlock();
66621
66622+ if (!override_rlimit)
66623+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66624+
66625 if (override_rlimit ||
66626 atomic_read(&user->sigpending) <=
66627 task_rlimit(t, RLIMIT_SIGPENDING)) {
66628@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct
66629
66630 int unhandled_signal(struct task_struct *tsk, int sig)
66631 {
66632- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66633+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66634 if (is_global_init(tsk))
66635 return 1;
66636 if (handler != SIG_IGN && handler != SIG_DFL)
66637@@ -815,6 +818,13 @@ static int check_kill_permission(int sig
66638 }
66639 }
66640
66641+ /* allow glibc communication via tgkill to other threads in our
66642+ thread group */
66643+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66644+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66645+ && gr_handle_signal(t, sig))
66646+ return -EPERM;
66647+
66648 return security_task_kill(t, info, sig, 0);
66649 }
66650
66651@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct si
66652 return send_signal(sig, info, p, 1);
66653 }
66654
66655-static int
66656+int
66657 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66658 {
66659 return send_signal(sig, info, t, 0);
66660@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *
66661 unsigned long int flags;
66662 int ret, blocked, ignored;
66663 struct k_sigaction *action;
66664+ int is_unhandled = 0;
66665
66666 spin_lock_irqsave(&t->sighand->siglock, flags);
66667 action = &t->sighand->action[sig-1];
66668@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *
66669 }
66670 if (action->sa.sa_handler == SIG_DFL)
66671 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66672+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66673+ is_unhandled = 1;
66674 ret = specific_send_sig_info(sig, info, t);
66675 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66676
66677+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
66678+ normal operation */
66679+ if (is_unhandled) {
66680+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66681+ gr_handle_crash(t, sig);
66682+ }
66683+
66684 return ret;
66685 }
66686
66687@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct
66688 ret = check_kill_permission(sig, info, p);
66689 rcu_read_unlock();
66690
66691- if (!ret && sig)
66692+ if (!ret && sig) {
66693 ret = do_send_sig_info(sig, info, p, true);
66694+ if (!ret)
66695+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66696+ }
66697
66698 return ret;
66699 }
66700@@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr,
66701 {
66702 siginfo_t info;
66703
66704+ pax_track_stack();
66705+
66706 memset(&info, 0, sizeof info);
66707 info.si_signo = signr;
66708 info.si_code = exit_code;
66709@@ -2748,7 +2773,15 @@ do_send_specific(pid_t tgid, pid_t pid,
66710 int error = -ESRCH;
66711
66712 rcu_read_lock();
66713- p = find_task_by_vpid(pid);
66714+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66715+ /* allow glibc communication via tgkill to other threads in our
66716+ thread group */
66717+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66718+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
66719+ p = find_task_by_vpid_unrestricted(pid);
66720+ else
66721+#endif
66722+ p = find_task_by_vpid(pid);
66723 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66724 error = check_kill_permission(sig, info, p);
66725 /*
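The check_kill_permission() hunk above is written as a negated condition: gr_handle_signal() is bypassed only when the signal carries siginfo, was sent with SI_TKILL, equals SIGRTMIN+1 (glibc's internal setxid signal), and targets the sender's own thread group. The same exemption, spelled out positively as a hypothetical helper (struct sig_ctx and is_glibc_setxid_signal() are invented for illustration, with kernel types replaced by plain integers):

#include <stdbool.h>
#include <stdio.h>

#define SI_TKILL (-6)	/* matches the kernel's si_code for tgkill/tkill */

struct sig_ctx {
	bool has_siginfo;	/* info != SEND_SIG_NOINFO */
	int  si_code;
	int  si_pid;
	int  sig;
	int  sigrtmin;		/* kernel-side SIGRTMIN (32 on Linux) */
	int  target_tgid;	/* task_tgid_vnr(t) */
};

/* True when the signal is glibc's own thread-group-internal tgkill traffic. */
static bool is_glibc_setxid_signal(const struct sig_ctx *c)
{
	return c->has_siginfo &&
	       c->si_code == SI_TKILL &&
	       c->sig == c->sigrtmin + 1 &&
	       c->target_tgid == c->si_pid;
}

int main(void)
{
	struct sig_ctx c = {
		.has_siginfo = true, .si_code = SI_TKILL,
		.si_pid = 1234, .sig = 33, .sigrtmin = 32, .target_tgid = 1234,
	};

	printf("exempt from gr_handle_signal(): %d\n", is_glibc_setxid_signal(&c));
	return 0;
}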
66726diff -urNp linux-3.1.4/kernel/smp.c linux-3.1.4/kernel/smp.c
66727--- linux-3.1.4/kernel/smp.c 2011-11-11 15:19:27.000000000 -0500
66728+++ linux-3.1.4/kernel/smp.c 2011-11-16 18:39:08.000000000 -0500
66729@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
66730 }
66731 EXPORT_SYMBOL(smp_call_function);
66732
66733-void ipi_call_lock(void)
66734+void ipi_call_lock(void) __acquires(call_function.lock)
66735 {
66736 raw_spin_lock(&call_function.lock);
66737 }
66738
66739-void ipi_call_unlock(void)
66740+void ipi_call_unlock(void) __releases(call_function.lock)
66741 {
66742 raw_spin_unlock(&call_function.lock);
66743 }
66744
66745-void ipi_call_lock_irq(void)
66746+void ipi_call_lock_irq(void) __acquires(call_function.lock)
66747 {
66748 raw_spin_lock_irq(&call_function.lock);
66749 }
66750
66751-void ipi_call_unlock_irq(void)
66752+void ipi_call_unlock_irq(void) __releases(call_function.lock)
66753 {
66754 raw_spin_unlock_irq(&call_function.lock);
66755 }
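__acquires() and __releases() in the hunk above are sparse annotations; outside a sparse run they expand to nothing, so the change affects only static lock-context checking, not generated code. A simplified sketch of how such annotations are commonly defined (modeled loosely on include/linux/compiler.h, not copied verbatim):

/* Simplified sketch of sparse lock-context annotations. */
#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)	/* no effect on generated code */
# define __releases(x)
#endif

extern int some_lock;	/* stand-in for call_function.lock */

/* sparse can now check that callers keep the lock state balanced. */
void take_lock(void) __acquires(some_lock);
void drop_lock(void) __releases(some_lock);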
66756diff -urNp linux-3.1.4/kernel/softirq.c linux-3.1.4/kernel/softirq.c
66757--- linux-3.1.4/kernel/softirq.c 2011-11-11 15:19:27.000000000 -0500
66758+++ linux-3.1.4/kernel/softirq.c 2011-11-16 18:39:08.000000000 -0500
66759@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
66760
66761 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66762
66763-char *softirq_to_name[NR_SOFTIRQS] = {
66764+const char * const softirq_to_name[NR_SOFTIRQS] = {
66765 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66766 "TASKLET", "SCHED", "HRTIMER", "RCU"
66767 };
66768@@ -235,7 +235,7 @@ restart:
66769 kstat_incr_softirqs_this_cpu(vec_nr);
66770
66771 trace_softirq_entry(vec_nr);
66772- h->action(h);
66773+ h->action();
66774 trace_softirq_exit(vec_nr);
66775 if (unlikely(prev_count != preempt_count())) {
66776 printk(KERN_ERR "huh, entered softirq %u %s %p"
66777@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66778 local_irq_restore(flags);
66779 }
66780
66781-void open_softirq(int nr, void (*action)(struct softirq_action *))
66782+void open_softirq(int nr, void (*action)(void))
66783 {
66784- softirq_vec[nr].action = action;
66785+ pax_open_kernel();
66786+ *(void **)&softirq_vec[nr].action = action;
66787+ pax_close_kernel();
66788 }
66789
66790 /*
66791@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
66792
66793 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66794
66795-static void tasklet_action(struct softirq_action *a)
66796+static void tasklet_action(void)
66797 {
66798 struct tasklet_struct *list;
66799
66800@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
66801 }
66802 }
66803
66804-static void tasklet_hi_action(struct softirq_action *a)
66805+static void tasklet_hi_action(void)
66806 {
66807 struct tasklet_struct *list;
66808
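With the constification applied elsewhere in this patch, softirq_vec is meant to sit in read-only memory, so open_softirq() wraps its single pointer store in pax_open_kernel()/pax_close_kernel(). A rough userspace analogue of that open-write-close pattern, using mprotect() on an anonymous page (the table layout and names are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

typedef void (*action_fn)(void);

static void timer_action(void) { puts("timer softirq analogue"); }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* Table of "softirq" handlers, kept read-only most of the time. */
	action_fn *vec = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (vec == MAP_FAILED) { perror("mmap"); return 1; }
	memset(vec, 0, pagesz);
	if (mprotect(vec, pagesz, PROT_READ)) { perror("mprotect"); return 1; }

	/* "pax_open_kernel()": make the table writable for the single store. */
	if (mprotect(vec, pagesz, PROT_READ | PROT_WRITE)) { perror("mprotect"); return 1; }
	vec[0] = timer_action;
	/* "pax_close_kernel()": back to read-only. */
	if (mprotect(vec, pagesz, PROT_READ)) { perror("mprotect"); return 1; }

	vec[0]();	/* reads are always allowed */
	return 0;
}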
66809diff -urNp linux-3.1.4/kernel/sys.c linux-3.1.4/kernel/sys.c
66810--- linux-3.1.4/kernel/sys.c 2011-11-11 15:19:27.000000000 -0500
66811+++ linux-3.1.4/kernel/sys.c 2011-11-16 18:40:44.000000000 -0500
66812@@ -157,6 +157,12 @@ static int set_one_prio(struct task_stru
66813 error = -EACCES;
66814 goto out;
66815 }
66816+
66817+ if (gr_handle_chroot_setpriority(p, niceval)) {
66818+ error = -EACCES;
66819+ goto out;
66820+ }
66821+
66822 no_nice = security_task_setnice(p, niceval);
66823 if (no_nice) {
66824 error = no_nice;
66825@@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
66826 goto error;
66827 }
66828
66829+ if (gr_check_group_change(new->gid, new->egid, -1))
66830+ goto error;
66831+
66832 if (rgid != (gid_t) -1 ||
66833 (egid != (gid_t) -1 && egid != old->gid))
66834 new->sgid = new->egid;
66835@@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66836 old = current_cred();
66837
66838 retval = -EPERM;
66839+
66840+ if (gr_check_group_change(gid, gid, gid))
66841+ goto error;
66842+
66843 if (nsown_capable(CAP_SETGID))
66844 new->gid = new->egid = new->sgid = new->fsgid = gid;
66845 else if (gid == old->gid || gid == old->sgid)
66846@@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
66847 goto error;
66848 }
66849
66850+ if (gr_check_user_change(new->uid, new->euid, -1))
66851+ goto error;
66852+
66853 if (new->uid != old->uid) {
66854 retval = set_user(new);
66855 if (retval < 0)
66856@@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66857 old = current_cred();
66858
66859 retval = -EPERM;
66860+
66861+ if (gr_check_crash_uid(uid))
66862+ goto error;
66863+ if (gr_check_user_change(uid, uid, uid))
66864+ goto error;
66865+
66866 if (nsown_capable(CAP_SETUID)) {
66867 new->suid = new->uid = uid;
66868 if (uid != old->uid) {
66869@@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
66870 goto error;
66871 }
66872
66873+ if (gr_check_user_change(ruid, euid, -1))
66874+ goto error;
66875+
66876 if (ruid != (uid_t) -1) {
66877 new->uid = ruid;
66878 if (ruid != old->uid) {
66879@@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
66880 goto error;
66881 }
66882
66883+ if (gr_check_group_change(rgid, egid, -1))
66884+ goto error;
66885+
66886 if (rgid != (gid_t) -1)
66887 new->gid = rgid;
66888 if (egid != (gid_t) -1)
66889@@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66890 old = current_cred();
66891 old_fsuid = old->fsuid;
66892
66893+ if (gr_check_user_change(-1, -1, uid))
66894+ goto error;
66895+
66896 if (uid == old->uid || uid == old->euid ||
66897 uid == old->suid || uid == old->fsuid ||
66898 nsown_capable(CAP_SETUID)) {
66899@@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66900 }
66901 }
66902
66903+error:
66904 abort_creds(new);
66905 return old_fsuid;
66906
66907@@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66908 if (gid == old->gid || gid == old->egid ||
66909 gid == old->sgid || gid == old->fsgid ||
66910 nsown_capable(CAP_SETGID)) {
66911+ if (gr_check_group_change(-1, -1, gid))
66912+ goto error;
66913+
66914 if (gid != old_fsgid) {
66915 new->fsgid = gid;
66916 goto change_okay;
66917 }
66918 }
66919
66920+error:
66921 abort_creds(new);
66922 return old_fsgid;
66923
66924@@ -1242,19 +1278,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
66925 return -EFAULT;
66926
66927 down_read(&uts_sem);
66928- error = __copy_to_user(&name->sysname, &utsname()->sysname,
66929+ error = __copy_to_user(name->sysname, &utsname()->sysname,
66930 __OLD_UTS_LEN);
66931 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66932- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66933+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
66934 __OLD_UTS_LEN);
66935 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66936- error |= __copy_to_user(&name->release, &utsname()->release,
66937+ error |= __copy_to_user(name->release, &utsname()->release,
66938 __OLD_UTS_LEN);
66939 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66940- error |= __copy_to_user(&name->version, &utsname()->version,
66941+ error |= __copy_to_user(name->version, &utsname()->version,
66942 __OLD_UTS_LEN);
66943 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66944- error |= __copy_to_user(&name->machine, &utsname()->machine,
66945+ error |= __copy_to_user(name->machine, &utsname()->machine,
66946 __OLD_UTS_LEN);
66947 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66948 up_read(&uts_sem);
66949@@ -1717,7 +1753,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
66950 error = get_dumpable(me->mm);
66951 break;
66952 case PR_SET_DUMPABLE:
66953- if (arg2 < 0 || arg2 > 1) {
66954+ if (arg2 > 1) {
66955 error = -EINVAL;
66956 break;
66957 }
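In the PR_SET_DUMPABLE hunk above, arg2 is an unsigned long, so the removed "arg2 < 0" test could never be true; only "arg2 > 1" does any work. A two-line demonstration of the tautology:

#include <stdio.h>

int main(void)
{
	unsigned long arg2 = (unsigned long)-1;	/* a "negative" input wraps to ULONG_MAX */

	printf("arg2 < 0 -> %d\n", arg2 < 0);	/* always 0; gcc -Wextra warns about it */
	printf("arg2 > 1 -> %d\n", arg2 > 1);	/* the bound that actually matters */
	return 0;
}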
66958diff -urNp linux-3.1.4/kernel/sysctl_binary.c linux-3.1.4/kernel/sysctl_binary.c
66959--- linux-3.1.4/kernel/sysctl_binary.c 2011-11-11 15:19:27.000000000 -0500
66960+++ linux-3.1.4/kernel/sysctl_binary.c 2011-11-16 18:39:08.000000000 -0500
66961@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
66962 int i;
66963
66964 set_fs(KERNEL_DS);
66965- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66966+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66967 set_fs(old_fs);
66968 if (result < 0)
66969 goto out_kfree;
66970@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
66971 }
66972
66973 set_fs(KERNEL_DS);
66974- result = vfs_write(file, buffer, str - buffer, &pos);
66975+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66976 set_fs(old_fs);
66977 if (result < 0)
66978 goto out_kfree;
66979@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
66980 int i;
66981
66982 set_fs(KERNEL_DS);
66983- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66984+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66985 set_fs(old_fs);
66986 if (result < 0)
66987 goto out_kfree;
66988@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
66989 }
66990
66991 set_fs(KERNEL_DS);
66992- result = vfs_write(file, buffer, str - buffer, &pos);
66993+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66994 set_fs(old_fs);
66995 if (result < 0)
66996 goto out_kfree;
66997@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
66998 int i;
66999
67000 set_fs(KERNEL_DS);
67001- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
67002+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
67003 set_fs(old_fs);
67004 if (result < 0)
67005 goto out;
67006@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
67007 __le16 dnaddr;
67008
67009 set_fs(KERNEL_DS);
67010- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
67011+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
67012 set_fs(old_fs);
67013 if (result < 0)
67014 goto out;
67015@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
67016 le16_to_cpu(dnaddr) & 0x3ff);
67017
67018 set_fs(KERNEL_DS);
67019- result = vfs_write(file, buf, len, &pos);
67020+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
67021 set_fs(old_fs);
67022 if (result < 0)
67023 goto out;
67024diff -urNp linux-3.1.4/kernel/sysctl.c linux-3.1.4/kernel/sysctl.c
67025--- linux-3.1.4/kernel/sysctl.c 2011-11-11 15:19:27.000000000 -0500
67026+++ linux-3.1.4/kernel/sysctl.c 2011-11-16 18:40:44.000000000 -0500
67027@@ -85,6 +85,13 @@
67028
67029
67030 #if defined(CONFIG_SYSCTL)
67031+#include <linux/grsecurity.h>
67032+#include <linux/grinternal.h>
67033+
67034+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
67035+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67036+ const int op);
67037+extern int gr_handle_chroot_sysctl(const int op);
67038
67039 /* External variables not in a header file. */
67040 extern int sysctl_overcommit_memory;
67041@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
67042 }
67043
67044 #endif
67045+extern struct ctl_table grsecurity_table[];
67046
67047 static struct ctl_table root_table[];
67048 static struct ctl_table_root sysctl_table_root;
67049@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
67050 int sysctl_legacy_va_layout;
67051 #endif
67052
67053+#ifdef CONFIG_PAX_SOFTMODE
67054+static ctl_table pax_table[] = {
67055+ {
67056+ .procname = "softmode",
67057+ .data = &pax_softmode,
67058+ .maxlen = sizeof(unsigned int),
67059+ .mode = 0600,
67060+ .proc_handler = &proc_dointvec,
67061+ },
67062+
67063+ { }
67064+};
67065+#endif
67066+
67067 /* The default sysctl tables: */
67068
67069 static struct ctl_table root_table[] = {
67070@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
67071 #endif
67072
67073 static struct ctl_table kern_table[] = {
67074+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
67075+ {
67076+ .procname = "grsecurity",
67077+ .mode = 0500,
67078+ .child = grsecurity_table,
67079+ },
67080+#endif
67081+
67082+#ifdef CONFIG_PAX_SOFTMODE
67083+ {
67084+ .procname = "pax",
67085+ .mode = 0500,
67086+ .child = pax_table,
67087+ },
67088+#endif
67089+
67090 {
67091 .procname = "sched_child_runs_first",
67092 .data = &sysctl_sched_child_runs_first,
67093@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
67094 .data = &modprobe_path,
67095 .maxlen = KMOD_PATH_LEN,
67096 .mode = 0644,
67097- .proc_handler = proc_dostring,
67098+ .proc_handler = proc_dostring_modpriv,
67099 },
67100 {
67101 .procname = "modules_disabled",
67102@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
67103 .extra1 = &zero,
67104 .extra2 = &one,
67105 },
67106+#endif
67107 {
67108 .procname = "kptr_restrict",
67109 .data = &kptr_restrict,
67110 .maxlen = sizeof(int),
67111 .mode = 0644,
67112 .proc_handler = proc_dmesg_restrict,
67113+#ifdef CONFIG_GRKERNSEC_HIDESYM
67114+ .extra1 = &two,
67115+#else
67116 .extra1 = &zero,
67117+#endif
67118 .extra2 = &two,
67119 },
67120-#endif
67121 {
67122 .procname = "ngroups_max",
67123 .data = &ngroups_max,
67124@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
67125 .proc_handler = proc_dointvec_minmax,
67126 .extra1 = &zero,
67127 },
67128+ {
67129+ .procname = "heap_stack_gap",
67130+ .data = &sysctl_heap_stack_gap,
67131+ .maxlen = sizeof(sysctl_heap_stack_gap),
67132+ .mode = 0644,
67133+ .proc_handler = proc_doulongvec_minmax,
67134+ },
67135 #else
67136 {
67137 .procname = "nr_trim_pages",
67138@@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op)
67139 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
67140 {
67141 int mode;
67142+ int error;
67143+
67144+ if (table->parent != NULL && table->parent->procname != NULL &&
67145+ table->procname != NULL &&
67146+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
67147+ return -EACCES;
67148+ if (gr_handle_chroot_sysctl(op))
67149+ return -EACCES;
67150+ error = gr_handle_sysctl(table, op);
67151+ if (error)
67152+ return error;
67153
67154 if (root->permissions)
67155 mode = root->permissions(root, current->nsproxy, table);
67156@@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *tabl
67157 buffer, lenp, ppos);
67158 }
67159
67160+int proc_dostring_modpriv(struct ctl_table *table, int write,
67161+ void __user *buffer, size_t *lenp, loff_t *ppos)
67162+{
67163+ if (write && !capable(CAP_SYS_MODULE))
67164+ return -EPERM;
67165+
67166+ return _proc_do_string(table->data, table->maxlen, write,
67167+ buffer, lenp, ppos);
67168+}
67169+
67170 static size_t proc_skip_spaces(char **buf)
67171 {
67172 size_t ret;
67173@@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **b
67174 len = strlen(tmp);
67175 if (len > *size)
67176 len = *size;
67177+ if (len > sizeof(tmp))
67178+ len = sizeof(tmp);
67179 if (copy_to_user(*buf, tmp, len))
67180 return -EFAULT;
67181 *size -= len;
67182@@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(v
67183 *i = val;
67184 } else {
67185 val = convdiv * (*i) / convmul;
67186- if (!first)
67187+ if (!first) {
67188 err = proc_put_char(&buffer, &left, '\t');
67189+ if (err)
67190+ break;
67191+ }
67192 err = proc_put_long(&buffer, &left, val, false);
67193 if (err)
67194 break;
67195@@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *tabl
67196 return -ENOSYS;
67197 }
67198
67199+int proc_dostring_modpriv(struct ctl_table *table, int write,
67200+ void __user *buffer, size_t *lenp, loff_t *ppos)
67201+{
67202+ return -ENOSYS;
67203+}
67204+
67205 int proc_dointvec(struct ctl_table *table, int write,
67206 void __user *buffer, size_t *lenp, loff_t *ppos)
67207 {
67208@@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
67209 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
67210 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
67211 EXPORT_SYMBOL(proc_dostring);
67212+EXPORT_SYMBOL(proc_dostring_modpriv);
67213 EXPORT_SYMBOL(proc_doulongvec_minmax);
67214 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
67215 EXPORT_SYMBOL(register_sysctl_table);
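The proc_put_long() hunk above adds a defensive clamp so the copy length can never exceed the on-stack tmp[] buffer, whatever the caller-supplied *size says. The same pattern as a small sketch, with memcpy() standing in for copy_to_user() (bounded_copy() is an invented name):

#include <stdio.h>
#include <string.h>

/* Clamp the copy length to BOTH the destination's remaining space and
 * the source buffer, mirroring the proc_put_long() fix. */
static size_t bounded_copy(char *dst, size_t dst_left,
			   const char *tmp, size_t tmp_size)
{
	size_t len = strlen(tmp);

	if (len > dst_left)
		len = dst_left;
	if (len > tmp_size)	/* the added clamp: never read past tmp[] */
		len = tmp_size;
	memcpy(dst, tmp, len);
	return len;
}

int main(void)
{
	char out[8];
	char tmp[4] = "123";	/* formatted number, as in proc_put_long() */
	size_t n = bounded_copy(out, sizeof(out), tmp, sizeof(tmp));

	printf("copied %zu bytes\n", n);
	return 0;
}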
67216diff -urNp linux-3.1.4/kernel/sysctl_check.c linux-3.1.4/kernel/sysctl_check.c
67217--- linux-3.1.4/kernel/sysctl_check.c 2011-11-11 15:19:27.000000000 -0500
67218+++ linux-3.1.4/kernel/sysctl_check.c 2011-11-16 18:40:44.000000000 -0500
67219@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
67220 set_fail(&fail, table, "Directory with extra2");
67221 } else {
67222 if ((table->proc_handler == proc_dostring) ||
67223+ (table->proc_handler == proc_dostring_modpriv) ||
67224 (table->proc_handler == proc_dointvec) ||
67225 (table->proc_handler == proc_dointvec_minmax) ||
67226 (table->proc_handler == proc_dointvec_jiffies) ||
67227diff -urNp linux-3.1.4/kernel/taskstats.c linux-3.1.4/kernel/taskstats.c
67228--- linux-3.1.4/kernel/taskstats.c 2011-11-11 15:19:27.000000000 -0500
67229+++ linux-3.1.4/kernel/taskstats.c 2011-11-16 19:35:09.000000000 -0500
67230@@ -27,9 +27,12 @@
67231 #include <linux/cgroup.h>
67232 #include <linux/fs.h>
67233 #include <linux/file.h>
67234+#include <linux/grsecurity.h>
67235 #include <net/genetlink.h>
67236 #include <linux/atomic.h>
67237
67238+extern int gr_is_taskstats_denied(int pid);
67239+
67240 /*
67241 * Maximum length of a cpumask that can be specified in
67242 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
67243@@ -556,6 +559,9 @@ err:
67244
67245 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
67246 {
67247+ if (gr_is_taskstats_denied(current->pid))
67248+ return -EACCES;
67249+
67250 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
67251 return cmd_attr_register_cpumask(info);
67252 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
67253diff -urNp linux-3.1.4/kernel/time/alarmtimer.c linux-3.1.4/kernel/time/alarmtimer.c
67254--- linux-3.1.4/kernel/time/alarmtimer.c 2011-11-11 15:19:27.000000000 -0500
67255+++ linux-3.1.4/kernel/time/alarmtimer.c 2011-11-16 18:39:08.000000000 -0500
67256@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
67257 {
67258 int error = 0;
67259 int i;
67260- struct k_clock alarm_clock = {
67261+ static struct k_clock alarm_clock = {
67262 .clock_getres = alarm_clock_getres,
67263 .clock_get = alarm_clock_get,
67264 .timer_create = alarm_timer_create,
67265diff -urNp linux-3.1.4/kernel/time/tick-broadcast.c linux-3.1.4/kernel/time/tick-broadcast.c
67266--- linux-3.1.4/kernel/time/tick-broadcast.c 2011-11-11 15:19:27.000000000 -0500
67267+++ linux-3.1.4/kernel/time/tick-broadcast.c 2011-11-16 18:39:08.000000000 -0500
67268@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
67269 * then clear the broadcast bit.
67270 */
67271 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
67272- int cpu = smp_processor_id();
67273+ cpu = smp_processor_id();
67274
67275 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
67276 tick_broadcast_clear_oneshot(cpu);
67277diff -urNp linux-3.1.4/kernel/time/timekeeping.c linux-3.1.4/kernel/time/timekeeping.c
67278--- linux-3.1.4/kernel/time/timekeeping.c 2011-11-11 15:19:27.000000000 -0500
67279+++ linux-3.1.4/kernel/time/timekeeping.c 2011-11-16 18:40:44.000000000 -0500
67280@@ -14,6 +14,7 @@
67281 #include <linux/init.h>
67282 #include <linux/mm.h>
67283 #include <linux/sched.h>
67284+#include <linux/grsecurity.h>
67285 #include <linux/syscore_ops.h>
67286 #include <linux/clocksource.h>
67287 #include <linux/jiffies.h>
67288@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
67289 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
67290 return -EINVAL;
67291
67292+ gr_log_timechange();
67293+
67294 write_seqlock_irqsave(&xtime_lock, flags);
67295
67296 timekeeping_forward_now();
67297diff -urNp linux-3.1.4/kernel/time/timer_list.c linux-3.1.4/kernel/time/timer_list.c
67298--- linux-3.1.4/kernel/time/timer_list.c 2011-11-11 15:19:27.000000000 -0500
67299+++ linux-3.1.4/kernel/time/timer_list.c 2011-11-16 18:40:44.000000000 -0500
67300@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
67301
67302 static void print_name_offset(struct seq_file *m, void *sym)
67303 {
67304+#ifdef CONFIG_GRKERNSEC_HIDESYM
67305+ SEQ_printf(m, "<%p>", NULL);
67306+#else
67307 char symname[KSYM_NAME_LEN];
67308
67309 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
67310 SEQ_printf(m, "<%pK>", sym);
67311 else
67312 SEQ_printf(m, "%s", symname);
67313+#endif
67314 }
67315
67316 static void
67317@@ -112,7 +116,11 @@ next_one:
67318 static void
67319 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
67320 {
67321+#ifdef CONFIG_GRKERNSEC_HIDESYM
67322+ SEQ_printf(m, " .base: %p\n", NULL);
67323+#else
67324 SEQ_printf(m, " .base: %pK\n", base);
67325+#endif
67326 SEQ_printf(m, " .index: %d\n",
67327 base->index);
67328 SEQ_printf(m, " .resolution: %Lu nsecs\n",
67329@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
67330 {
67331 struct proc_dir_entry *pe;
67332
67333+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67334+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
67335+#else
67336 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
67337+#endif
67338 if (!pe)
67339 return -ENOMEM;
67340 return 0;
67341diff -urNp linux-3.1.4/kernel/time/timer_stats.c linux-3.1.4/kernel/time/timer_stats.c
67342--- linux-3.1.4/kernel/time/timer_stats.c 2011-11-11 15:19:27.000000000 -0500
67343+++ linux-3.1.4/kernel/time/timer_stats.c 2011-11-16 18:40:44.000000000 -0500
67344@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
67345 static unsigned long nr_entries;
67346 static struct entry entries[MAX_ENTRIES];
67347
67348-static atomic_t overflow_count;
67349+static atomic_unchecked_t overflow_count;
67350
67351 /*
67352 * The entries are in a hash-table, for fast lookup:
67353@@ -140,7 +140,7 @@ static void reset_entries(void)
67354 nr_entries = 0;
67355 memset(entries, 0, sizeof(entries));
67356 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
67357- atomic_set(&overflow_count, 0);
67358+ atomic_set_unchecked(&overflow_count, 0);
67359 }
67360
67361 static struct entry *alloc_entry(void)
67362@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
67363 if (likely(entry))
67364 entry->count++;
67365 else
67366- atomic_inc(&overflow_count);
67367+ atomic_inc_unchecked(&overflow_count);
67368
67369 out_unlock:
67370 raw_spin_unlock_irqrestore(lock, flags);
67371@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
67372
67373 static void print_name_offset(struct seq_file *m, unsigned long addr)
67374 {
67375+#ifdef CONFIG_GRKERNSEC_HIDESYM
67376+ seq_printf(m, "<%p>", NULL);
67377+#else
67378 char symname[KSYM_NAME_LEN];
67379
67380 if (lookup_symbol_name(addr, symname) < 0)
67381 seq_printf(m, "<%p>", (void *)addr);
67382 else
67383 seq_printf(m, "%s", symname);
67384+#endif
67385 }
67386
67387 static int tstats_show(struct seq_file *m, void *v)
67388@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
67389
67390 seq_puts(m, "Timer Stats Version: v0.2\n");
67391 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
67392- if (atomic_read(&overflow_count))
67393+ if (atomic_read_unchecked(&overflow_count))
67394 seq_printf(m, "Overflow: %d entries\n",
67395- atomic_read(&overflow_count));
67396+ atomic_read_unchecked(&overflow_count));
67397
67398 for (i = 0; i < nr_entries; i++) {
67399 entry = entries + i;
67400@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
67401 {
67402 struct proc_dir_entry *pe;
67403
67404+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67405+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
67406+#else
67407 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
67408+#endif
67409 if (!pe)
67410 return -ENOMEM;
67411 return 0;
67412diff -urNp linux-3.1.4/kernel/time.c linux-3.1.4/kernel/time.c
67413--- linux-3.1.4/kernel/time.c 2011-11-11 15:19:27.000000000 -0500
67414+++ linux-3.1.4/kernel/time.c 2011-11-16 18:40:44.000000000 -0500
67415@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
67416 return error;
67417
67418 if (tz) {
67419+ /* we log in do_settimeofday called below, so don't log twice
67420+ */
67421+ if (!tv)
67422+ gr_log_timechange();
67423+
67424 /* SMP safe, global irq locking makes it work. */
67425 sys_tz = *tz;
67426 update_vsyscall_tz();
67427diff -urNp linux-3.1.4/kernel/timer.c linux-3.1.4/kernel/timer.c
67428--- linux-3.1.4/kernel/timer.c 2011-11-11 15:19:27.000000000 -0500
67429+++ linux-3.1.4/kernel/timer.c 2011-11-16 18:39:08.000000000 -0500
67430@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
67431 /*
67432 * This function runs timers and the timer-tq in bottom half context.
67433 */
67434-static void run_timer_softirq(struct softirq_action *h)
67435+static void run_timer_softirq(void)
67436 {
67437 struct tvec_base *base = __this_cpu_read(tvec_bases);
67438
67439diff -urNp linux-3.1.4/kernel/trace/blktrace.c linux-3.1.4/kernel/trace/blktrace.c
67440--- linux-3.1.4/kernel/trace/blktrace.c 2011-11-11 15:19:27.000000000 -0500
67441+++ linux-3.1.4/kernel/trace/blktrace.c 2011-11-16 18:39:08.000000000 -0500
67442@@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct f
67443 struct blk_trace *bt = filp->private_data;
67444 char buf[16];
67445
67446- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
67447+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
67448
67449 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
67450 }
67451@@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(str
67452 return 1;
67453
67454 bt = buf->chan->private_data;
67455- atomic_inc(&bt->dropped);
67456+ atomic_inc_unchecked(&bt->dropped);
67457 return 0;
67458 }
67459
67460@@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_qu
67461
67462 bt->dir = dir;
67463 bt->dev = dev;
67464- atomic_set(&bt->dropped, 0);
67465+ atomic_set_unchecked(&bt->dropped, 0);
67466
67467 ret = -EIO;
67468 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
67469diff -urNp linux-3.1.4/kernel/trace/ftrace.c linux-3.1.4/kernel/trace/ftrace.c
67470--- linux-3.1.4/kernel/trace/ftrace.c 2011-11-11 15:19:27.000000000 -0500
67471+++ linux-3.1.4/kernel/trace/ftrace.c 2011-11-16 18:39:08.000000000 -0500
67472@@ -1585,12 +1585,17 @@ ftrace_code_disable(struct module *mod,
67473 if (unlikely(ftrace_disabled))
67474 return 0;
67475
67476+ ret = ftrace_arch_code_modify_prepare();
67477+ FTRACE_WARN_ON(ret);
67478+ if (ret)
67479+ return 0;
67480+
67481 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
67482+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
67483 if (ret) {
67484 ftrace_bug(ret, ip);
67485- return 0;
67486 }
67487- return 1;
67488+ return ret ? 0 : 1;
67489 }
67490
67491 /*
67492@@ -2607,7 +2612,7 @@ static void ftrace_free_entry_rcu(struct
67493
67494 int
67495 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
67496- void *data)
67497+ void *data)
67498 {
67499 struct ftrace_func_probe *entry;
67500 struct ftrace_page *pg;
67501diff -urNp linux-3.1.4/kernel/trace/trace.c linux-3.1.4/kernel/trace/trace.c
67502--- linux-3.1.4/kernel/trace/trace.c 2011-11-11 15:19:27.000000000 -0500
67503+++ linux-3.1.4/kernel/trace/trace.c 2011-11-16 18:40:44.000000000 -0500
67504@@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(
67505 size_t rem;
67506 unsigned int i;
67507
67508+ pax_track_stack();
67509+
67510 if (splice_grow_spd(pipe, &spd))
67511 return -ENOMEM;
67512
67513@@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file
67514 int entries, size, i;
67515 size_t ret;
67516
67517+ pax_track_stack();
67518+
67519 if (splice_grow_spd(pipe, &spd))
67520 return -ENOMEM;
67521
67522@@ -4093,10 +4097,9 @@ static const struct file_operations trac
67523 };
67524 #endif
67525
67526-static struct dentry *d_tracer;
67527-
67528 struct dentry *tracing_init_dentry(void)
67529 {
67530+ static struct dentry *d_tracer;
67531 static int once;
67532
67533 if (d_tracer)
67534@@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void)
67535 return d_tracer;
67536 }
67537
67538-static struct dentry *d_percpu;
67539-
67540 struct dentry *tracing_dentry_percpu(void)
67541 {
67542+ static struct dentry *d_percpu;
67543 static int once;
67544 struct dentry *d_tracer;
67545
67546diff -urNp linux-3.1.4/kernel/trace/trace_events.c linux-3.1.4/kernel/trace/trace_events.c
67547--- linux-3.1.4/kernel/trace/trace_events.c 2011-11-11 15:19:27.000000000 -0500
67548+++ linux-3.1.4/kernel/trace/trace_events.c 2011-11-16 18:39:08.000000000 -0500
67549@@ -1300,10 +1300,6 @@ static LIST_HEAD(ftrace_module_file_list
67550 struct ftrace_module_file_ops {
67551 struct list_head list;
67552 struct module *mod;
67553- struct file_operations id;
67554- struct file_operations enable;
67555- struct file_operations format;
67556- struct file_operations filter;
67557 };
67558
67559 static struct ftrace_module_file_ops *
67560@@ -1324,17 +1320,12 @@ trace_create_file_ops(struct module *mod
67561
67562 file_ops->mod = mod;
67563
67564- file_ops->id = ftrace_event_id_fops;
67565- file_ops->id.owner = mod;
67566-
67567- file_ops->enable = ftrace_enable_fops;
67568- file_ops->enable.owner = mod;
67569-
67570- file_ops->filter = ftrace_event_filter_fops;
67571- file_ops->filter.owner = mod;
67572-
67573- file_ops->format = ftrace_event_format_fops;
67574- file_ops->format.owner = mod;
67575+ pax_open_kernel();
67576+ *(void **)&mod->trace_id.owner = mod;
67577+ *(void **)&mod->trace_enable.owner = mod;
67578+ *(void **)&mod->trace_filter.owner = mod;
67579+ *(void **)&mod->trace_format.owner = mod;
67580+ pax_close_kernel();
67581
67582 list_add(&file_ops->list, &ftrace_module_file_list);
67583
67584@@ -1358,8 +1349,8 @@ static void trace_module_add_events(stru
67585
67586 for_each_event(call, start, end) {
67587 __trace_add_event_call(*call, mod,
67588- &file_ops->id, &file_ops->enable,
67589- &file_ops->filter, &file_ops->format);
67590+ &mod->trace_id, &mod->trace_enable,
67591+ &mod->trace_filter, &mod->trace_format);
67592 }
67593 }
67594
67595diff -urNp linux-3.1.4/kernel/trace/trace_kprobe.c linux-3.1.4/kernel/trace/trace_kprobe.c
67596--- linux-3.1.4/kernel/trace/trace_kprobe.c 2011-11-11 15:19:27.000000000 -0500
67597+++ linux-3.1.4/kernel/trace/trace_kprobe.c 2011-11-16 18:39:08.000000000 -0500
67598@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
67599 long ret;
67600 int maxlen = get_rloc_len(*(u32 *)dest);
67601 u8 *dst = get_rloc_data(dest);
67602- u8 *src = addr;
67603+ const u8 __user *src = (const u8 __force_user *)addr;
67604 mm_segment_t old_fs = get_fs();
67605 if (!maxlen)
67606 return;
67607@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
67608 pagefault_disable();
67609 do
67610 ret = __copy_from_user_inatomic(dst++, src++, 1);
67611- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67612+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67613 dst[-1] = '\0';
67614 pagefault_enable();
67615 set_fs(old_fs);
67616@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
67617 ((u8 *)get_rloc_data(dest))[0] = '\0';
67618 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67619 } else
67620- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67621+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67622 get_rloc_offs(*(u32 *)dest));
67623 }
67624 /* Return the length of string -- including null terminal byte */
67625@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
67626 set_fs(KERNEL_DS);
67627 pagefault_disable();
67628 do {
67629- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67630+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67631 len++;
67632 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67633 pagefault_enable();
67634diff -urNp linux-3.1.4/kernel/trace/trace_mmiotrace.c linux-3.1.4/kernel/trace/trace_mmiotrace.c
67635--- linux-3.1.4/kernel/trace/trace_mmiotrace.c 2011-11-11 15:19:27.000000000 -0500
67636+++ linux-3.1.4/kernel/trace/trace_mmiotrace.c 2011-11-16 18:39:08.000000000 -0500
67637@@ -24,7 +24,7 @@ struct header_iter {
67638 static struct trace_array *mmio_trace_array;
67639 static bool overrun_detected;
67640 static unsigned long prev_overruns;
67641-static atomic_t dropped_count;
67642+static atomic_unchecked_t dropped_count;
67643
67644 static void mmio_reset_data(struct trace_array *tr)
67645 {
67646@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
67647
67648 static unsigned long count_overruns(struct trace_iterator *iter)
67649 {
67650- unsigned long cnt = atomic_xchg(&dropped_count, 0);
67651+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67652 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67653
67654 if (over > prev_overruns)
67655@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
67656 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67657 sizeof(*entry), 0, pc);
67658 if (!event) {
67659- atomic_inc(&dropped_count);
67660+ atomic_inc_unchecked(&dropped_count);
67661 return;
67662 }
67663 entry = ring_buffer_event_data(event);
67664@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
67665 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67666 sizeof(*entry), 0, pc);
67667 if (!event) {
67668- atomic_inc(&dropped_count);
67669+ atomic_inc_unchecked(&dropped_count);
67670 return;
67671 }
67672 entry = ring_buffer_event_data(event);
67673diff -urNp linux-3.1.4/kernel/trace/trace_output.c linux-3.1.4/kernel/trace/trace_output.c
67674--- linux-3.1.4/kernel/trace/trace_output.c 2011-11-11 15:19:27.000000000 -0500
67675+++ linux-3.1.4/kernel/trace/trace_output.c 2011-11-16 18:39:08.000000000 -0500
67676@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
67677
67678 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67679 if (!IS_ERR(p)) {
67680- p = mangle_path(s->buffer + s->len, p, "\n");
67681+ p = mangle_path(s->buffer + s->len, p, "\n\\");
67682 if (p) {
67683 s->len = p - s->buffer;
67684 return 1;
67685diff -urNp linux-3.1.4/kernel/trace/trace_stack.c linux-3.1.4/kernel/trace/trace_stack.c
67686--- linux-3.1.4/kernel/trace/trace_stack.c 2011-11-11 15:19:27.000000000 -0500
67687+++ linux-3.1.4/kernel/trace/trace_stack.c 2011-11-16 18:39:08.000000000 -0500
67688@@ -50,7 +50,7 @@ static inline void check_stack(void)
67689 return;
67690
67691 /* we do not handle interrupt stacks yet */
67692- if (!object_is_on_stack(&this_size))
67693+ if (!object_starts_on_stack(&this_size))
67694 return;
67695
67696 local_irq_save(flags);
67697diff -urNp linux-3.1.4/kernel/trace/trace_workqueue.c linux-3.1.4/kernel/trace/trace_workqueue.c
67698--- linux-3.1.4/kernel/trace/trace_workqueue.c 2011-11-11 15:19:27.000000000 -0500
67699+++ linux-3.1.4/kernel/trace/trace_workqueue.c 2011-11-16 18:39:08.000000000 -0500
67700@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67701 int cpu;
67702 pid_t pid;
67703 /* Can be inserted from interrupt or user context, need to be atomic */
67704- atomic_t inserted;
67705+ atomic_unchecked_t inserted;
67706 /*
67707 * Don't need to be atomic, works are serialized in a single workqueue thread
67708 * on a single CPU.
67709@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67710 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67711 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67712 if (node->pid == wq_thread->pid) {
67713- atomic_inc(&node->inserted);
67714+ atomic_inc_unchecked(&node->inserted);
67715 goto found;
67716 }
67717 }
67718@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
67719 tsk = get_pid_task(pid, PIDTYPE_PID);
67720 if (tsk) {
67721 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67722- atomic_read(&cws->inserted), cws->executed,
67723+ atomic_read_unchecked(&cws->inserted), cws->executed,
67724 tsk->comm);
67725 put_task_struct(tsk);
67726 }
67727diff -urNp linux-3.1.4/lib/bitmap.c linux-3.1.4/lib/bitmap.c
67728--- linux-3.1.4/lib/bitmap.c 2011-11-11 15:19:27.000000000 -0500
67729+++ linux-3.1.4/lib/bitmap.c 2011-11-16 18:39:08.000000000 -0500
67730@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsi
67731 {
67732 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67733 u32 chunk;
67734- const char __user *ubuf = buf;
67735+ const char __user *ubuf = (const char __force_user *)buf;
67736
67737 bitmap_zero(maskp, nmaskbits);
67738
67739@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user
67740 {
67741 if (!access_ok(VERIFY_READ, ubuf, ulen))
67742 return -EFAULT;
67743- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
67744+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
67745 }
67746 EXPORT_SYMBOL(bitmap_parse_user);
67747
67748@@ -594,7 +594,7 @@ static int __bitmap_parselist(const char
67749 {
67750 unsigned a, b;
67751 int c, old_c, totaldigits;
67752- const char __user *ubuf = buf;
67753+ const char __user *ubuf = (const char __force_user *)buf;
67754 int exp_digit, in_range;
67755
67756 totaldigits = c = 0;
67757@@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __u
67758 {
67759 if (!access_ok(VERIFY_READ, ubuf, ulen))
67760 return -EFAULT;
67761- return __bitmap_parselist((const char *)ubuf,
67762+ return __bitmap_parselist((const char __force_kernel *)ubuf,
67763 ulen, 1, maskp, nmaskbits);
67764 }
67765 EXPORT_SYMBOL(bitmap_parselist_user);
67766diff -urNp linux-3.1.4/lib/bug.c linux-3.1.4/lib/bug.c
67767--- linux-3.1.4/lib/bug.c 2011-11-11 15:19:27.000000000 -0500
67768+++ linux-3.1.4/lib/bug.c 2011-11-16 18:39:08.000000000 -0500
67769@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
67770 return BUG_TRAP_TYPE_NONE;
67771
67772 bug = find_bug(bugaddr);
67773+ if (!bug)
67774+ return BUG_TRAP_TYPE_NONE;
67775
67776 file = NULL;
67777 line = 0;
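The lib/bug.c hunk above makes report_bug() return early when find_bug() finds no entry, instead of letting the code below dereference a NULL pointer. The general lookup-then-guard shape, sketched with invented names:

#include <stddef.h>
#include <stdio.h>

struct entry { unsigned long addr; const char *name; };

static const struct entry table[] = { { 0x1000, "first" }, { 0x2000, "second" } };

static const struct entry *find_entry(unsigned long addr)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].addr == addr)
			return &table[i];
	return NULL;			/* not found */
}

int main(void)
{
	const struct entry *e = find_entry(0x3000);

	if (!e) {			/* the added guard: stop before dereferencing */
		puts("no entry");
		return 0;
	}
	printf("%s\n", e->name);
	return 0;
}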
67778diff -urNp linux-3.1.4/lib/debugobjects.c linux-3.1.4/lib/debugobjects.c
67779--- linux-3.1.4/lib/debugobjects.c 2011-11-11 15:19:27.000000000 -0500
67780+++ linux-3.1.4/lib/debugobjects.c 2011-11-16 18:39:08.000000000 -0500
67781@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
67782 if (limit > 4)
67783 return;
67784
67785- is_on_stack = object_is_on_stack(addr);
67786+ is_on_stack = object_starts_on_stack(addr);
67787 if (is_on_stack == onstack)
67788 return;
67789
67790diff -urNp linux-3.1.4/lib/devres.c linux-3.1.4/lib/devres.c
67791--- linux-3.1.4/lib/devres.c 2011-11-11 15:19:27.000000000 -0500
67792+++ linux-3.1.4/lib/devres.c 2011-11-16 18:39:08.000000000 -0500
67793@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67794 void devm_iounmap(struct device *dev, void __iomem *addr)
67795 {
67796 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67797- (void *)addr));
67798+ (void __force *)addr));
67799 iounmap(addr);
67800 }
67801 EXPORT_SYMBOL(devm_iounmap);
67802@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
67803 {
67804 ioport_unmap(addr);
67805 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67806- devm_ioport_map_match, (void *)addr));
67807+ devm_ioport_map_match, (void __force *)addr));
67808 }
67809 EXPORT_SYMBOL(devm_ioport_unmap);
67810
67811diff -urNp linux-3.1.4/lib/dma-debug.c linux-3.1.4/lib/dma-debug.c
67812--- linux-3.1.4/lib/dma-debug.c 2011-11-11 15:19:27.000000000 -0500
67813+++ linux-3.1.4/lib/dma-debug.c 2011-11-16 18:39:08.000000000 -0500
67814@@ -870,7 +870,7 @@ out:
67815
67816 static void check_for_stack(struct device *dev, void *addr)
67817 {
67818- if (object_is_on_stack(addr))
67819+ if (object_starts_on_stack(addr))
67820 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67821 "stack [addr=%p]\n", addr);
67822 }
67823diff -urNp linux-3.1.4/lib/extable.c linux-3.1.4/lib/extable.c
67824--- linux-3.1.4/lib/extable.c 2011-11-11 15:19:27.000000000 -0500
67825+++ linux-3.1.4/lib/extable.c 2011-11-16 18:39:08.000000000 -0500
67826@@ -13,6 +13,7 @@
67827 #include <linux/init.h>
67828 #include <linux/sort.h>
67829 #include <asm/uaccess.h>
67830+#include <asm/pgtable.h>
67831
67832 #ifndef ARCH_HAS_SORT_EXTABLE
67833 /*
67834@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
67835 void sort_extable(struct exception_table_entry *start,
67836 struct exception_table_entry *finish)
67837 {
67838+ pax_open_kernel();
67839 sort(start, finish - start, sizeof(struct exception_table_entry),
67840 cmp_ex, NULL);
67841+ pax_close_kernel();
67842 }
67843
67844 #ifdef CONFIG_MODULES
67845diff -urNp linux-3.1.4/lib/inflate.c linux-3.1.4/lib/inflate.c
67846--- linux-3.1.4/lib/inflate.c 2011-11-11 15:19:27.000000000 -0500
67847+++ linux-3.1.4/lib/inflate.c 2011-11-16 18:39:08.000000000 -0500
67848@@ -269,7 +269,7 @@ static void free(void *where)
67849 malloc_ptr = free_mem_ptr;
67850 }
67851 #else
67852-#define malloc(a) kmalloc(a, GFP_KERNEL)
67853+#define malloc(a) kmalloc((a), GFP_KERNEL)
67854 #define free(a) kfree(a)
67855 #endif
67856
67857diff -urNp linux-3.1.4/lib/Kconfig.debug linux-3.1.4/lib/Kconfig.debug
67858--- linux-3.1.4/lib/Kconfig.debug 2011-11-11 15:19:27.000000000 -0500
67859+++ linux-3.1.4/lib/Kconfig.debug 2011-11-16 18:40:44.000000000 -0500
67860@@ -1091,6 +1091,7 @@ config LATENCYTOP
67861 depends on DEBUG_KERNEL
67862 depends on STACKTRACE_SUPPORT
67863 depends on PROC_FS
67864+ depends on !GRKERNSEC_HIDESYM
67865 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
67866 select KALLSYMS
67867 select KALLSYMS_ALL
67868diff -urNp linux-3.1.4/lib/kref.c linux-3.1.4/lib/kref.c
67869--- linux-3.1.4/lib/kref.c 2011-11-11 15:19:27.000000000 -0500
67870+++ linux-3.1.4/lib/kref.c 2011-11-16 18:39:08.000000000 -0500
67871@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67872 */
67873 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67874 {
67875- WARN_ON(release == NULL);
67876+ BUG_ON(release == NULL);
67877 WARN_ON(release == (void (*)(struct kref *))kfree);
67878
67879 if (atomic_dec_and_test(&kref->refcount)) {
67880diff -urNp linux-3.1.4/lib/radix-tree.c linux-3.1.4/lib/radix-tree.c
67881--- linux-3.1.4/lib/radix-tree.c 2011-11-11 15:19:27.000000000 -0500
67882+++ linux-3.1.4/lib/radix-tree.c 2011-11-16 18:39:08.000000000 -0500
67883@@ -80,7 +80,7 @@ struct radix_tree_preload {
67884 int nr;
67885 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67886 };
67887-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67888+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67889
67890 static inline void *ptr_to_indirect(void *ptr)
67891 {
67892diff -urNp linux-3.1.4/lib/vsprintf.c linux-3.1.4/lib/vsprintf.c
67893--- linux-3.1.4/lib/vsprintf.c 2011-11-11 15:19:27.000000000 -0500
67894+++ linux-3.1.4/lib/vsprintf.c 2011-11-16 18:40:44.000000000 -0500
67895@@ -16,6 +16,9 @@
67896 * - scnprintf and vscnprintf
67897 */
67898
67899+#ifdef CONFIG_GRKERNSEC_HIDESYM
67900+#define __INCLUDED_BY_HIDESYM 1
67901+#endif
67902 #include <stdarg.h>
67903 #include <linux/module.h>
67904 #include <linux/types.h>
67905@@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end
67906 char sym[KSYM_SYMBOL_LEN];
67907 if (ext == 'B')
67908 sprint_backtrace(sym, value);
67909- else if (ext != 'f' && ext != 's')
67910+ else if (ext != 'f' && ext != 's' && ext != 'a')
67911 sprint_symbol(sym, value);
67912 else
67913 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67914@@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end,
67915 return string(buf, end, uuid, spec);
67916 }
67917
67918+#ifdef CONFIG_GRKERNSEC_HIDESYM
67919+int kptr_restrict __read_mostly = 2;
67920+#else
67921 int kptr_restrict __read_mostly;
67922+#endif
67923
67924 /*
67925 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67926@@ -810,6 +817,8 @@ int kptr_restrict __read_mostly;
67927 * - 'S' For symbolic direct pointers with offset
67928 * - 's' For symbolic direct pointers without offset
67929 * - 'B' For backtraced symbolic direct pointers with offset
67930+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67931+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67932 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67933 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67934 * - 'M' For a 6-byte MAC address, it prints the address in the
67935@@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf
67936 {
67937 if (!ptr && *fmt != 'K') {
67938 /*
67939- * Print (null) with the same width as a pointer so it makes
67940+ * Print (nil) with the same width as a pointer so it makes
67941 * tabular output look nice.
67942 */
67943 if (spec.field_width == -1)
67944 spec.field_width = 2 * sizeof(void *);
67945- return string(buf, end, "(null)", spec);
67946+ return string(buf, end, "(nil)", spec);
67947 }
67948
67949 switch (*fmt) {
67950@@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf
67951 /* Fallthrough */
67952 case 'S':
67953 case 's':
67954+#ifdef CONFIG_GRKERNSEC_HIDESYM
67955+ break;
67956+#else
67957+ return symbol_string(buf, end, ptr, spec, *fmt);
67958+#endif
67959+ case 'A':
67960+ case 'a':
67961 case 'B':
67962 return symbol_string(buf, end, ptr, spec, *fmt);
67963 case 'R':
67964@@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size,
67965 typeof(type) value; \
67966 if (sizeof(type) == 8) { \
67967 args = PTR_ALIGN(args, sizeof(u32)); \
67968- *(u32 *)&value = *(u32 *)args; \
67969- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67970+ *(u32 *)&value = *(const u32 *)args; \
67971+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67972 } else { \
67973 args = PTR_ALIGN(args, sizeof(type)); \
67974- value = *(typeof(type) *)args; \
67975+ value = *(const typeof(type) *)args; \
67976 } \
67977 args += sizeof(type); \
67978 value; \
67979@@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size,
67980 case FORMAT_TYPE_STR: {
67981 const char *str_arg = args;
67982 args += strlen(str_arg) + 1;
67983- str = string(str, end, (char *)str_arg, spec);
67984+ str = string(str, end, str_arg, spec);
67985 break;
67986 }
67987
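Under CONFIG_GRKERNSEC_HIDESYM, the kptr_restrict default of 2 set here, together with the raised sysctl minimum (extra1 = &two) in the kernel/sysctl.c hunk earlier in this patch, effectively pins the value at 2 so %pK pointers stay censored for every user. A small sketch that merely reads the running value from its standard procfs location:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
	int val;

	if (!f || fscanf(f, "%d", &val) != 1) {
		fprintf(stderr, "could not read kptr_restrict\n");
		return 1;
	}
	printf("kptr_restrict = %d (2: %%pK prints as zeros for all users)\n", val);
	fclose(f);
	return 0;
}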
67988diff -urNp linux-3.1.4/localversion-grsec linux-3.1.4/localversion-grsec
67989--- linux-3.1.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
67990+++ linux-3.1.4/localversion-grsec 2011-11-16 18:40:44.000000000 -0500
67991@@ -0,0 +1 @@
67992+-grsec
67993diff -urNp linux-3.1.4/Makefile linux-3.1.4/Makefile
67994--- linux-3.1.4/Makefile 2011-11-29 18:06:31.000000000 -0500
67995+++ linux-3.1.4/Makefile 2011-12-02 17:38:47.000000000 -0500
67996@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
67997
67998 HOSTCC = gcc
67999 HOSTCXX = g++
68000-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
68001-HOSTCXXFLAGS = -O2
68002+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
68003+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
68004+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
68005
68006 # Decide whether to build built-in, modular, or both.
68007 # Normally, just do built-in.
68008@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
68009 # Rules shared between *config targets and build targets
68010
68011 # Basic helpers built in scripts/
68012-PHONY += scripts_basic
68013-scripts_basic:
68014+PHONY += scripts_basic gcc-plugins
68015+scripts_basic: gcc-plugins
68016 $(Q)$(MAKE) $(build)=scripts/basic
68017 $(Q)rm -f .tmp_quiet_recordmcount
68018
68019@@ -564,6 +565,42 @@ else
68020 KBUILD_CFLAGS += -O2
68021 endif
68022
68023+ifndef DISABLE_PAX_PLUGINS
68024+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
68025+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
68026+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
68027+endif
68028+ifdef CONFIG_PAX_MEMORY_STACKLEAK
68029+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
68030+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
68031+endif
68032+ifdef CONFIG_KALLOCSTAT_PLUGIN
68033+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
68034+endif
68035+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
68036+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
68037+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
68038+endif
68039+ifdef CONFIG_CHECKER_PLUGIN
68040+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
68041+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
68042+endif
68043+endif
68044+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
68045+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
68046+gcc-plugins:
68047+ $(Q)$(MAKE) $(build)=tools/gcc
68048+else
68049+gcc-plugins:
68050+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
68051+	$(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
68052+else
68053+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
68054+endif
68055+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
68056+endif
68057+endif
68058+
68059 include $(srctree)/arch/$(SRCARCH)/Makefile
68060
68061 ifneq ($(CONFIG_FRAME_WARN),0)
68062@@ -708,7 +745,7 @@ export mod_strip_cmd
68063
68064
68065 ifeq ($(KBUILD_EXTMOD),)
68066-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
68067+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
68068
68069 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
68070 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
68071@@ -932,6 +969,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
68072
68073 # The actual objects are generated when descending,
68074 # make sure no implicit rule kicks in
68075+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
68076 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
68077
68078 # Handle descending into subdirectories listed in $(vmlinux-dirs)
68079@@ -941,7 +979,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
68080 # Error messages still appears in the original language
68081
68082 PHONY += $(vmlinux-dirs)
68083-$(vmlinux-dirs): prepare scripts
68084+$(vmlinux-dirs): gcc-plugins prepare scripts
68085 $(Q)$(MAKE) $(build)=$@
68086
68087 # Store (new) KERNELRELASE string in include/config/kernel.release
68088@@ -986,6 +1024,7 @@ prepare0: archprepare FORCE
68089 $(Q)$(MAKE) $(build)=. missing-syscalls
68090
68091 # All the preparing..
68092+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
68093 prepare: prepare0
68094
68095 # Generate some files
68096@@ -1087,6 +1126,7 @@ all: modules
68097 # using awk while concatenating to the final file.
68098
68099 PHONY += modules
68100+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
68101 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
68102 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
68103 @$(kecho) ' Building modules, stage 2.';
68104@@ -1102,7 +1142,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
68105
68106 # Target to prepare building external modules
68107 PHONY += modules_prepare
68108-modules_prepare: prepare scripts
68109+modules_prepare: gcc-plugins prepare scripts
68110
68111 # Target to install modules
68112 PHONY += modules_install
68113@@ -1198,7 +1238,7 @@ distclean: mrproper
68114 @find $(srctree) $(RCS_FIND_IGNORE) \
68115 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
68116 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
68117- -o -name '.*.rej' -o -size 0 \
68118+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
68119 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
68120 -type f -print | xargs rm -f
68121
68122@@ -1360,6 +1400,7 @@ PHONY += $(module-dirs) modules
68123 $(module-dirs): crmodverdir $(objtree)/Module.symvers
68124 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
68125
68126+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
68127 modules: $(module-dirs)
68128 @$(kecho) ' Building modules, stage 2.';
68129 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
68130@@ -1486,17 +1527,19 @@ else
68131 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
68132 endif
68133
68134-%.s: %.c prepare scripts FORCE
68135+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
68136+%.s: %.c gcc-plugins prepare scripts FORCE
68137 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
68138 %.i: %.c prepare scripts FORCE
68139 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
68140-%.o: %.c prepare scripts FORCE
68141+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
68142+%.o: %.c gcc-plugins prepare scripts FORCE
68143 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
68144 %.lst: %.c prepare scripts FORCE
68145 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
68146-%.s: %.S prepare scripts FORCE
68147+%.s: %.S gcc-plugins prepare scripts FORCE
68148 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
68149-%.o: %.S prepare scripts FORCE
68150+%.o: %.S gcc-plugins prepare scripts FORCE
68151 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
68152 %.symtypes: %.c prepare scripts FORCE
68153 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
68154@@ -1506,11 +1549,13 @@ endif
68155 $(cmd_crmodverdir)
68156 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
68157 $(build)=$(build-dir)
68158-%/: prepare scripts FORCE
68159+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
68160+%/: gcc-plugins prepare scripts FORCE
68161 $(cmd_crmodverdir)
68162 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
68163 $(build)=$(build-dir)
68164-%.ko: prepare scripts FORCE
68165+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
68166+%.ko: gcc-plugins prepare scripts FORCE
68167 $(cmd_crmodverdir)
68168 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
68169 $(build)=$(build-dir) $(@:.ko=.o)
68170diff -urNp linux-3.1.4/mm/filemap.c linux-3.1.4/mm/filemap.c
68171--- linux-3.1.4/mm/filemap.c 2011-11-11 15:19:27.000000000 -0500
68172+++ linux-3.1.4/mm/filemap.c 2011-11-16 18:40:44.000000000 -0500
68173@@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file
68174 struct address_space *mapping = file->f_mapping;
68175
68176 if (!mapping->a_ops->readpage)
68177- return -ENOEXEC;
68178+ return -ENODEV;
68179 file_accessed(file);
68180 vma->vm_ops = &generic_file_vm_ops;
68181 vma->vm_flags |= VM_CAN_NONLINEAR;
68182@@ -2190,6 +2190,7 @@ inline int generic_write_checks(struct f
68183 *pos = i_size_read(inode);
68184
68185 if (limit != RLIM_INFINITY) {
68186+	gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
68187 if (*pos >= limit) {
68188 send_sig(SIGXFSZ, current, 0);
68189 return -EFBIG;
68190diff -urNp linux-3.1.4/mm/fremap.c linux-3.1.4/mm/fremap.c
68191--- linux-3.1.4/mm/fremap.c 2011-11-11 15:19:27.000000000 -0500
68192+++ linux-3.1.4/mm/fremap.c 2011-11-16 18:39:08.000000000 -0500
68193@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
68194 retry:
68195 vma = find_vma(mm, start);
68196
68197+#ifdef CONFIG_PAX_SEGMEXEC
68198+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
68199+ goto out;
68200+#endif
68201+
68202 /*
68203 * Make sure the vma is shared, that it supports prefaulting,
68204 * and that the remapped range is valid and fully within
68205diff -urNp linux-3.1.4/mm/highmem.c linux-3.1.4/mm/highmem.c
68206--- linux-3.1.4/mm/highmem.c 2011-11-11 15:19:27.000000000 -0500
68207+++ linux-3.1.4/mm/highmem.c 2011-11-16 18:39:08.000000000 -0500
68208@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
68209 * So no dangers, even with speculative execution.
68210 */
68211 page = pte_page(pkmap_page_table[i]);
68212+ pax_open_kernel();
68213 pte_clear(&init_mm, (unsigned long)page_address(page),
68214 &pkmap_page_table[i]);
68215-
68216+ pax_close_kernel();
68217 set_page_address(page, NULL);
68218 need_flush = 1;
68219 }
68220@@ -186,9 +187,11 @@ start:
68221 }
68222 }
68223 vaddr = PKMAP_ADDR(last_pkmap_nr);
68224+
68225+ pax_open_kernel();
68226 set_pte_at(&init_mm, vaddr,
68227 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
68228-
68229+ pax_close_kernel();
68230 pkmap_count[last_pkmap_nr] = 1;
68231 set_page_address(page, (void *)vaddr);
68232
68233diff -urNp linux-3.1.4/mm/huge_memory.c linux-3.1.4/mm/huge_memory.c
68234--- linux-3.1.4/mm/huge_memory.c 2011-11-11 15:19:27.000000000 -0500
68235+++ linux-3.1.4/mm/huge_memory.c 2011-12-02 17:38:47.000000000 -0500
68236@@ -702,7 +702,7 @@ out:
68237 * run pte_offset_map on the pmd, if an huge pmd could
68238 * materialize from under us from a different thread.
68239 */
68240- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
68241+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68242 return VM_FAULT_OOM;
68243 /* if an huge pmd materialized from under us just retry later */
68244 if (unlikely(pmd_trans_huge(*pmd)))
68245@@ -829,7 +829,7 @@ static int do_huge_pmd_wp_page_fallback(
68246
68247 for (i = 0; i < HPAGE_PMD_NR; i++) {
68248 copy_user_highpage(pages[i], page + i,
68249- haddr + PAGE_SHIFT*i, vma);
68250+ haddr + PAGE_SIZE*i, vma);
68251 __SetPageUptodate(pages[i]);
68252 cond_resched();
68253 }
68254diff -urNp linux-3.1.4/mm/hugetlb.c linux-3.1.4/mm/hugetlb.c
68255--- linux-3.1.4/mm/hugetlb.c 2011-11-11 15:19:27.000000000 -0500
68256+++ linux-3.1.4/mm/hugetlb.c 2011-11-16 18:39:08.000000000 -0500
68257@@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_s
68258 return 1;
68259 }
68260
68261+#ifdef CONFIG_PAX_SEGMEXEC
68262+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
68263+{
68264+ struct mm_struct *mm = vma->vm_mm;
68265+ struct vm_area_struct *vma_m;
68266+ unsigned long address_m;
68267+ pte_t *ptep_m;
68268+
68269+ vma_m = pax_find_mirror_vma(vma);
68270+ if (!vma_m)
68271+ return;
68272+
68273+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68274+ address_m = address + SEGMEXEC_TASK_SIZE;
68275+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
68276+ get_page(page_m);
68277+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
68278+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
68279+}
68280+#endif
68281+
68282 /*
68283 * Hugetlb_cow() should be called with page lock of the original hugepage held.
68284 */
68285@@ -2447,6 +2468,11 @@ retry_avoidcopy:
68286 make_huge_pte(vma, new_page, 1));
68287 page_remove_rmap(old_page);
68288 hugepage_add_new_anon_rmap(new_page, vma, address);
68289+
68290+#ifdef CONFIG_PAX_SEGMEXEC
68291+ pax_mirror_huge_pte(vma, address, new_page);
68292+#endif
68293+
68294 /* Make the old page be freed below */
68295 new_page = old_page;
68296 mmu_notifier_invalidate_range_end(mm,
68297@@ -2598,6 +2624,10 @@ retry:
68298 && (vma->vm_flags & VM_SHARED)));
68299 set_huge_pte_at(mm, address, ptep, new_pte);
68300
68301+#ifdef CONFIG_PAX_SEGMEXEC
68302+ pax_mirror_huge_pte(vma, address, page);
68303+#endif
68304+
68305 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
68306 /* Optimization, do the COW without a second fault */
68307 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
68308@@ -2627,6 +2657,10 @@ int hugetlb_fault(struct mm_struct *mm,
68309 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
68310 struct hstate *h = hstate_vma(vma);
68311
68312+#ifdef CONFIG_PAX_SEGMEXEC
68313+ struct vm_area_struct *vma_m;
68314+#endif
68315+
68316 ptep = huge_pte_offset(mm, address);
68317 if (ptep) {
68318 entry = huge_ptep_get(ptep);
68319@@ -2638,6 +2672,26 @@ int hugetlb_fault(struct mm_struct *mm,
68320 VM_FAULT_SET_HINDEX(h - hstates);
68321 }
68322
68323+#ifdef CONFIG_PAX_SEGMEXEC
68324+ vma_m = pax_find_mirror_vma(vma);
68325+ if (vma_m) {
68326+ unsigned long address_m;
68327+
68328+ if (vma->vm_start > vma_m->vm_start) {
68329+ address_m = address;
68330+ address -= SEGMEXEC_TASK_SIZE;
68331+ vma = vma_m;
68332+ h = hstate_vma(vma);
68333+ } else
68334+ address_m = address + SEGMEXEC_TASK_SIZE;
68335+
68336+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
68337+ return VM_FAULT_OOM;
68338+ address_m &= HPAGE_MASK;
68339+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
68340+ }
68341+#endif
68342+
68343 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
68344 if (!ptep)
68345 return VM_FAULT_OOM;
68346diff -urNp linux-3.1.4/mm/internal.h linux-3.1.4/mm/internal.h
68347--- linux-3.1.4/mm/internal.h 2011-11-11 15:19:27.000000000 -0500
68348+++ linux-3.1.4/mm/internal.h 2011-11-16 18:39:08.000000000 -0500
68349@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page
68350 * in mm/page_alloc.c
68351 */
68352 extern void __free_pages_bootmem(struct page *page, unsigned int order);
68353+extern void free_compound_page(struct page *page);
68354 extern void prep_compound_page(struct page *page, unsigned long order);
68355 #ifdef CONFIG_MEMORY_FAILURE
68356 extern bool is_free_buddy_page(struct page *page);
68357diff -urNp linux-3.1.4/mm/Kconfig linux-3.1.4/mm/Kconfig
68358--- linux-3.1.4/mm/Kconfig 2011-11-11 15:19:27.000000000 -0500
68359+++ linux-3.1.4/mm/Kconfig 2011-11-17 18:57:00.000000000 -0500
68360@@ -238,10 +238,10 @@ config KSM
68361 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
68362
68363 config DEFAULT_MMAP_MIN_ADDR
68364- int "Low address space to protect from user allocation"
68365+ int "Low address space to protect from user allocation"
68366 depends on MMU
68367- default 4096
68368- help
68369+ default 65536
68370+ help
68371 This is the portion of low virtual memory which should be protected
68372 from userspace allocation. Keeping a user from writing to low pages
68373 can help reduce the impact of kernel NULL pointer bugs.
68374diff -urNp linux-3.1.4/mm/kmemleak.c linux-3.1.4/mm/kmemleak.c
68375--- linux-3.1.4/mm/kmemleak.c 2011-11-11 15:19:27.000000000 -0500
68376+++ linux-3.1.4/mm/kmemleak.c 2011-11-16 18:40:44.000000000 -0500
68377@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
68378
68379 for (i = 0; i < object->trace_len; i++) {
68380 void *ptr = (void *)object->trace[i];
68381- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
68382+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
68383 }
68384 }
68385
68386diff -urNp linux-3.1.4/mm/maccess.c linux-3.1.4/mm/maccess.c
68387--- linux-3.1.4/mm/maccess.c 2011-11-11 15:19:27.000000000 -0500
68388+++ linux-3.1.4/mm/maccess.c 2011-11-16 18:39:08.000000000 -0500
68389@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
68390 set_fs(KERNEL_DS);
68391 pagefault_disable();
68392 ret = __copy_from_user_inatomic(dst,
68393- (__force const void __user *)src, size);
68394+ (const void __force_user *)src, size);
68395 pagefault_enable();
68396 set_fs(old_fs);
68397
68398@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
68399
68400 set_fs(KERNEL_DS);
68401 pagefault_disable();
68402- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
68403+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
68404 pagefault_enable();
68405 set_fs(old_fs);
68406
68407diff -urNp linux-3.1.4/mm/madvise.c linux-3.1.4/mm/madvise.c
68408--- linux-3.1.4/mm/madvise.c 2011-11-11 15:19:27.000000000 -0500
68409+++ linux-3.1.4/mm/madvise.c 2011-11-16 18:39:08.000000000 -0500
68410@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
68411 pgoff_t pgoff;
68412 unsigned long new_flags = vma->vm_flags;
68413
68414+#ifdef CONFIG_PAX_SEGMEXEC
68415+ struct vm_area_struct *vma_m;
68416+#endif
68417+
68418 switch (behavior) {
68419 case MADV_NORMAL:
68420 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
68421@@ -110,6 +114,13 @@ success:
68422 /*
68423 * vm_flags is protected by the mmap_sem held in write mode.
68424 */
68425+
68426+#ifdef CONFIG_PAX_SEGMEXEC
68427+ vma_m = pax_find_mirror_vma(vma);
68428+ if (vma_m)
68429+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
68430+#endif
68431+
68432 vma->vm_flags = new_flags;
68433
68434 out:
68435@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
68436 struct vm_area_struct ** prev,
68437 unsigned long start, unsigned long end)
68438 {
68439+
68440+#ifdef CONFIG_PAX_SEGMEXEC
68441+ struct vm_area_struct *vma_m;
68442+#endif
68443+
68444 *prev = vma;
68445 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
68446 return -EINVAL;
68447@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
68448 zap_page_range(vma, start, end - start, &details);
68449 } else
68450 zap_page_range(vma, start, end - start, NULL);
68451+
68452+#ifdef CONFIG_PAX_SEGMEXEC
68453+ vma_m = pax_find_mirror_vma(vma);
68454+ if (vma_m) {
68455+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
68456+ struct zap_details details = {
68457+ .nonlinear_vma = vma_m,
68458+ .last_index = ULONG_MAX,
68459+ };
68460+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
68461+ } else
68462+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
68463+ }
68464+#endif
68465+
68466 return 0;
68467 }
68468
68469@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
68470 if (end < start)
68471 goto out;
68472
68473+#ifdef CONFIG_PAX_SEGMEXEC
68474+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
68475+ if (end > SEGMEXEC_TASK_SIZE)
68476+ goto out;
68477+ } else
68478+#endif
68479+
68480+ if (end > TASK_SIZE)
68481+ goto out;
68482+
68483 error = 0;
68484 if (end == start)
68485 goto out;
68486diff -urNp linux-3.1.4/mm/memory.c linux-3.1.4/mm/memory.c
68487--- linux-3.1.4/mm/memory.c 2011-11-11 15:19:27.000000000 -0500
68488+++ linux-3.1.4/mm/memory.c 2011-11-16 18:39:08.000000000 -0500
68489@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
68490 return;
68491
68492 pmd = pmd_offset(pud, start);
68493+
68494+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
68495 pud_clear(pud);
68496 pmd_free_tlb(tlb, pmd, start);
68497+#endif
68498+
68499 }
68500
68501 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68502@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
68503 if (end - 1 > ceiling - 1)
68504 return;
68505
68506+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
68507 pud = pud_offset(pgd, start);
68508 pgd_clear(pgd);
68509 pud_free_tlb(tlb, pud, start);
68510+#endif
68511+
68512 }
68513
68514 /*
68515@@ -1566,12 +1573,6 @@ no_page_table:
68516 return page;
68517 }
68518
68519-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68520-{
68521- return stack_guard_page_start(vma, addr) ||
68522- stack_guard_page_end(vma, addr+PAGE_SIZE);
68523-}
68524-
68525 /**
68526 * __get_user_pages() - pin user pages in memory
68527 * @tsk: task_struct of target task
68528@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct
68529 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
68530 i = 0;
68531
68532- do {
68533+ while (nr_pages) {
68534 struct vm_area_struct *vma;
68535
68536- vma = find_extend_vma(mm, start);
68537+ vma = find_vma(mm, start);
68538 if (!vma && in_gate_area(mm, start)) {
68539 unsigned long pg = start & PAGE_MASK;
68540 pgd_t *pgd;
68541@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct
68542 goto next_page;
68543 }
68544
68545- if (!vma ||
68546+ if (!vma || start < vma->vm_start ||
68547 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
68548 !(vm_flags & vma->vm_flags))
68549 return i ? : -EFAULT;
68550@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct
68551 int ret;
68552 unsigned int fault_flags = 0;
68553
68554- /* For mlock, just skip the stack guard page. */
68555- if (foll_flags & FOLL_MLOCK) {
68556- if (stack_guard_page(vma, start))
68557- goto next_page;
68558- }
68559 if (foll_flags & FOLL_WRITE)
68560 fault_flags |= FAULT_FLAG_WRITE;
68561 if (nonblocking)
68562@@ -1800,7 +1796,7 @@ next_page:
68563 start += PAGE_SIZE;
68564 nr_pages--;
68565 } while (nr_pages && start < vma->vm_end);
68566- } while (nr_pages);
68567+ }
68568 return i;
68569 }
68570 EXPORT_SYMBOL(__get_user_pages);
68571@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_st
68572 page_add_file_rmap(page);
68573 set_pte_at(mm, addr, pte, mk_pte(page, prot));
68574
68575+#ifdef CONFIG_PAX_SEGMEXEC
68576+ pax_mirror_file_pte(vma, addr, page, ptl);
68577+#endif
68578+
68579 retval = 0;
68580 pte_unmap_unlock(pte, ptl);
68581 return retval;
68582@@ -2041,10 +2041,22 @@ out:
68583 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
68584 struct page *page)
68585 {
68586+
68587+#ifdef CONFIG_PAX_SEGMEXEC
68588+ struct vm_area_struct *vma_m;
68589+#endif
68590+
68591 if (addr < vma->vm_start || addr >= vma->vm_end)
68592 return -EFAULT;
68593 if (!page_count(page))
68594 return -EINVAL;
68595+
68596+#ifdef CONFIG_PAX_SEGMEXEC
68597+ vma_m = pax_find_mirror_vma(vma);
68598+ if (vma_m)
68599+ vma_m->vm_flags |= VM_INSERTPAGE;
68600+#endif
68601+
68602 vma->vm_flags |= VM_INSERTPAGE;
68603 return insert_page(vma, addr, page, vma->vm_page_prot);
68604 }
68605@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struc
68606 unsigned long pfn)
68607 {
68608 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68609+ BUG_ON(vma->vm_mirror);
68610
68611 if (addr < vma->vm_start || addr >= vma->vm_end)
68612 return -EFAULT;
68613@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct
68614 copy_user_highpage(dst, src, va, vma);
68615 }
68616
68617+#ifdef CONFIG_PAX_SEGMEXEC
68618+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68619+{
68620+ struct mm_struct *mm = vma->vm_mm;
68621+ spinlock_t *ptl;
68622+ pte_t *pte, entry;
68623+
68624+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68625+ entry = *pte;
68626+ if (!pte_present(entry)) {
68627+ if (!pte_none(entry)) {
68628+ BUG_ON(pte_file(entry));
68629+ free_swap_and_cache(pte_to_swp_entry(entry));
68630+ pte_clear_not_present_full(mm, address, pte, 0);
68631+ }
68632+ } else {
68633+ struct page *page;
68634+
68635+ flush_cache_page(vma, address, pte_pfn(entry));
68636+ entry = ptep_clear_flush(vma, address, pte);
68637+ BUG_ON(pte_dirty(entry));
68638+ page = vm_normal_page(vma, address, entry);
68639+ if (page) {
68640+ update_hiwater_rss(mm);
68641+ if (PageAnon(page))
68642+ dec_mm_counter_fast(mm, MM_ANONPAGES);
68643+ else
68644+ dec_mm_counter_fast(mm, MM_FILEPAGES);
68645+ page_remove_rmap(page);
68646+ page_cache_release(page);
68647+ }
68648+ }
68649+ pte_unmap_unlock(pte, ptl);
68650+}
68651+
68652+/* PaX: if vma is mirrored, synchronize the mirror's PTE
68653+ *
68654+ * the ptl of the lower mapped page is held on entry and is not released on exit
68655+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68656+ */
68657+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68658+{
68659+ struct mm_struct *mm = vma->vm_mm;
68660+ unsigned long address_m;
68661+ spinlock_t *ptl_m;
68662+ struct vm_area_struct *vma_m;
68663+ pmd_t *pmd_m;
68664+ pte_t *pte_m, entry_m;
68665+
68666+ BUG_ON(!page_m || !PageAnon(page_m));
68667+
68668+ vma_m = pax_find_mirror_vma(vma);
68669+ if (!vma_m)
68670+ return;
68671+
68672+ BUG_ON(!PageLocked(page_m));
68673+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68674+ address_m = address + SEGMEXEC_TASK_SIZE;
68675+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68676+ pte_m = pte_offset_map(pmd_m, address_m);
68677+ ptl_m = pte_lockptr(mm, pmd_m);
68678+ if (ptl != ptl_m) {
68679+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68680+ if (!pte_none(*pte_m))
68681+ goto out;
68682+ }
68683+
68684+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68685+ page_cache_get(page_m);
68686+ page_add_anon_rmap(page_m, vma_m, address_m);
68687+ inc_mm_counter_fast(mm, MM_ANONPAGES);
68688+ set_pte_at(mm, address_m, pte_m, entry_m);
68689+ update_mmu_cache(vma_m, address_m, entry_m);
68690+out:
68691+ if (ptl != ptl_m)
68692+ spin_unlock(ptl_m);
68693+ pte_unmap(pte_m);
68694+ unlock_page(page_m);
68695+}
68696+
68697+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68698+{
68699+ struct mm_struct *mm = vma->vm_mm;
68700+ unsigned long address_m;
68701+ spinlock_t *ptl_m;
68702+ struct vm_area_struct *vma_m;
68703+ pmd_t *pmd_m;
68704+ pte_t *pte_m, entry_m;
68705+
68706+ BUG_ON(!page_m || PageAnon(page_m));
68707+
68708+ vma_m = pax_find_mirror_vma(vma);
68709+ if (!vma_m)
68710+ return;
68711+
68712+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68713+ address_m = address + SEGMEXEC_TASK_SIZE;
68714+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68715+ pte_m = pte_offset_map(pmd_m, address_m);
68716+ ptl_m = pte_lockptr(mm, pmd_m);
68717+ if (ptl != ptl_m) {
68718+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68719+ if (!pte_none(*pte_m))
68720+ goto out;
68721+ }
68722+
68723+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68724+ page_cache_get(page_m);
68725+ page_add_file_rmap(page_m);
68726+ inc_mm_counter_fast(mm, MM_FILEPAGES);
68727+ set_pte_at(mm, address_m, pte_m, entry_m);
68728+ update_mmu_cache(vma_m, address_m, entry_m);
68729+out:
68730+ if (ptl != ptl_m)
68731+ spin_unlock(ptl_m);
68732+ pte_unmap(pte_m);
68733+}
68734+
68735+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68736+{
68737+ struct mm_struct *mm = vma->vm_mm;
68738+ unsigned long address_m;
68739+ spinlock_t *ptl_m;
68740+ struct vm_area_struct *vma_m;
68741+ pmd_t *pmd_m;
68742+ pte_t *pte_m, entry_m;
68743+
68744+ vma_m = pax_find_mirror_vma(vma);
68745+ if (!vma_m)
68746+ return;
68747+
68748+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68749+ address_m = address + SEGMEXEC_TASK_SIZE;
68750+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68751+ pte_m = pte_offset_map(pmd_m, address_m);
68752+ ptl_m = pte_lockptr(mm, pmd_m);
68753+ if (ptl != ptl_m) {
68754+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68755+ if (!pte_none(*pte_m))
68756+ goto out;
68757+ }
68758+
68759+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68760+ set_pte_at(mm, address_m, pte_m, entry_m);
68761+out:
68762+ if (ptl != ptl_m)
68763+ spin_unlock(ptl_m);
68764+ pte_unmap(pte_m);
68765+}
68766+
68767+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68768+{
68769+ struct page *page_m;
68770+ pte_t entry;
68771+
68772+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68773+ goto out;
68774+
68775+ entry = *pte;
68776+ page_m = vm_normal_page(vma, address, entry);
68777+ if (!page_m)
68778+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68779+ else if (PageAnon(page_m)) {
68780+ if (pax_find_mirror_vma(vma)) {
68781+ pte_unmap_unlock(pte, ptl);
68782+ lock_page(page_m);
68783+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68784+ if (pte_same(entry, *pte))
68785+ pax_mirror_anon_pte(vma, address, page_m, ptl);
68786+ else
68787+ unlock_page(page_m);
68788+ }
68789+ } else
68790+ pax_mirror_file_pte(vma, address, page_m, ptl);
68791+
68792+out:
68793+ pte_unmap_unlock(pte, ptl);
68794+}
68795+#endif
68796+
68797 /*
68798 * This routine handles present pages, when users try to write
68799 * to a shared page. It is done by copying the page to a new address
68800@@ -2656,6 +2849,12 @@ gotten:
68801 */
68802 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68803 if (likely(pte_same(*page_table, orig_pte))) {
68804+
68805+#ifdef CONFIG_PAX_SEGMEXEC
68806+ if (pax_find_mirror_vma(vma))
68807+ BUG_ON(!trylock_page(new_page));
68808+#endif
68809+
68810 if (old_page) {
68811 if (!PageAnon(old_page)) {
68812 dec_mm_counter_fast(mm, MM_FILEPAGES);
68813@@ -2707,6 +2906,10 @@ gotten:
68814 page_remove_rmap(old_page);
68815 }
68816
68817+#ifdef CONFIG_PAX_SEGMEXEC
68818+ pax_mirror_anon_pte(vma, address, new_page, ptl);
68819+#endif
68820+
68821 /* Free the old page.. */
68822 new_page = old_page;
68823 ret |= VM_FAULT_WRITE;
68824@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct
68825 swap_free(entry);
68826 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68827 try_to_free_swap(page);
68828+
68829+#ifdef CONFIG_PAX_SEGMEXEC
68830+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68831+#endif
68832+
68833 unlock_page(page);
68834 if (swapcache) {
68835 /*
68836@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct
68837
68838 /* No need to invalidate - it was non-present before */
68839 update_mmu_cache(vma, address, page_table);
68840+
68841+#ifdef CONFIG_PAX_SEGMEXEC
68842+ pax_mirror_anon_pte(vma, address, page, ptl);
68843+#endif
68844+
68845 unlock:
68846 pte_unmap_unlock(page_table, ptl);
68847 out:
68848@@ -3028,40 +3241,6 @@ out_release:
68849 }
68850
68851 /*
68852- * This is like a special single-page "expand_{down|up}wards()",
68853- * except we must first make sure that 'address{-|+}PAGE_SIZE'
68854- * doesn't hit another vma.
68855- */
68856-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68857-{
68858- address &= PAGE_MASK;
68859- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68860- struct vm_area_struct *prev = vma->vm_prev;
68861-
68862- /*
68863- * Is there a mapping abutting this one below?
68864- *
68865- * That's only ok if it's the same stack mapping
68866- * that has gotten split..
68867- */
68868- if (prev && prev->vm_end == address)
68869- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68870-
68871- expand_downwards(vma, address - PAGE_SIZE);
68872- }
68873- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68874- struct vm_area_struct *next = vma->vm_next;
68875-
68876- /* As VM_GROWSDOWN but s/below/above/ */
68877- if (next && next->vm_start == address + PAGE_SIZE)
68878- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68879-
68880- expand_upwards(vma, address + PAGE_SIZE);
68881- }
68882- return 0;
68883-}
68884-
68885-/*
68886 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68887 * but allow concurrent faults), and pte mapped but not yet locked.
68888 * We return with mmap_sem still held, but pte unmapped and unlocked.
68889@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_s
68890 unsigned long address, pte_t *page_table, pmd_t *pmd,
68891 unsigned int flags)
68892 {
68893- struct page *page;
68894+ struct page *page = NULL;
68895 spinlock_t *ptl;
68896 pte_t entry;
68897
68898- pte_unmap(page_table);
68899-
68900- /* Check if we need to add a guard page to the stack */
68901- if (check_stack_guard_page(vma, address) < 0)
68902- return VM_FAULT_SIGBUS;
68903-
68904- /* Use the zero-page for reads */
68905 if (!(flags & FAULT_FLAG_WRITE)) {
68906 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68907 vma->vm_page_prot));
68908- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68909+ ptl = pte_lockptr(mm, pmd);
68910+ spin_lock(ptl);
68911 if (!pte_none(*page_table))
68912 goto unlock;
68913 goto setpte;
68914 }
68915
68916 /* Allocate our own private page. */
68917+ pte_unmap(page_table);
68918+
68919 if (unlikely(anon_vma_prepare(vma)))
68920 goto oom;
68921 page = alloc_zeroed_user_highpage_movable(vma, address);
68922@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_s
68923 if (!pte_none(*page_table))
68924 goto release;
68925
68926+#ifdef CONFIG_PAX_SEGMEXEC
68927+ if (pax_find_mirror_vma(vma))
68928+ BUG_ON(!trylock_page(page));
68929+#endif
68930+
68931 inc_mm_counter_fast(mm, MM_ANONPAGES);
68932 page_add_new_anon_rmap(page, vma, address);
68933 setpte:
68934@@ -3116,6 +3296,12 @@ setpte:
68935
68936 /* No need to invalidate - it was non-present before */
68937 update_mmu_cache(vma, address, page_table);
68938+
68939+#ifdef CONFIG_PAX_SEGMEXEC
68940+ if (page)
68941+ pax_mirror_anon_pte(vma, address, page, ptl);
68942+#endif
68943+
68944 unlock:
68945 pte_unmap_unlock(page_table, ptl);
68946 return 0;
68947@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *
68948 */
68949 /* Only go through if we didn't race with anybody else... */
68950 if (likely(pte_same(*page_table, orig_pte))) {
68951+
68952+#ifdef CONFIG_PAX_SEGMEXEC
68953+ if (anon && pax_find_mirror_vma(vma))
68954+ BUG_ON(!trylock_page(page));
68955+#endif
68956+
68957 flush_icache_page(vma, page);
68958 entry = mk_pte(page, vma->vm_page_prot);
68959 if (flags & FAULT_FLAG_WRITE)
68960@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *
68961
68962 /* no need to invalidate: a not-present page won't be cached */
68963 update_mmu_cache(vma, address, page_table);
68964+
68965+#ifdef CONFIG_PAX_SEGMEXEC
68966+ if (anon)
68967+ pax_mirror_anon_pte(vma, address, page, ptl);
68968+ else
68969+ pax_mirror_file_pte(vma, address, page, ptl);
68970+#endif
68971+
68972 } else {
68973 if (cow_page)
68974 mem_cgroup_uncharge_page(cow_page);
68975@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *m
68976 if (flags & FAULT_FLAG_WRITE)
68977 flush_tlb_fix_spurious_fault(vma, address);
68978 }
68979+
68980+#ifdef CONFIG_PAX_SEGMEXEC
68981+ pax_mirror_pte(vma, address, pte, pmd, ptl);
68982+ return 0;
68983+#endif
68984+
68985 unlock:
68986 pte_unmap_unlock(pte, ptl);
68987 return 0;
68988@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm
68989 pmd_t *pmd;
68990 pte_t *pte;
68991
68992+#ifdef CONFIG_PAX_SEGMEXEC
68993+ struct vm_area_struct *vma_m;
68994+#endif
68995+
68996 __set_current_state(TASK_RUNNING);
68997
68998 count_vm_event(PGFAULT);
68999@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm
69000 if (unlikely(is_vm_hugetlb_page(vma)))
69001 return hugetlb_fault(mm, vma, address, flags);
69002
69003+#ifdef CONFIG_PAX_SEGMEXEC
69004+ vma_m = pax_find_mirror_vma(vma);
69005+ if (vma_m) {
69006+ unsigned long address_m;
69007+ pgd_t *pgd_m;
69008+ pud_t *pud_m;
69009+ pmd_t *pmd_m;
69010+
69011+ if (vma->vm_start > vma_m->vm_start) {
69012+ address_m = address;
69013+ address -= SEGMEXEC_TASK_SIZE;
69014+ vma = vma_m;
69015+ } else
69016+ address_m = address + SEGMEXEC_TASK_SIZE;
69017+
69018+ pgd_m = pgd_offset(mm, address_m);
69019+ pud_m = pud_alloc(mm, pgd_m, address_m);
69020+ if (!pud_m)
69021+ return VM_FAULT_OOM;
69022+ pmd_m = pmd_alloc(mm, pud_m, address_m);
69023+ if (!pmd_m)
69024+ return VM_FAULT_OOM;
69025+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
69026+ return VM_FAULT_OOM;
69027+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
69028+ }
69029+#endif
69030+
69031 pgd = pgd_offset(mm, address);
69032 pud = pud_alloc(mm, pgd, address);
69033 if (!pud)
69034@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm
69035 * run pte_offset_map on the pmd, if an huge pmd could
69036 * materialize from under us from a different thread.
69037 */
69038- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
69039+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69040 return VM_FAULT_OOM;
69041 /* if an huge pmd materialized from under us just retry later */
69042 if (unlikely(pmd_trans_huge(*pmd)))
69043@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
69044 gate_vma.vm_start = FIXADDR_USER_START;
69045 gate_vma.vm_end = FIXADDR_USER_END;
69046 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
69047- gate_vma.vm_page_prot = __P101;
69048+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
69049 /*
69050 * Make sure the vDSO gets into every core dump.
69051 * Dumping its contents makes post-mortem fully interpretable later
69052diff -urNp linux-3.1.4/mm/memory-failure.c linux-3.1.4/mm/memory-failure.c
69053--- linux-3.1.4/mm/memory-failure.c 2011-11-11 15:19:27.000000000 -0500
69054+++ linux-3.1.4/mm/memory-failure.c 2011-11-16 18:39:08.000000000 -0500
69055@@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __r
69056
69057 int sysctl_memory_failure_recovery __read_mostly = 1;
69058
69059-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69060+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69061
69062 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69063
69064@@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_stru
69065 si.si_signo = SIGBUS;
69066 si.si_errno = 0;
69067 si.si_code = BUS_MCEERR_AO;
69068- si.si_addr = (void *)addr;
69069+ si.si_addr = (void __user *)addr;
69070 #ifdef __ARCH_SI_TRAPNO
69071 si.si_trapno = trapno;
69072 #endif
69073@@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn,
69074 }
69075
69076 nr_pages = 1 << compound_trans_order(hpage);
69077- atomic_long_add(nr_pages, &mce_bad_pages);
69078+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69079
69080 /*
69081 * We need/can do nothing about count=0 pages.
69082@@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn,
69083 if (!PageHWPoison(hpage)
69084 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69085 || (p != hpage && TestSetPageHWPoison(hpage))) {
69086- atomic_long_sub(nr_pages, &mce_bad_pages);
69087+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69088 return 0;
69089 }
69090 set_page_hwpoison_huge_page(hpage);
69091@@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn,
69092 }
69093 if (hwpoison_filter(p)) {
69094 if (TestClearPageHWPoison(p))
69095- atomic_long_sub(nr_pages, &mce_bad_pages);
69096+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69097 unlock_page(hpage);
69098 put_page(hpage);
69099 return 0;
69100@@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn)
69101 return 0;
69102 }
69103 if (TestClearPageHWPoison(p))
69104- atomic_long_sub(nr_pages, &mce_bad_pages);
69105+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69106 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69107 return 0;
69108 }
69109@@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn)
69110 */
69111 if (TestClearPageHWPoison(page)) {
69112 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69113- atomic_long_sub(nr_pages, &mce_bad_pages);
69114+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69115 freeit = 1;
69116 if (PageHuge(page))
69117 clear_page_hwpoison_huge_page(page);
69118@@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct
69119 }
69120 done:
69121 if (!PageHWPoison(hpage))
69122- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69123+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69124 set_page_hwpoison_huge_page(hpage);
69125 dequeue_hwpoisoned_huge_page(hpage);
69126 /* keep elevated page count for bad page */
69127@@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page,
69128 return ret;
69129
69130 done:
69131- atomic_long_add(1, &mce_bad_pages);
69132+ atomic_long_add_unchecked(1, &mce_bad_pages);
69133 SetPageHWPoison(page);
69134 /* keep elevated page count for bad page */
69135 return ret;
69136diff -urNp linux-3.1.4/mm/mempolicy.c linux-3.1.4/mm/mempolicy.c
69137--- linux-3.1.4/mm/mempolicy.c 2011-11-11 15:19:27.000000000 -0500
69138+++ linux-3.1.4/mm/mempolicy.c 2011-11-16 18:40:44.000000000 -0500
69139@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
69140 unsigned long vmstart;
69141 unsigned long vmend;
69142
69143+#ifdef CONFIG_PAX_SEGMEXEC
69144+ struct vm_area_struct *vma_m;
69145+#endif
69146+
69147 vma = find_vma_prev(mm, start, &prev);
69148 if (!vma || vma->vm_start > start)
69149 return -EFAULT;
69150@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
69151 err = policy_vma(vma, new_pol);
69152 if (err)
69153 goto out;
69154+
69155+#ifdef CONFIG_PAX_SEGMEXEC
69156+ vma_m = pax_find_mirror_vma(vma);
69157+ if (vma_m) {
69158+ err = policy_vma(vma_m, new_pol);
69159+ if (err)
69160+ goto out;
69161+ }
69162+#endif
69163+
69164 }
69165
69166 out:
69167@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
69168
69169 if (end < start)
69170 return -EINVAL;
69171+
69172+#ifdef CONFIG_PAX_SEGMEXEC
69173+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69174+ if (end > SEGMEXEC_TASK_SIZE)
69175+ return -EINVAL;
69176+ } else
69177+#endif
69178+
69179+ if (end > TASK_SIZE)
69180+ return -EINVAL;
69181+
69182 if (end == start)
69183 return 0;
69184
69185@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
69186 if (!mm)
69187 goto out;
69188
69189+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69190+ if (mm != current->mm &&
69191+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
69192+ err = -EPERM;
69193+ goto out;
69194+ }
69195+#endif
69196+
69197 /*
69198 * Check if this process has the right to modify the specified
69199 * process. The right exists if the process has administrative
69200@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
69201 rcu_read_lock();
69202 tcred = __task_cred(task);
69203 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
69204- cred->uid != tcred->suid && cred->uid != tcred->uid &&
69205- !capable(CAP_SYS_NICE)) {
69206+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
69207 rcu_read_unlock();
69208 err = -EPERM;
69209 goto out;
69210diff -urNp linux-3.1.4/mm/migrate.c linux-3.1.4/mm/migrate.c
69211--- linux-3.1.4/mm/migrate.c 2011-11-11 15:19:27.000000000 -0500
69212+++ linux-3.1.4/mm/migrate.c 2011-11-16 18:40:44.000000000 -0500
69213@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
69214 unsigned long chunk_start;
69215 int err;
69216
69217+ pax_track_stack();
69218+
69219 task_nodes = cpuset_mems_allowed(task);
69220
69221 err = -ENOMEM;
69222@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
69223 if (!mm)
69224 return -EINVAL;
69225
69226+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69227+ if (mm != current->mm &&
69228+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
69229+ err = -EPERM;
69230+ goto out;
69231+ }
69232+#endif
69233+
69234 /*
69235 * Check if this process has the right to modify the specified
69236 * process. The right exists if the process has administrative
69237@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
69238 rcu_read_lock();
69239 tcred = __task_cred(task);
69240 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
69241- cred->uid != tcred->suid && cred->uid != tcred->uid &&
69242- !capable(CAP_SYS_NICE)) {
69243+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
69244 rcu_read_unlock();
69245 err = -EPERM;
69246 goto out;
69247diff -urNp linux-3.1.4/mm/mlock.c linux-3.1.4/mm/mlock.c
69248--- linux-3.1.4/mm/mlock.c 2011-11-11 15:19:27.000000000 -0500
69249+++ linux-3.1.4/mm/mlock.c 2011-11-16 18:40:44.000000000 -0500
69250@@ -13,6 +13,7 @@
69251 #include <linux/pagemap.h>
69252 #include <linux/mempolicy.h>
69253 #include <linux/syscalls.h>
69254+#include <linux/security.h>
69255 #include <linux/sched.h>
69256 #include <linux/module.h>
69257 #include <linux/rmap.h>
69258@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
69259 return -EINVAL;
69260 if (end == start)
69261 return 0;
69262+ if (end > TASK_SIZE)
69263+ return -EINVAL;
69264+
69265 vma = find_vma_prev(current->mm, start, &prev);
69266 if (!vma || vma->vm_start > start)
69267 return -ENOMEM;
69268@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
69269 for (nstart = start ; ; ) {
69270 vm_flags_t newflags;
69271
69272+#ifdef CONFIG_PAX_SEGMEXEC
69273+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
69274+ break;
69275+#endif
69276+
69277 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
69278
69279 newflags = vma->vm_flags | VM_LOCKED;
69280@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
69281 lock_limit >>= PAGE_SHIFT;
69282
69283 /* check against resource limits */
69284+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
69285 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
69286 error = do_mlock(start, len, 1);
69287 up_write(&current->mm->mmap_sem);
69288@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
69289 static int do_mlockall(int flags)
69290 {
69291 struct vm_area_struct * vma, * prev = NULL;
69292- unsigned int def_flags = 0;
69293
69294 if (flags & MCL_FUTURE)
69295- def_flags = VM_LOCKED;
69296- current->mm->def_flags = def_flags;
69297+ current->mm->def_flags |= VM_LOCKED;
69298+ else
69299+ current->mm->def_flags &= ~VM_LOCKED;
69300 if (flags == MCL_FUTURE)
69301 goto out;
69302
69303 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
69304 vm_flags_t newflags;
69305
69306+#ifdef CONFIG_PAX_SEGMEXEC
69307+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
69308+ break;
69309+#endif
69310+
69311+ BUG_ON(vma->vm_end > TASK_SIZE);
69312 newflags = vma->vm_flags | VM_LOCKED;
69313 if (!(flags & MCL_CURRENT))
69314 newflags &= ~VM_LOCKED;
69315@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
69316 lock_limit >>= PAGE_SHIFT;
69317
69318 ret = -ENOMEM;
69319+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
69320 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
69321 capable(CAP_IPC_LOCK))
69322 ret = do_mlockall(flags);
69323diff -urNp linux-3.1.4/mm/mmap.c linux-3.1.4/mm/mmap.c
69324--- linux-3.1.4/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
69325+++ linux-3.1.4/mm/mmap.c 2011-11-16 18:40:44.000000000 -0500
69326@@ -46,6 +46,16 @@
69327 #define arch_rebalance_pgtables(addr, len) (addr)
69328 #endif
69329
69330+static inline void verify_mm_writelocked(struct mm_struct *mm)
69331+{
69332+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
69333+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69334+ up_read(&mm->mmap_sem);
69335+ BUG();
69336+ }
69337+#endif
69338+}
69339+
69340 static void unmap_region(struct mm_struct *mm,
69341 struct vm_area_struct *vma, struct vm_area_struct *prev,
69342 unsigned long start, unsigned long end);
69343@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
69344 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
69345 *
69346 */
69347-pgprot_t protection_map[16] = {
69348+pgprot_t protection_map[16] __read_only = {
69349 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
69350 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
69351 };
69352
69353-pgprot_t vm_get_page_prot(unsigned long vm_flags)
69354+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
69355 {
69356- return __pgprot(pgprot_val(protection_map[vm_flags &
69357+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
69358 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
69359 pgprot_val(arch_vm_get_page_prot(vm_flags)));
69360+
69361+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69362+ if (!(__supported_pte_mask & _PAGE_NX) &&
69363+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
69364+ (vm_flags & (VM_READ | VM_WRITE)))
69365+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
69366+#endif
69367+
69368+ return prot;
69369 }
69370 EXPORT_SYMBOL(vm_get_page_prot);
69371
69372 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
69373 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
69374 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
69375+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
69376 /*
69377 * Make sure vm_committed_as in one cacheline and not cacheline shared with
69378 * other variables. It can be updated by several CPUs frequently.
69379@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma
69380 struct vm_area_struct *next = vma->vm_next;
69381
69382 might_sleep();
69383+ BUG_ON(vma->vm_mirror);
69384 if (vma->vm_ops && vma->vm_ops->close)
69385 vma->vm_ops->close(vma);
69386 if (vma->vm_file) {
69387@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
69388 * not page aligned -Ram Gupta
69389 */
69390 rlim = rlimit(RLIMIT_DATA);
69391+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
69392 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
69393 (mm->end_data - mm->start_data) > rlim)
69394 goto out;
69395@@ -689,6 +711,12 @@ static int
69396 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
69397 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69398 {
69399+
69400+#ifdef CONFIG_PAX_SEGMEXEC
69401+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
69402+ return 0;
69403+#endif
69404+
69405 if (is_mergeable_vma(vma, file, vm_flags) &&
69406 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69407 if (vma->vm_pgoff == vm_pgoff)
69408@@ -708,6 +736,12 @@ static int
69409 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69410 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69411 {
69412+
69413+#ifdef CONFIG_PAX_SEGMEXEC
69414+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
69415+ return 0;
69416+#endif
69417+
69418 if (is_mergeable_vma(vma, file, vm_flags) &&
69419 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69420 pgoff_t vm_pglen;
69421@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struc
69422 struct vm_area_struct *vma_merge(struct mm_struct *mm,
69423 struct vm_area_struct *prev, unsigned long addr,
69424 unsigned long end, unsigned long vm_flags,
69425- struct anon_vma *anon_vma, struct file *file,
69426+ struct anon_vma *anon_vma, struct file *file,
69427 pgoff_t pgoff, struct mempolicy *policy)
69428 {
69429 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
69430 struct vm_area_struct *area, *next;
69431 int err;
69432
69433+#ifdef CONFIG_PAX_SEGMEXEC
69434+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
69435+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
69436+
69437+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
69438+#endif
69439+
69440 /*
69441 * We later require that vma->vm_flags == vm_flags,
69442 * so this tests vma->vm_flags & VM_SPECIAL, too.
69443@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct
69444 if (next && next->vm_end == end) /* cases 6, 7, 8 */
69445 next = next->vm_next;
69446
69447+#ifdef CONFIG_PAX_SEGMEXEC
69448+ if (prev)
69449+ prev_m = pax_find_mirror_vma(prev);
69450+ if (area)
69451+ area_m = pax_find_mirror_vma(area);
69452+ if (next)
69453+ next_m = pax_find_mirror_vma(next);
69454+#endif
69455+
69456 /*
69457 * Can it merge with the predecessor?
69458 */
69459@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct
69460 /* cases 1, 6 */
69461 err = vma_adjust(prev, prev->vm_start,
69462 next->vm_end, prev->vm_pgoff, NULL);
69463- } else /* cases 2, 5, 7 */
69464+
69465+#ifdef CONFIG_PAX_SEGMEXEC
69466+ if (!err && prev_m)
69467+ err = vma_adjust(prev_m, prev_m->vm_start,
69468+ next_m->vm_end, prev_m->vm_pgoff, NULL);
69469+#endif
69470+
69471+ } else { /* cases 2, 5, 7 */
69472 err = vma_adjust(prev, prev->vm_start,
69473 end, prev->vm_pgoff, NULL);
69474+
69475+#ifdef CONFIG_PAX_SEGMEXEC
69476+ if (!err && prev_m)
69477+ err = vma_adjust(prev_m, prev_m->vm_start,
69478+ end_m, prev_m->vm_pgoff, NULL);
69479+#endif
69480+
69481+ }
69482 if (err)
69483 return NULL;
69484 khugepaged_enter_vma_merge(prev);
69485@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct
69486 mpol_equal(policy, vma_policy(next)) &&
69487 can_vma_merge_before(next, vm_flags,
69488 anon_vma, file, pgoff+pglen)) {
69489- if (prev && addr < prev->vm_end) /* case 4 */
69490+ if (prev && addr < prev->vm_end) { /* case 4 */
69491 err = vma_adjust(prev, prev->vm_start,
69492 addr, prev->vm_pgoff, NULL);
69493- else /* cases 3, 8 */
69494+
69495+#ifdef CONFIG_PAX_SEGMEXEC
69496+ if (!err && prev_m)
69497+ err = vma_adjust(prev_m, prev_m->vm_start,
69498+ addr_m, prev_m->vm_pgoff, NULL);
69499+#endif
69500+
69501+ } else { /* cases 3, 8 */
69502 err = vma_adjust(area, addr, next->vm_end,
69503 next->vm_pgoff - pglen, NULL);
69504+
69505+#ifdef CONFIG_PAX_SEGMEXEC
69506+ if (!err && area_m)
69507+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
69508+ next_m->vm_pgoff - pglen, NULL);
69509+#endif
69510+
69511+ }
69512 if (err)
69513 return NULL;
69514 khugepaged_enter_vma_merge(area);
69515@@ -921,14 +1001,11 @@ none:
69516 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
69517 struct file *file, long pages)
69518 {
69519- const unsigned long stack_flags
69520- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
69521-
69522 if (file) {
69523 mm->shared_vm += pages;
69524 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
69525 mm->exec_vm += pages;
69526- } else if (flags & stack_flags)
69527+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
69528 mm->stack_vm += pages;
69529 if (flags & (VM_RESERVED|VM_IO))
69530 mm->reserved_vm += pages;
69531@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file
69532 * (the exception is when the underlying filesystem is noexec
69533 * mounted, in which case we dont add PROT_EXEC.)
69534 */
69535- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69536+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69537 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
69538 prot |= PROT_EXEC;
69539
69540@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file
69541 /* Obtain the address to map to. we verify (or select) it and ensure
69542 * that it represents a valid section of the address space.
69543 */
69544- addr = get_unmapped_area(file, addr, len, pgoff, flags);
69545+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
69546 if (addr & ~PAGE_MASK)
69547 return addr;
69548
69549@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file
69550 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
69551 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
69552
69553+#ifdef CONFIG_PAX_MPROTECT
69554+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69555+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69556+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
69557+ gr_log_rwxmmap(file);
69558+
69559+#ifdef CONFIG_PAX_EMUPLT
69560+ vm_flags &= ~VM_EXEC;
69561+#else
69562+ return -EPERM;
69563+#endif
69564+
69565+ }
69566+
69567+ if (!(vm_flags & VM_EXEC))
69568+ vm_flags &= ~VM_MAYEXEC;
69569+#else
69570+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69571+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69572+#endif
69573+ else
69574+ vm_flags &= ~VM_MAYWRITE;
69575+ }
69576+#endif
69577+
69578+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69579+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
69580+ vm_flags &= ~VM_PAGEEXEC;
69581+#endif
69582+
69583 if (flags & MAP_LOCKED)
69584 if (!can_do_mlock())
69585 return -EPERM;
69586@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file
69587 locked += mm->locked_vm;
69588 lock_limit = rlimit(RLIMIT_MEMLOCK);
69589 lock_limit >>= PAGE_SHIFT;
69590+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69591 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
69592 return -EAGAIN;
69593 }
69594@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file
69595 if (error)
69596 return error;
69597
69598+ if (!gr_acl_handle_mmap(file, prot))
69599+ return -EACCES;
69600+
69601 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
69602 }
69603 EXPORT_SYMBOL(do_mmap_pgoff);
69604@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area
69605 vm_flags_t vm_flags = vma->vm_flags;
69606
69607 /* If it was private or non-writable, the write bit is already clear */
69608- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
69609+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
69610 return 0;
69611
69612 /* The backer wishes to know when pages are first written to? */
69613@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *f
69614 unsigned long charged = 0;
69615 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
69616
69617+#ifdef CONFIG_PAX_SEGMEXEC
69618+ struct vm_area_struct *vma_m = NULL;
69619+#endif
69620+
69621+ /*
69622+ * mm->mmap_sem is required to protect against another thread
69623+ * changing the mappings in case we sleep.
69624+ */
69625+ verify_mm_writelocked(mm);
69626+
69627 /* Clear old maps */
69628 error = -ENOMEM;
69629-munmap_back:
69630 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69631 if (vma && vma->vm_start < addr + len) {
69632 if (do_munmap(mm, addr, len))
69633 return -ENOMEM;
69634- goto munmap_back;
69635+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69636+ BUG_ON(vma && vma->vm_start < addr + len);
69637 }
69638
69639 /* Check against address space limit. */
69640@@ -1258,6 +1379,16 @@ munmap_back:
69641 goto unacct_error;
69642 }
69643
69644+#ifdef CONFIG_PAX_SEGMEXEC
69645+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
69646+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69647+ if (!vma_m) {
69648+ error = -ENOMEM;
69649+ goto free_vma;
69650+ }
69651+ }
69652+#endif
69653+
69654 vma->vm_mm = mm;
69655 vma->vm_start = addr;
69656 vma->vm_end = addr + len;
69657@@ -1281,6 +1412,19 @@ munmap_back:
69658 error = file->f_op->mmap(file, vma);
69659 if (error)
69660 goto unmap_and_free_vma;
69661+
69662+#ifdef CONFIG_PAX_SEGMEXEC
69663+ if (vma_m && (vm_flags & VM_EXECUTABLE))
69664+ added_exe_file_vma(mm);
69665+#endif
69666+
69667+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69668+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69669+ vma->vm_flags |= VM_PAGEEXEC;
69670+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69671+ }
69672+#endif
69673+
69674 if (vm_flags & VM_EXECUTABLE)
69675 added_exe_file_vma(mm);
69676
69677@@ -1316,6 +1460,11 @@ munmap_back:
69678 vma_link(mm, vma, prev, rb_link, rb_parent);
69679 file = vma->vm_file;
69680
69681+#ifdef CONFIG_PAX_SEGMEXEC
69682+ if (vma_m)
69683+ BUG_ON(pax_mirror_vma(vma_m, vma));
69684+#endif
69685+
69686 /* Once vma denies write, undo our temporary denial count */
69687 if (correct_wcount)
69688 atomic_inc(&inode->i_writecount);
69689@@ -1324,6 +1473,7 @@ out:
69690
69691 mm->total_vm += len >> PAGE_SHIFT;
69692 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69693+ track_exec_limit(mm, addr, addr + len, vm_flags);
69694 if (vm_flags & VM_LOCKED) {
69695 if (!mlock_vma_pages_range(vma, addr, addr + len))
69696 mm->locked_vm += (len >> PAGE_SHIFT);
69697@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
69698 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69699 charged = 0;
69700 free_vma:
69701+
69702+#ifdef CONFIG_PAX_SEGMEXEC
69703+ if (vma_m)
69704+ kmem_cache_free(vm_area_cachep, vma_m);
69705+#endif
69706+
69707 kmem_cache_free(vm_area_cachep, vma);
69708 unacct_error:
69709 if (charged)
69710@@ -1348,6 +1504,44 @@ unacct_error:
69711 return error;
69712 }
69713
69714+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69715+{
69716+ if (!vma) {
69717+#ifdef CONFIG_STACK_GROWSUP
69718+ if (addr > sysctl_heap_stack_gap)
69719+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69720+ else
69721+ vma = find_vma(current->mm, 0);
69722+ if (vma && (vma->vm_flags & VM_GROWSUP))
69723+ return false;
69724+#endif
69725+ return true;
69726+ }
69727+
69728+ if (addr + len > vma->vm_start)
69729+ return false;
69730+
69731+ if (vma->vm_flags & VM_GROWSDOWN)
69732+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69733+#ifdef CONFIG_STACK_GROWSUP
69734+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69735+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69736+#endif
69737+
69738+ return true;
69739+}
69740+
69741+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69742+{
69743+ if (vma->vm_start < len)
69744+ return -ENOMEM;
69745+ if (!(vma->vm_flags & VM_GROWSDOWN))
69746+ return vma->vm_start - len;
69747+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
69748+ return vma->vm_start - len - sysctl_heap_stack_gap;
69749+ return -ENOMEM;
69750+}
69751+
69752 /* Get an address range which is currently unmapped.
69753 * For shmat() with addr=0.
69754 *
69755@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp
69756 if (flags & MAP_FIXED)
69757 return addr;
69758
69759+#ifdef CONFIG_PAX_RANDMMAP
69760+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69761+#endif
69762+
69763 if (addr) {
69764 addr = PAGE_ALIGN(addr);
69765- vma = find_vma(mm, addr);
69766- if (TASK_SIZE - len >= addr &&
69767- (!vma || addr + len <= vma->vm_start))
69768- return addr;
69769+ if (TASK_SIZE - len >= addr) {
69770+ vma = find_vma(mm, addr);
69771+ if (check_heap_stack_gap(vma, addr, len))
69772+ return addr;
69773+ }
69774 }
69775 if (len > mm->cached_hole_size) {
69776- start_addr = addr = mm->free_area_cache;
69777+ start_addr = addr = mm->free_area_cache;
69778 } else {
69779- start_addr = addr = TASK_UNMAPPED_BASE;
69780- mm->cached_hole_size = 0;
69781+ start_addr = addr = mm->mmap_base;
69782+ mm->cached_hole_size = 0;
69783 }
69784
69785 full_search:
69786@@ -1396,34 +1595,40 @@ full_search:
69787 * Start a new search - just in case we missed
69788 * some holes.
69789 */
69790- if (start_addr != TASK_UNMAPPED_BASE) {
69791- addr = TASK_UNMAPPED_BASE;
69792- start_addr = addr;
69793+ if (start_addr != mm->mmap_base) {
69794+ start_addr = addr = mm->mmap_base;
69795 mm->cached_hole_size = 0;
69796 goto full_search;
69797 }
69798 return -ENOMEM;
69799 }
69800- if (!vma || addr + len <= vma->vm_start) {
69801- /*
69802- * Remember the place where we stopped the search:
69803- */
69804- mm->free_area_cache = addr + len;
69805- return addr;
69806- }
69807+ if (check_heap_stack_gap(vma, addr, len))
69808+ break;
69809 if (addr + mm->cached_hole_size < vma->vm_start)
69810 mm->cached_hole_size = vma->vm_start - addr;
69811 addr = vma->vm_end;
69812 }
69813+
69814+ /*
69815+ * Remember the place where we stopped the search:
69816+ */
69817+ mm->free_area_cache = addr + len;
69818+ return addr;
69819 }
69820 #endif
69821
69822 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69823 {
69824+
69825+#ifdef CONFIG_PAX_SEGMEXEC
69826+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69827+ return;
69828+#endif
69829+
69830 /*
69831 * Is this a new hole at the lowest possible address?
69832 */
69833- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69834+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69835 mm->free_area_cache = addr;
69836 mm->cached_hole_size = ~0UL;
69837 }
69838@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct fi
69839 {
69840 struct vm_area_struct *vma;
69841 struct mm_struct *mm = current->mm;
69842- unsigned long addr = addr0;
69843+ unsigned long base = mm->mmap_base, addr = addr0;
69844
69845 /* requested length too big for entire address space */
69846 if (len > TASK_SIZE)
69847@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct fi
69848 if (flags & MAP_FIXED)
69849 return addr;
69850
69851+#ifdef CONFIG_PAX_RANDMMAP
69852+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69853+#endif
69854+
69855 /* requesting a specific address */
69856 if (addr) {
69857 addr = PAGE_ALIGN(addr);
69858- vma = find_vma(mm, addr);
69859- if (TASK_SIZE - len >= addr &&
69860- (!vma || addr + len <= vma->vm_start))
69861- return addr;
69862+ if (TASK_SIZE - len >= addr) {
69863+ vma = find_vma(mm, addr);
69864+ if (check_heap_stack_gap(vma, addr, len))
69865+ return addr;
69866+ }
69867 }
69868
69869 /* check if free_area_cache is useful for us */
69870@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct fi
69871 /* make sure it can fit in the remaining address space */
69872 if (addr > len) {
69873 vma = find_vma(mm, addr-len);
69874- if (!vma || addr <= vma->vm_start)
69875+ if (check_heap_stack_gap(vma, addr - len, len))
69876 /* remember the address as a hint for next time */
69877 return (mm->free_area_cache = addr-len);
69878 }
69879@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct fi
69880 * return with success:
69881 */
69882 vma = find_vma(mm, addr);
69883- if (!vma || addr+len <= vma->vm_start)
69884+ if (check_heap_stack_gap(vma, addr, len))
69885 /* remember the address as a hint for next time */
69886 return (mm->free_area_cache = addr);
69887
69888@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct fi
69889 mm->cached_hole_size = vma->vm_start - addr;
69890
69891 /* try just below the current vma->vm_start */
69892- addr = vma->vm_start-len;
69893- } while (len < vma->vm_start);
69894+ addr = skip_heap_stack_gap(vma, len);
69895+ } while (!IS_ERR_VALUE(addr));
69896
69897 bottomup:
69898 /*
69899@@ -1507,13 +1717,21 @@ bottomup:
69900 * can happen with large stack limits and large mmap()
69901 * allocations.
69902 */
69903+ mm->mmap_base = TASK_UNMAPPED_BASE;
69904+
69905+#ifdef CONFIG_PAX_RANDMMAP
69906+ if (mm->pax_flags & MF_PAX_RANDMMAP)
69907+ mm->mmap_base += mm->delta_mmap;
69908+#endif
69909+
69910+ mm->free_area_cache = mm->mmap_base;
69911 mm->cached_hole_size = ~0UL;
69912- mm->free_area_cache = TASK_UNMAPPED_BASE;
69913 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69914 /*
69915 * Restore the topdown base:
69916 */
69917- mm->free_area_cache = mm->mmap_base;
69918+ mm->mmap_base = base;
69919+ mm->free_area_cache = base;
69920 mm->cached_hole_size = ~0UL;
69921
69922 return addr;
69923@@ -1522,6 +1740,12 @@ bottomup:
69924
69925 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69926 {
69927+
69928+#ifdef CONFIG_PAX_SEGMEXEC
69929+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69930+ return;
69931+#endif
69932+
69933 /*
69934 * Is this a new hole at the highest possible address?
69935 */
69936@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_s
69937 mm->free_area_cache = addr;
69938
69939 /* dont allow allocations above current base */
69940- if (mm->free_area_cache > mm->mmap_base)
69941+ if (mm->free_area_cache > mm->mmap_base) {
69942 mm->free_area_cache = mm->mmap_base;
69943+ mm->cached_hole_size = ~0UL;
69944+ }
69945 }
69946
69947 unsigned long
69948@@ -1638,6 +1864,28 @@ out:
69949 return prev ? prev->vm_next : vma;
69950 }
69951
69952+#ifdef CONFIG_PAX_SEGMEXEC
69953+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69954+{
69955+ struct vm_area_struct *vma_m;
69956+
69957+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69958+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69959+ BUG_ON(vma->vm_mirror);
69960+ return NULL;
69961+ }
69962+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69963+ vma_m = vma->vm_mirror;
69964+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69965+ BUG_ON(vma->vm_file != vma_m->vm_file);
69966+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69967+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69968+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69969+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69970+ return vma_m;
69971+}
69972+#endif
69973+
69974 /*
69975 * Verify that the stack growth is acceptable and
69976 * update accounting. This is shared with both the
69977@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_a
69978 return -ENOMEM;
69979
69980 /* Stack limit test */
69981+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
69982 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69983 return -ENOMEM;
69984
69985@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_a
69986 locked = mm->locked_vm + grow;
69987 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69988 limit >>= PAGE_SHIFT;
69989+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69990 if (locked > limit && !capable(CAP_IPC_LOCK))
69991 return -ENOMEM;
69992 }
69993@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_a
69994 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69995 * vma is the last one with address > vma->vm_end. Have to extend vma.
69996 */
69997+#ifndef CONFIG_IA64
69998+static
69999+#endif
70000 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
70001 {
70002 int error;
70003+ bool locknext;
70004
70005 if (!(vma->vm_flags & VM_GROWSUP))
70006 return -EFAULT;
70007
70008+ /* Also guard against wrapping around to address 0. */
70009+ if (address < PAGE_ALIGN(address+1))
70010+ address = PAGE_ALIGN(address+1);
70011+ else
70012+ return -ENOMEM;
70013+
70014 /*
70015 * We must make sure the anon_vma is allocated
70016 * so that the anon_vma locking is not a noop.
70017 */
70018 if (unlikely(anon_vma_prepare(vma)))
70019 return -ENOMEM;
70020+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
70021+ if (locknext && anon_vma_prepare(vma->vm_next))
70022+ return -ENOMEM;
70023 vma_lock_anon_vma(vma);
70024+ if (locknext)
70025+ vma_lock_anon_vma(vma->vm_next);
70026
70027 /*
70028 * vma->vm_start/vm_end cannot change under us because the caller
70029 * is required to hold the mmap_sem in read mode. We need the
70030- * anon_vma lock to serialize against concurrent expand_stacks.
70031- * Also guard against wrapping around to address 0.
70032+ * anon_vma locks to serialize against concurrent expand_stacks
70033+ * and expand_upwards.
70034 */
70035- if (address < PAGE_ALIGN(address+4))
70036- address = PAGE_ALIGN(address+4);
70037- else {
70038- vma_unlock_anon_vma(vma);
70039- return -ENOMEM;
70040- }
70041 error = 0;
70042
70043 /* Somebody else might have raced and expanded it already */
70044- if (address > vma->vm_end) {
70045+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
70046+ error = -ENOMEM;
70047+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
70048 unsigned long size, grow;
70049
70050 size = address - vma->vm_start;
70051@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct
70052 }
70053 }
70054 }
70055+ if (locknext)
70056+ vma_unlock_anon_vma(vma->vm_next);
70057 vma_unlock_anon_vma(vma);
70058 khugepaged_enter_vma_merge(vma);
70059 return error;
70060@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_stru
70061 unsigned long address)
70062 {
70063 int error;
70064+ bool lockprev = false;
70065+ struct vm_area_struct *prev;
70066
70067 /*
70068 * We must make sure the anon_vma is allocated
70069@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_stru
70070 if (error)
70071 return error;
70072
70073+ prev = vma->vm_prev;
70074+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
70075+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
70076+#endif
70077+ if (lockprev && anon_vma_prepare(prev))
70078+ return -ENOMEM;
70079+ if (lockprev)
70080+ vma_lock_anon_vma(prev);
70081+
70082 vma_lock_anon_vma(vma);
70083
70084 /*
70085@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_stru
70086 */
70087
70088 /* Somebody else might have raced and expanded it already */
70089- if (address < vma->vm_start) {
70090+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
70091+ error = -ENOMEM;
70092+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
70093 unsigned long size, grow;
70094
70095+#ifdef CONFIG_PAX_SEGMEXEC
70096+ struct vm_area_struct *vma_m;
70097+
70098+ vma_m = pax_find_mirror_vma(vma);
70099+#endif
70100+
70101 size = vma->vm_end - address;
70102 grow = (vma->vm_start - address) >> PAGE_SHIFT;
70103
70104@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_stru
70105 if (!error) {
70106 vma->vm_start = address;
70107 vma->vm_pgoff -= grow;
70108+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
70109+
70110+#ifdef CONFIG_PAX_SEGMEXEC
70111+ if (vma_m) {
70112+ vma_m->vm_start -= grow << PAGE_SHIFT;
70113+ vma_m->vm_pgoff -= grow;
70114+ }
70115+#endif
70116+
70117 perf_event_mmap(vma);
70118 }
70119 }
70120 }
70121 vma_unlock_anon_vma(vma);
70122+ if (lockprev)
70123+ vma_unlock_anon_vma(prev);
70124 khugepaged_enter_vma_merge(vma);
70125 return error;
70126 }
70127@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_st
70128 do {
70129 long nrpages = vma_pages(vma);
70130
70131+#ifdef CONFIG_PAX_SEGMEXEC
70132+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
70133+ vma = remove_vma(vma);
70134+ continue;
70135+ }
70136+#endif
70137+
70138 mm->total_vm -= nrpages;
70139 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
70140 vma = remove_vma(vma);
70141@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_str
70142 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
70143 vma->vm_prev = NULL;
70144 do {
70145+
70146+#ifdef CONFIG_PAX_SEGMEXEC
70147+ if (vma->vm_mirror) {
70148+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
70149+ vma->vm_mirror->vm_mirror = NULL;
70150+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
70151+ vma->vm_mirror = NULL;
70152+ }
70153+#endif
70154+
70155 rb_erase(&vma->vm_rb, &mm->mm_rb);
70156 mm->map_count--;
70157 tail_vma = vma;
70158@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct
70159 struct vm_area_struct *new;
70160 int err = -ENOMEM;
70161
70162+#ifdef CONFIG_PAX_SEGMEXEC
70163+ struct vm_area_struct *vma_m, *new_m = NULL;
70164+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
70165+#endif
70166+
70167 if (is_vm_hugetlb_page(vma) && (addr &
70168 ~(huge_page_mask(hstate_vma(vma)))))
70169 return -EINVAL;
70170
70171+#ifdef CONFIG_PAX_SEGMEXEC
70172+ vma_m = pax_find_mirror_vma(vma);
70173+#endif
70174+
70175 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
70176 if (!new)
70177 goto out_err;
70178
70179+#ifdef CONFIG_PAX_SEGMEXEC
70180+ if (vma_m) {
70181+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
70182+ if (!new_m) {
70183+ kmem_cache_free(vm_area_cachep, new);
70184+ goto out_err;
70185+ }
70186+ }
70187+#endif
70188+
70189 /* most fields are the same, copy all, and then fixup */
70190 *new = *vma;
70191
70192@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct
70193 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
70194 }
70195
70196+#ifdef CONFIG_PAX_SEGMEXEC
70197+ if (vma_m) {
70198+ *new_m = *vma_m;
70199+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
70200+ new_m->vm_mirror = new;
70201+ new->vm_mirror = new_m;
70202+
70203+ if (new_below)
70204+ new_m->vm_end = addr_m;
70205+ else {
70206+ new_m->vm_start = addr_m;
70207+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
70208+ }
70209+ }
70210+#endif
70211+
70212 pol = mpol_dup(vma_policy(vma));
70213 if (IS_ERR(pol)) {
70214 err = PTR_ERR(pol);
70215@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct
70216 else
70217 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
70218
70219+#ifdef CONFIG_PAX_SEGMEXEC
70220+ if (!err && vma_m) {
70221+ if (anon_vma_clone(new_m, vma_m))
70222+ goto out_free_mpol;
70223+
70224+ mpol_get(pol);
70225+ vma_set_policy(new_m, pol);
70226+
70227+ if (new_m->vm_file) {
70228+ get_file(new_m->vm_file);
70229+ if (vma_m->vm_flags & VM_EXECUTABLE)
70230+ added_exe_file_vma(mm);
70231+ }
70232+
70233+ if (new_m->vm_ops && new_m->vm_ops->open)
70234+ new_m->vm_ops->open(new_m);
70235+
70236+ if (new_below)
70237+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
70238+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
70239+ else
70240+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
70241+
70242+ if (err) {
70243+ if (new_m->vm_ops && new_m->vm_ops->close)
70244+ new_m->vm_ops->close(new_m);
70245+ if (new_m->vm_file) {
70246+ if (vma_m->vm_flags & VM_EXECUTABLE)
70247+ removed_exe_file_vma(mm);
70248+ fput(new_m->vm_file);
70249+ }
70250+ mpol_put(pol);
70251+ }
70252+ }
70253+#endif
70254+
70255 /* Success. */
70256 if (!err)
70257 return 0;
70258@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct
70259 removed_exe_file_vma(mm);
70260 fput(new->vm_file);
70261 }
70262- unlink_anon_vmas(new);
70263 out_free_mpol:
70264 mpol_put(pol);
70265 out_free_vma:
70266+
70267+#ifdef CONFIG_PAX_SEGMEXEC
70268+ if (new_m) {
70269+ unlink_anon_vmas(new_m);
70270+ kmem_cache_free(vm_area_cachep, new_m);
70271+ }
70272+#endif
70273+
70274+ unlink_anon_vmas(new);
70275 kmem_cache_free(vm_area_cachep, new);
70276 out_err:
70277 return err;
70278@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct
70279 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70280 unsigned long addr, int new_below)
70281 {
70282+
70283+#ifdef CONFIG_PAX_SEGMEXEC
70284+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70285+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
70286+ if (mm->map_count >= sysctl_max_map_count-1)
70287+ return -ENOMEM;
70288+ } else
70289+#endif
70290+
70291 if (mm->map_count >= sysctl_max_map_count)
70292 return -ENOMEM;
70293
70294@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, stru
70295 * work. This now handles partial unmappings.
70296 * Jeremy Fitzhardinge <jeremy@goop.org>
70297 */
70298+#ifdef CONFIG_PAX_SEGMEXEC
70299 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
70300 {
70301+ int ret = __do_munmap(mm, start, len);
70302+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
70303+ return ret;
70304+
70305+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
70306+}
70307+
70308+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
70309+#else
70310+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
70311+#endif
70312+{
70313 unsigned long end;
70314 struct vm_area_struct *vma, *prev, *last;
70315
70316+ /*
70317+ * mm->mmap_sem is required to protect against another thread
70318+ * changing the mappings in case we sleep.
70319+ */
70320+ verify_mm_writelocked(mm);
70321+
70322 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
70323 return -EINVAL;
70324
70325@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsi
70326 /* Fix up all other VM information */
70327 remove_vma_list(mm, vma);
70328
70329+ track_exec_limit(mm, start, end, 0UL);
70330+
70331 return 0;
70332 }
70333
70334@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
70335
70336 profile_munmap(addr);
70337
70338+#ifdef CONFIG_PAX_SEGMEXEC
70339+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
70340+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
70341+ return -EINVAL;
70342+#endif
70343+
70344 down_write(&mm->mmap_sem);
70345 ret = do_munmap(mm, addr, len);
70346 up_write(&mm->mmap_sem);
70347 return ret;
70348 }
70349
70350-static inline void verify_mm_writelocked(struct mm_struct *mm)
70351-{
70352-#ifdef CONFIG_DEBUG_VM
70353- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70354- WARN_ON(1);
70355- up_read(&mm->mmap_sem);
70356- }
70357-#endif
70358-}
70359-
70360 /*
70361 * this is really a simplified "do_mmap". it only handles
70362 * anonymous maps. eventually we may be able to do some
70363@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr,
70364 struct rb_node ** rb_link, * rb_parent;
70365 pgoff_t pgoff = addr >> PAGE_SHIFT;
70366 int error;
70367+ unsigned long charged;
70368
70369 len = PAGE_ALIGN(len);
70370 if (!len)
70371@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr,
70372
70373 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
70374
70375+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
70376+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
70377+ flags &= ~VM_EXEC;
70378+
70379+#ifdef CONFIG_PAX_MPROTECT
70380+ if (mm->pax_flags & MF_PAX_MPROTECT)
70381+ flags &= ~VM_MAYEXEC;
70382+#endif
70383+
70384+ }
70385+#endif
70386+
70387 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
70388 if (error & ~PAGE_MASK)
70389 return error;
70390
70391+ charged = len >> PAGE_SHIFT;
70392+
70393 /*
70394 * mlock MCL_FUTURE?
70395 */
70396 if (mm->def_flags & VM_LOCKED) {
70397 unsigned long locked, lock_limit;
70398- locked = len >> PAGE_SHIFT;
70399+ locked = charged;
70400 locked += mm->locked_vm;
70401 lock_limit = rlimit(RLIMIT_MEMLOCK);
70402 lock_limit >>= PAGE_SHIFT;
70403@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr,
70404 /*
70405 * Clear old maps. this also does some error checking for us
70406 */
70407- munmap_back:
70408 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70409 if (vma && vma->vm_start < addr + len) {
70410 if (do_munmap(mm, addr, len))
70411 return -ENOMEM;
70412- goto munmap_back;
70413+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70414+ BUG_ON(vma && vma->vm_start < addr + len);
70415 }
70416
70417 /* Check against address space limits *after* clearing old maps... */
70418- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
70419+ if (!may_expand_vm(mm, charged))
70420 return -ENOMEM;
70421
70422 if (mm->map_count > sysctl_max_map_count)
70423 return -ENOMEM;
70424
70425- if (security_vm_enough_memory(len >> PAGE_SHIFT))
70426+ if (security_vm_enough_memory(charged))
70427 return -ENOMEM;
70428
70429 /* Can we just expand an old private anonymous mapping? */
70430@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr,
70431 */
70432 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70433 if (!vma) {
70434- vm_unacct_memory(len >> PAGE_SHIFT);
70435+ vm_unacct_memory(charged);
70436 return -ENOMEM;
70437 }
70438
70439@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr,
70440 vma_link(mm, vma, prev, rb_link, rb_parent);
70441 out:
70442 perf_event_mmap(vma);
70443- mm->total_vm += len >> PAGE_SHIFT;
70444+ mm->total_vm += charged;
70445 if (flags & VM_LOCKED) {
70446 if (!mlock_vma_pages_range(vma, addr, addr + len))
70447- mm->locked_vm += (len >> PAGE_SHIFT);
70448+ mm->locked_vm += charged;
70449 }
70450+ track_exec_limit(mm, addr, addr + len, flags);
70451 return addr;
70452 }
70453
70454@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
70455 * Walk the list again, actually closing and freeing it,
70456 * with preemption enabled, without holding any MM locks.
70457 */
70458- while (vma)
70459+ while (vma) {
70460+ vma->vm_mirror = NULL;
70461 vma = remove_vma(vma);
70462+ }
70463
70464 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
70465 }
70466@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct *
70467 struct vm_area_struct * __vma, * prev;
70468 struct rb_node ** rb_link, * rb_parent;
70469
70470+#ifdef CONFIG_PAX_SEGMEXEC
70471+ struct vm_area_struct *vma_m = NULL;
70472+#endif
70473+
70474+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
70475+ return -EPERM;
70476+
70477 /*
70478 * The vm_pgoff of a purely anonymous vma should be irrelevant
70479 * until its first write fault, when page's anon_vma and index
70480@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct *
70481 if ((vma->vm_flags & VM_ACCOUNT) &&
70482 security_vm_enough_memory_mm(mm, vma_pages(vma)))
70483 return -ENOMEM;
70484+
70485+#ifdef CONFIG_PAX_SEGMEXEC
70486+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
70487+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70488+ if (!vma_m)
70489+ return -ENOMEM;
70490+ }
70491+#endif
70492+
70493 vma_link(mm, vma, prev, rb_link, rb_parent);
70494+
70495+#ifdef CONFIG_PAX_SEGMEXEC
70496+ if (vma_m)
70497+ BUG_ON(pax_mirror_vma(vma_m, vma));
70498+#endif
70499+
70500 return 0;
70501 }
70502
70503@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct v
70504 struct rb_node **rb_link, *rb_parent;
70505 struct mempolicy *pol;
70506
70507+ BUG_ON(vma->vm_mirror);
70508+
70509 /*
70510 * If anonymous vma has not yet been faulted, update new pgoff
70511 * to match new location, to increase its chance of merging.
70512@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct v
70513 return NULL;
70514 }
70515
70516+#ifdef CONFIG_PAX_SEGMEXEC
70517+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
70518+{
70519+ struct vm_area_struct *prev_m;
70520+ struct rb_node **rb_link_m, *rb_parent_m;
70521+ struct mempolicy *pol_m;
70522+
70523+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
70524+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
70525+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
70526+ *vma_m = *vma;
70527+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
70528+ if (anon_vma_clone(vma_m, vma))
70529+ return -ENOMEM;
70530+ pol_m = vma_policy(vma_m);
70531+ mpol_get(pol_m);
70532+ vma_set_policy(vma_m, pol_m);
70533+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
70534+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
70535+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
70536+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
70537+ if (vma_m->vm_file)
70538+ get_file(vma_m->vm_file);
70539+ if (vma_m->vm_ops && vma_m->vm_ops->open)
70540+ vma_m->vm_ops->open(vma_m);
70541+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
70542+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
70543+ vma_m->vm_mirror = vma;
70544+ vma->vm_mirror = vma_m;
70545+ return 0;
70546+}
70547+#endif
70548+
70549 /*
70550 * Return true if the calling process may expand its vm space by the passed
70551 * number of pages
70552@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm,
70553 unsigned long lim;
70554
70555 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
70556-
70557+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
70558 if (cur + npages > lim)
70559 return 0;
70560 return 1;
70561@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_st
70562 vma->vm_start = addr;
70563 vma->vm_end = addr + len;
70564
70565+#ifdef CONFIG_PAX_MPROTECT
70566+ if (mm->pax_flags & MF_PAX_MPROTECT) {
70567+#ifndef CONFIG_PAX_MPROTECT_COMPAT
70568+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
70569+ return -EPERM;
70570+ if (!(vm_flags & VM_EXEC))
70571+ vm_flags &= ~VM_MAYEXEC;
70572+#else
70573+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70574+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70575+#endif
70576+ else
70577+ vm_flags &= ~VM_MAYWRITE;
70578+ }
70579+#endif
70580+
70581 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
70582 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70583
70584diff -urNp linux-3.1.4/mm/mprotect.c linux-3.1.4/mm/mprotect.c
70585--- linux-3.1.4/mm/mprotect.c 2011-11-11 15:19:27.000000000 -0500
70586+++ linux-3.1.4/mm/mprotect.c 2011-11-16 18:40:44.000000000 -0500
70587@@ -23,10 +23,16 @@
70588 #include <linux/mmu_notifier.h>
70589 #include <linux/migrate.h>
70590 #include <linux/perf_event.h>
70591+
70592+#ifdef CONFIG_PAX_MPROTECT
70593+#include <linux/elf.h>
70594+#endif
70595+
70596 #include <asm/uaccess.h>
70597 #include <asm/pgtable.h>
70598 #include <asm/cacheflush.h>
70599 #include <asm/tlbflush.h>
70600+#include <asm/mmu_context.h>
70601
70602 #ifndef pgprot_modify
70603 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
70604@@ -141,6 +147,48 @@ static void change_protection(struct vm_
70605 flush_tlb_range(vma, start, end);
70606 }
70607
70608+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70609+/* called while holding the mmap semaphore for writing except stack expansion */
70610+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
70611+{
70612+ unsigned long oldlimit, newlimit = 0UL;
70613+
70614+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
70615+ return;
70616+
70617+ spin_lock(&mm->page_table_lock);
70618+ oldlimit = mm->context.user_cs_limit;
70619+ if ((prot & VM_EXEC) && oldlimit < end)
70620+ /* USER_CS limit moved up */
70621+ newlimit = end;
70622+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
70623+ /* USER_CS limit moved down */
70624+ newlimit = start;
70625+
70626+ if (newlimit) {
70627+ mm->context.user_cs_limit = newlimit;
70628+
70629+#ifdef CONFIG_SMP
70630+ wmb();
70631+ cpus_clear(mm->context.cpu_user_cs_mask);
70632+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
70633+#endif
70634+
70635+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
70636+ }
70637+ spin_unlock(&mm->page_table_lock);
70638+ if (newlimit == end) {
70639+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
70640+
70641+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
70642+ if (is_vm_hugetlb_page(vma))
70643+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
70644+ else
70645+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
70646+ }
70647+}
70648+#endif
70649+
70650 int
70651 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70652 unsigned long start, unsigned long end, unsigned long newflags)
70653@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
70654 int error;
70655 int dirty_accountable = 0;
70656
70657+#ifdef CONFIG_PAX_SEGMEXEC
70658+ struct vm_area_struct *vma_m = NULL;
70659+ unsigned long start_m, end_m;
70660+
70661+ start_m = start + SEGMEXEC_TASK_SIZE;
70662+ end_m = end + SEGMEXEC_TASK_SIZE;
70663+#endif
70664+
70665 if (newflags == oldflags) {
70666 *pprev = vma;
70667 return 0;
70668 }
70669
70670+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
70671+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
70672+
70673+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70674+ return -ENOMEM;
70675+
70676+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70677+ return -ENOMEM;
70678+ }
70679+
70680 /*
70681 * If we make a private mapping writable we increase our commit;
70682 * but (without finer accounting) cannot reduce our commit if we
70683@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
70684 }
70685 }
70686
70687+#ifdef CONFIG_PAX_SEGMEXEC
70688+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70689+ if (start != vma->vm_start) {
70690+ error = split_vma(mm, vma, start, 1);
70691+ if (error)
70692+ goto fail;
70693+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70694+ *pprev = (*pprev)->vm_next;
70695+ }
70696+
70697+ if (end != vma->vm_end) {
70698+ error = split_vma(mm, vma, end, 0);
70699+ if (error)
70700+ goto fail;
70701+ }
70702+
70703+ if (pax_find_mirror_vma(vma)) {
70704+ error = __do_munmap(mm, start_m, end_m - start_m);
70705+ if (error)
70706+ goto fail;
70707+ } else {
70708+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70709+ if (!vma_m) {
70710+ error = -ENOMEM;
70711+ goto fail;
70712+ }
70713+ vma->vm_flags = newflags;
70714+ error = pax_mirror_vma(vma_m, vma);
70715+ if (error) {
70716+ vma->vm_flags = oldflags;
70717+ goto fail;
70718+ }
70719+ }
70720+ }
70721+#endif
70722+
70723 /*
70724 * First try to merge with previous and/or next vma.
70725 */
70726@@ -204,9 +306,21 @@ success:
70727 * vm_flags and vm_page_prot are protected by the mmap_sem
70728 * held in write mode.
70729 */
70730+
70731+#ifdef CONFIG_PAX_SEGMEXEC
70732+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70733+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70734+#endif
70735+
70736 vma->vm_flags = newflags;
70737+
70738+#ifdef CONFIG_PAX_MPROTECT
70739+ if (mm->binfmt && mm->binfmt->handle_mprotect)
70740+ mm->binfmt->handle_mprotect(vma, newflags);
70741+#endif
70742+
70743 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70744- vm_get_page_prot(newflags));
70745+ vm_get_page_prot(vma->vm_flags));
70746
70747 if (vma_wants_writenotify(vma)) {
70748 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70749@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70750 end = start + len;
70751 if (end <= start)
70752 return -ENOMEM;
70753+
70754+#ifdef CONFIG_PAX_SEGMEXEC
70755+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70756+ if (end > SEGMEXEC_TASK_SIZE)
70757+ return -EINVAL;
70758+ } else
70759+#endif
70760+
70761+ if (end > TASK_SIZE)
70762+ return -EINVAL;
70763+
70764 if (!arch_validate_prot(prot))
70765 return -EINVAL;
70766
70767@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70768 /*
70769 * Does the application expect PROT_READ to imply PROT_EXEC:
70770 */
70771- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70772+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70773 prot |= PROT_EXEC;
70774
70775 vm_flags = calc_vm_prot_bits(prot);
70776@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70777 if (start > vma->vm_start)
70778 prev = vma;
70779
70780+#ifdef CONFIG_PAX_MPROTECT
70781+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70782+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
70783+#endif
70784+
70785 for (nstart = start ; ; ) {
70786 unsigned long newflags;
70787
70788@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70789
70790 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70791 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70792+ if (prot & (PROT_WRITE | PROT_EXEC))
70793+ gr_log_rwxmprotect(vma->vm_file);
70794+
70795+ error = -EACCES;
70796+ goto out;
70797+ }
70798+
70799+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70800 error = -EACCES;
70801 goto out;
70802 }
70803@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70804 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70805 if (error)
70806 goto out;
70807+
70808+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
70809+
70810 nstart = tmp;
70811
70812 if (nstart < prev->vm_end)
70813diff -urNp linux-3.1.4/mm/mremap.c linux-3.1.4/mm/mremap.c
70814--- linux-3.1.4/mm/mremap.c 2011-11-11 15:19:27.000000000 -0500
70815+++ linux-3.1.4/mm/mremap.c 2011-11-16 18:39:08.000000000 -0500
70816@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
70817 continue;
70818 pte = ptep_clear_flush(vma, old_addr, old_pte);
70819 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70820+
70821+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70822+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70823+ pte = pte_exprotect(pte);
70824+#endif
70825+
70826 set_pte_at(mm, new_addr, new_pte, pte);
70827 }
70828
70829@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
70830 if (is_vm_hugetlb_page(vma))
70831 goto Einval;
70832
70833+#ifdef CONFIG_PAX_SEGMEXEC
70834+ if (pax_find_mirror_vma(vma))
70835+ goto Einval;
70836+#endif
70837+
70838 /* We can't remap across vm area boundaries */
70839 if (old_len > vma->vm_end - addr)
70840 goto Efault;
70841@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
70842 unsigned long ret = -EINVAL;
70843 unsigned long charged = 0;
70844 unsigned long map_flags;
70845+ unsigned long pax_task_size = TASK_SIZE;
70846
70847 if (new_addr & ~PAGE_MASK)
70848 goto out;
70849
70850- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70851+#ifdef CONFIG_PAX_SEGMEXEC
70852+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70853+ pax_task_size = SEGMEXEC_TASK_SIZE;
70854+#endif
70855+
70856+ pax_task_size -= PAGE_SIZE;
70857+
70858+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70859 goto out;
70860
70861 /* Check if the location we're moving into overlaps the
70862 * old location at all, and fail if it does.
70863 */
70864- if ((new_addr <= addr) && (new_addr+new_len) > addr)
70865- goto out;
70866-
70867- if ((addr <= new_addr) && (addr+old_len) > new_addr)
70868+ if (addr + old_len > new_addr && new_addr + new_len > addr)
70869 goto out;
70870
70871 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70872@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
70873 struct vm_area_struct *vma;
70874 unsigned long ret = -EINVAL;
70875 unsigned long charged = 0;
70876+ unsigned long pax_task_size = TASK_SIZE;
70877
70878 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70879 goto out;
70880@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
70881 if (!new_len)
70882 goto out;
70883
70884+#ifdef CONFIG_PAX_SEGMEXEC
70885+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70886+ pax_task_size = SEGMEXEC_TASK_SIZE;
70887+#endif
70888+
70889+ pax_task_size -= PAGE_SIZE;
70890+
70891+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70892+ old_len > pax_task_size || addr > pax_task_size-old_len)
70893+ goto out;
70894+
70895 if (flags & MREMAP_FIXED) {
70896 if (flags & MREMAP_MAYMOVE)
70897 ret = mremap_to(addr, old_len, new_addr, new_len);
70898@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
70899 addr + new_len);
70900 }
70901 ret = addr;
70902+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70903 goto out;
70904 }
70905 }
70906@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
70907 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70908 if (ret)
70909 goto out;
70910+
70911+ map_flags = vma->vm_flags;
70912 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70913+ if (!(ret & ~PAGE_MASK)) {
70914+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70915+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70916+ }
70917 }
70918 out:
70919 if (ret & ~PAGE_MASK)
70920diff -urNp linux-3.1.4/mm/nobootmem.c linux-3.1.4/mm/nobootmem.c
70921--- linux-3.1.4/mm/nobootmem.c 2011-11-11 15:19:27.000000000 -0500
70922+++ linux-3.1.4/mm/nobootmem.c 2011-11-16 18:39:08.000000000 -0500
70923@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
70924 unsigned long __init free_all_memory_core_early(int nodeid)
70925 {
70926 int i;
70927- u64 start, end;
70928+ u64 start, end, startrange, endrange;
70929 unsigned long count = 0;
70930- struct range *range = NULL;
70931+ struct range *range = NULL, rangerange = { 0, 0 };
70932 int nr_range;
70933
70934 nr_range = get_free_all_memory_range(&range, nodeid);
70935+ startrange = __pa(range) >> PAGE_SHIFT;
70936+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70937
70938 for (i = 0; i < nr_range; i++) {
70939 start = range[i].start;
70940 end = range[i].end;
70941+ if (start <= endrange && startrange < end) {
70942+ BUG_ON(rangerange.start | rangerange.end);
70943+ rangerange = range[i];
70944+ continue;
70945+ }
70946 count += end - start;
70947 __free_pages_memory(start, end);
70948 }
70949+ start = rangerange.start;
70950+ end = rangerange.end;
70951+ count += end - start;
70952+ __free_pages_memory(start, end);
70953
70954 return count;
70955 }
70956diff -urNp linux-3.1.4/mm/nommu.c linux-3.1.4/mm/nommu.c
70957--- linux-3.1.4/mm/nommu.c 2011-11-11 15:19:27.000000000 -0500
70958+++ linux-3.1.4/mm/nommu.c 2011-11-16 18:39:08.000000000 -0500
70959@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMI
70960 int sysctl_overcommit_ratio = 50; /* default is 50% */
70961 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70962 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70963-int heap_stack_gap = 0;
70964
70965 atomic_long_t mmap_pages_allocated;
70966
70967@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct m
70968 EXPORT_SYMBOL(find_vma);
70969
70970 /*
70971- * find a VMA
70972- * - we don't extend stack VMAs under NOMMU conditions
70973- */
70974-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70975-{
70976- return find_vma(mm, addr);
70977-}
70978-
70979-/*
70980 * expand a stack to a given address
70981 * - not supported under NOMMU conditions
70982 */
70983@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, stru
70984
70985 /* most fields are the same, copy all, and then fixup */
70986 *new = *vma;
70987+ INIT_LIST_HEAD(&new->anon_vma_chain);
70988 *region = *vma->vm_region;
70989 new->vm_region = region;
70990
70991diff -urNp linux-3.1.4/mm/oom_kill.c linux-3.1.4/mm/oom_kill.c
70992--- linux-3.1.4/mm/oom_kill.c 2011-11-11 15:19:27.000000000 -0500
70993+++ linux-3.1.4/mm/oom_kill.c 2011-11-18 18:44:21.000000000 -0500
70994@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct t
70995 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
70996 const nodemask_t *nodemask, unsigned long totalpages)
70997 {
70998- int points;
70999+ long points;
71000
71001 if (oom_unkillable_task(p, mem, nodemask))
71002 return 0;
71003diff -urNp linux-3.1.4/mm/page_alloc.c linux-3.1.4/mm/page_alloc.c
71004--- linux-3.1.4/mm/page_alloc.c 2011-11-11 15:19:27.000000000 -0500
71005+++ linux-3.1.4/mm/page_alloc.c 2011-11-16 18:40:44.000000000 -0500
71006@@ -340,7 +340,7 @@ out:
71007 * This usage means that zero-order pages may not be compound.
71008 */
71009
71010-static void free_compound_page(struct page *page)
71011+void free_compound_page(struct page *page)
71012 {
71013 __free_pages_ok(page, compound_order(page));
71014 }
71015@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
71016 int i;
71017 int bad = 0;
71018
71019+#ifdef CONFIG_PAX_MEMORY_SANITIZE
71020+ unsigned long index = 1UL << order;
71021+#endif
71022+
71023 trace_mm_page_free_direct(page, order);
71024 kmemcheck_free_shadow(page, order);
71025
71026@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
71027 debug_check_no_obj_freed(page_address(page),
71028 PAGE_SIZE << order);
71029 }
71030+
71031+#ifdef CONFIG_PAX_MEMORY_SANITIZE
71032+ for (; index; --index)
71033+ sanitize_highpage(page + index - 1);
71034+#endif
71035+
71036 arch_free_page(page, order);
71037 kernel_map_pages(page, 1 << order, 0);
71038
71039@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
71040 arch_alloc_page(page, order);
71041 kernel_map_pages(page, 1 << order, 1);
71042
71043+#ifndef CONFIG_PAX_MEMORY_SANITIZE
71044 if (gfp_flags & __GFP_ZERO)
71045 prep_zero_page(page, order, gfp_flags);
71046+#endif
71047
71048 if (order && (gfp_flags & __GFP_COMP))
71049 prep_compound_page(page, order);
71050@@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter
71051 int cpu;
71052 struct zone *zone;
71053
71054+ pax_track_stack();
71055+
71056 for_each_populated_zone(zone) {
71057 if (skip_free_areas_node(filter, zone_to_nid(zone)))
71058 continue;
71059@@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigne
71060 unsigned long pfn;
71061
71062 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
71063+#ifdef CONFIG_X86_32
71064+ /* boot failures in VMware 8 on 32bit vanilla since
71065+ this change */
71066+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
71067+#else
71068 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
71069+#endif
71070 return 1;
71071 }
71072 return 0;
71073diff -urNp linux-3.1.4/mm/percpu.c linux-3.1.4/mm/percpu.c
71074--- linux-3.1.4/mm/percpu.c 2011-11-11 15:19:27.000000000 -0500
71075+++ linux-3.1.4/mm/percpu.c 2011-11-16 18:39:08.000000000 -0500
71076@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
71077 static unsigned int pcpu_last_unit_cpu __read_mostly;
71078
71079 /* the address of the first chunk which starts with the kernel static area */
71080-void *pcpu_base_addr __read_mostly;
71081+void *pcpu_base_addr __read_only;
71082 EXPORT_SYMBOL_GPL(pcpu_base_addr);
71083
71084 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
71085diff -urNp linux-3.1.4/mm/rmap.c linux-3.1.4/mm/rmap.c
71086--- linux-3.1.4/mm/rmap.c 2011-11-11 15:19:27.000000000 -0500
71087+++ linux-3.1.4/mm/rmap.c 2011-11-16 18:39:08.000000000 -0500
71088@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_stru
71089 struct anon_vma *anon_vma = vma->anon_vma;
71090 struct anon_vma_chain *avc;
71091
71092+#ifdef CONFIG_PAX_SEGMEXEC
71093+ struct anon_vma_chain *avc_m = NULL;
71094+#endif
71095+
71096 might_sleep();
71097 if (unlikely(!anon_vma)) {
71098 struct mm_struct *mm = vma->vm_mm;
71099@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_stru
71100 if (!avc)
71101 goto out_enomem;
71102
71103+#ifdef CONFIG_PAX_SEGMEXEC
71104+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
71105+ if (!avc_m)
71106+ goto out_enomem_free_avc;
71107+#endif
71108+
71109 anon_vma = find_mergeable_anon_vma(vma);
71110 allocated = NULL;
71111 if (!anon_vma) {
71112@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_stru
71113 /* page_table_lock to protect against threads */
71114 spin_lock(&mm->page_table_lock);
71115 if (likely(!vma->anon_vma)) {
71116+
71117+#ifdef CONFIG_PAX_SEGMEXEC
71118+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
71119+
71120+ if (vma_m) {
71121+ BUG_ON(vma_m->anon_vma);
71122+ vma_m->anon_vma = anon_vma;
71123+ avc_m->anon_vma = anon_vma;
71124+ avc_m->vma = vma;
71125+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
71126+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
71127+ avc_m = NULL;
71128+ }
71129+#endif
71130+
71131 vma->anon_vma = anon_vma;
71132 avc->anon_vma = anon_vma;
71133 avc->vma = vma;
71134@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_stru
71135
71136 if (unlikely(allocated))
71137 put_anon_vma(allocated);
71138+
71139+#ifdef CONFIG_PAX_SEGMEXEC
71140+ if (unlikely(avc_m))
71141+ anon_vma_chain_free(avc_m);
71142+#endif
71143+
71144 if (unlikely(avc))
71145 anon_vma_chain_free(avc);
71146 }
71147 return 0;
71148
71149 out_enomem_free_avc:
71150+
71151+#ifdef CONFIG_PAX_SEGMEXEC
71152+ if (avc_m)
71153+ anon_vma_chain_free(avc_m);
71154+#endif
71155+
71156 anon_vma_chain_free(avc);
71157 out_enomem:
71158 return -ENOMEM;
71159@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct v
71160 * Attach the anon_vmas from src to dst.
71161 * Returns 0 on success, -ENOMEM on failure.
71162 */
71163-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
71164+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
71165 {
71166 struct anon_vma_chain *avc, *pavc;
71167 struct anon_vma *root = NULL;
71168@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct
71169 * the corresponding VMA in the parent process is attached to.
71170 * Returns 0 on success, non-zero on failure.
71171 */
71172-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
71173+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
71174 {
71175 struct anon_vma_chain *avc;
71176 struct anon_vma *anon_vma;
71177diff -urNp linux-3.1.4/mm/shmem.c linux-3.1.4/mm/shmem.c
71178--- linux-3.1.4/mm/shmem.c 2011-11-11 15:19:27.000000000 -0500
71179+++ linux-3.1.4/mm/shmem.c 2011-11-16 19:28:28.000000000 -0500
71180@@ -31,7 +31,7 @@
71181 #include <linux/module.h>
71182 #include <linux/swap.h>
71183
71184-static struct vfsmount *shm_mnt;
71185+struct vfsmount *shm_mnt;
71186
71187 #ifdef CONFIG_SHMEM
71188 /*
71189@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
71190 #define BOGO_DIRENT_SIZE 20
71191
71192 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
71193-#define SHORT_SYMLINK_LEN 128
71194+#define SHORT_SYMLINK_LEN 64
71195
71196 struct shmem_xattr {
71197 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
71198@@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_ent
71199 struct mempolicy mpol, *spol;
71200 struct vm_area_struct pvma;
71201
71202+ pax_track_stack();
71203+
71204 spol = mpol_cond_copy(&mpol,
71205 mpol_shared_policy_lookup(&info->policy, index));
71206
71207@@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block
71208 int err = -ENOMEM;
71209
71210 /* Round up to L1_CACHE_BYTES to resist false sharing */
71211- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
71212- L1_CACHE_BYTES), GFP_KERNEL);
71213+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
71214 if (!sbinfo)
71215 return -ENOMEM;
71216
71217diff -urNp linux-3.1.4/mm/slab.c linux-3.1.4/mm/slab.c
71218--- linux-3.1.4/mm/slab.c 2011-11-11 15:19:27.000000000 -0500
71219+++ linux-3.1.4/mm/slab.c 2011-11-16 18:40:44.000000000 -0500
71220@@ -151,7 +151,7 @@
71221
71222 /* Legal flag mask for kmem_cache_create(). */
71223 #if DEBUG
71224-# define CREATE_MASK (SLAB_RED_ZONE | \
71225+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
71226 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
71227 SLAB_CACHE_DMA | \
71228 SLAB_STORE_USER | \
71229@@ -159,7 +159,7 @@
71230 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
71231 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
71232 #else
71233-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
71234+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
71235 SLAB_CACHE_DMA | \
71236 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
71237 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
71238@@ -288,7 +288,7 @@ struct kmem_list3 {
71239 * Need this for bootstrapping a per node allocator.
71240 */
71241 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
71242-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
71243+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
71244 #define CACHE_CACHE 0
71245 #define SIZE_AC MAX_NUMNODES
71246 #define SIZE_L3 (2 * MAX_NUMNODES)
71247@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
71248 if ((x)->max_freeable < i) \
71249 (x)->max_freeable = i; \
71250 } while (0)
71251-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
71252-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
71253-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
71254-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
71255+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
71256+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
71257+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
71258+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
71259 #else
71260 #define STATS_INC_ACTIVE(x) do { } while (0)
71261 #define STATS_DEC_ACTIVE(x) do { } while (0)
71262@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
71263 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
71264 */
71265 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
71266- const struct slab *slab, void *obj)
71267+ const struct slab *slab, const void *obj)
71268 {
71269 u32 offset = (obj - slab->s_mem);
71270 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
71271@@ -564,7 +564,7 @@ struct cache_names {
71272 static struct cache_names __initdata cache_names[] = {
71273 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
71274 #include <linux/kmalloc_sizes.h>
71275- {NULL,}
71276+ {NULL}
71277 #undef CACHE
71278 };
71279
71280@@ -1571,7 +1571,7 @@ void __init kmem_cache_init(void)
71281 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
71282 sizes[INDEX_AC].cs_size,
71283 ARCH_KMALLOC_MINALIGN,
71284- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71285+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71286 NULL);
71287
71288 if (INDEX_AC != INDEX_L3) {
71289@@ -1579,7 +1579,7 @@ void __init kmem_cache_init(void)
71290 kmem_cache_create(names[INDEX_L3].name,
71291 sizes[INDEX_L3].cs_size,
71292 ARCH_KMALLOC_MINALIGN,
71293- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71294+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71295 NULL);
71296 }
71297
71298@@ -1597,7 +1597,7 @@ void __init kmem_cache_init(void)
71299 sizes->cs_cachep = kmem_cache_create(names->name,
71300 sizes->cs_size,
71301 ARCH_KMALLOC_MINALIGN,
71302- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71303+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71304 NULL);
71305 }
71306 #ifdef CONFIG_ZONE_DMA
71307@@ -4324,10 +4324,10 @@ static int s_show(struct seq_file *m, vo
71308 }
71309 /* cpu stats */
71310 {
71311- unsigned long allochit = atomic_read(&cachep->allochit);
71312- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
71313- unsigned long freehit = atomic_read(&cachep->freehit);
71314- unsigned long freemiss = atomic_read(&cachep->freemiss);
71315+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
71316+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
71317+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
71318+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
71319
71320 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
71321 allochit, allocmiss, freehit, freemiss);
71322@@ -4584,15 +4584,70 @@ static const struct file_operations proc
71323
71324 static int __init slab_proc_init(void)
71325 {
71326- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
71327+ mode_t gr_mode = S_IRUGO;
71328+
71329+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71330+ gr_mode = S_IRUSR;
71331+#endif
71332+
71333+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
71334 #ifdef CONFIG_DEBUG_SLAB_LEAK
71335- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
71336+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
71337 #endif
71338 return 0;
71339 }
71340 module_init(slab_proc_init);
71341 #endif
71342
71343+void check_object_size(const void *ptr, unsigned long n, bool to)
71344+{
71345+
71346+#ifdef CONFIG_PAX_USERCOPY
71347+ struct page *page;
71348+ struct kmem_cache *cachep = NULL;
71349+ struct slab *slabp;
71350+ unsigned int objnr;
71351+ unsigned long offset;
71352+ const char *type;
71353+
71354+ if (!n)
71355+ return;
71356+
71357+ type = "<null>";
71358+ if (ZERO_OR_NULL_PTR(ptr))
71359+ goto report;
71360+
71361+ if (!virt_addr_valid(ptr))
71362+ return;
71363+
71364+ page = virt_to_head_page(ptr);
71365+
71366+ type = "<process stack>";
71367+ if (!PageSlab(page)) {
71368+ if (object_is_on_stack(ptr, n) == -1)
71369+ goto report;
71370+ return;
71371+ }
71372+
71373+ cachep = page_get_cache(page);
71374+ type = cachep->name;
71375+ if (!(cachep->flags & SLAB_USERCOPY))
71376+ goto report;
71377+
71378+ slabp = page_get_slab(page);
71379+ objnr = obj_to_index(cachep, slabp, ptr);
71380+ BUG_ON(objnr >= cachep->num);
71381+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
71382+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
71383+ return;
71384+
71385+report:
71386+ pax_report_usercopy(ptr, n, to, type);
71387+#endif
71388+
71389+}
71390+EXPORT_SYMBOL(check_object_size);
71391+
71392 /**
71393 * ksize - get the actual amount of memory allocated for a given object
71394 * @objp: Pointer to the object
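[editorial sketch, not part of the patch] The check_object_size() hunk added to mm/slab.c above validates a usercopy against the bounds of the slab object containing the pointer. Below is a minimal userspace sketch of the same offset/length arithmetic, assuming obj_base/obj_size describe the containing object; the names are illustrative and the kernel version additionally subtracts obj_offset() for debug caches.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* mirrors: offset <= obj_size(cachep) && n <= obj_size(cachep) - offset */
static bool copy_within_object(const void *obj_base, size_t obj_size,
                               const void *ptr, size_t n)
{
	size_t offset = (uintptr_t)ptr - (uintptr_t)obj_base;

	return offset <= obj_size && n <= obj_size - offset;
}

int main(void)
{
	char obj[64];

	assert(copy_within_object(obj, sizeof(obj), obj + 16, 48));   /* fits exactly */
	assert(!copy_within_object(obj, sizeof(obj), obj + 16, 49));  /* one byte past the object */
	return 0;
}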
71395diff -urNp linux-3.1.4/mm/slob.c linux-3.1.4/mm/slob.c
71396--- linux-3.1.4/mm/slob.c 2011-11-11 15:19:27.000000000 -0500
71397+++ linux-3.1.4/mm/slob.c 2011-11-16 18:39:08.000000000 -0500
71398@@ -29,7 +29,7 @@
71399 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
71400 * alloc_pages() directly, allocating compound pages so the page order
71401 * does not have to be separately tracked, and also stores the exact
71402- * allocation size in page->private so that it can be used to accurately
71403+ * allocation size in slob_page->size so that it can be used to accurately
71404 * provide ksize(). These objects are detected in kfree() because slob_page()
71405 * is false for them.
71406 *
71407@@ -58,6 +58,7 @@
71408 */
71409
71410 #include <linux/kernel.h>
71411+#include <linux/sched.h>
71412 #include <linux/slab.h>
71413 #include <linux/mm.h>
71414 #include <linux/swap.h> /* struct reclaim_state */
71415@@ -102,7 +103,8 @@ struct slob_page {
71416 unsigned long flags; /* mandatory */
71417 atomic_t _count; /* mandatory */
71418 slobidx_t units; /* free units left in page */
71419- unsigned long pad[2];
71420+ unsigned long pad[1];
71421+ unsigned long size; /* size when >=PAGE_SIZE */
71422 slob_t *free; /* first free slob_t in page */
71423 struct list_head list; /* linked list of free pages */
71424 };
71425@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
71426 */
71427 static inline int is_slob_page(struct slob_page *sp)
71428 {
71429- return PageSlab((struct page *)sp);
71430+ return PageSlab((struct page *)sp) && !sp->size;
71431 }
71432
71433 static inline void set_slob_page(struct slob_page *sp)
71434@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
71435
71436 static inline struct slob_page *slob_page(const void *addr)
71437 {
71438- return (struct slob_page *)virt_to_page(addr);
71439+ return (struct slob_page *)virt_to_head_page(addr);
71440 }
71441
71442 /*
71443@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
71444 /*
71445 * Return the size of a slob block.
71446 */
71447-static slobidx_t slob_units(slob_t *s)
71448+static slobidx_t slob_units(const slob_t *s)
71449 {
71450 if (s->units > 0)
71451 return s->units;
71452@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
71453 /*
71454 * Return the next free slob block pointer after this one.
71455 */
71456-static slob_t *slob_next(slob_t *s)
71457+static slob_t *slob_next(const slob_t *s)
71458 {
71459 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
71460 slobidx_t next;
71461@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
71462 /*
71463 * Returns true if s is the last free block in its page.
71464 */
71465-static int slob_last(slob_t *s)
71466+static int slob_last(const slob_t *s)
71467 {
71468 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
71469 }
71470@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
71471 if (!page)
71472 return NULL;
71473
71474+ set_slob_page(page);
71475 return page_address(page);
71476 }
71477
71478@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
71479 if (!b)
71480 return NULL;
71481 sp = slob_page(b);
71482- set_slob_page(sp);
71483
71484 spin_lock_irqsave(&slob_lock, flags);
71485 sp->units = SLOB_UNITS(PAGE_SIZE);
71486 sp->free = b;
71487+ sp->size = 0;
71488 INIT_LIST_HEAD(&sp->list);
71489 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
71490 set_slob_page_free(sp, slob_list);
71491@@ -476,10 +479,9 @@ out:
71492 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
71493 */
71494
71495-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71496+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
71497 {
71498- unsigned int *m;
71499- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71500+ slob_t *m;
71501 void *ret;
71502
71503 gfp &= gfp_allowed_mask;
71504@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t
71505
71506 if (!m)
71507 return NULL;
71508- *m = size;
71509+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
71510+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
71511+ m[0].units = size;
71512+ m[1].units = align;
71513 ret = (void *)m + align;
71514
71515 trace_kmalloc_node(_RET_IP_, ret,
71516@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t
71517 gfp |= __GFP_COMP;
71518 ret = slob_new_pages(gfp, order, node);
71519 if (ret) {
71520- struct page *page;
71521- page = virt_to_page(ret);
71522- page->private = size;
71523+ struct slob_page *sp;
71524+ sp = slob_page(ret);
71525+ sp->size = size;
71526 }
71527
71528 trace_kmalloc_node(_RET_IP_, ret,
71529 size, PAGE_SIZE << order, gfp, node);
71530 }
71531
71532- kmemleak_alloc(ret, size, 1, gfp);
71533+ return ret;
71534+}
71535+
71536+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71537+{
71538+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71539+ void *ret = __kmalloc_node_align(size, gfp, node, align);
71540+
71541+ if (!ZERO_OR_NULL_PTR(ret))
71542+ kmemleak_alloc(ret, size, 1, gfp);
71543 return ret;
71544 }
71545 EXPORT_SYMBOL(__kmalloc_node);
71546@@ -533,13 +547,92 @@ void kfree(const void *block)
71547 sp = slob_page(block);
71548 if (is_slob_page(sp)) {
71549 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71550- unsigned int *m = (unsigned int *)(block - align);
71551- slob_free(m, *m + align);
71552- } else
71553+ slob_t *m = (slob_t *)(block - align);
71554+ slob_free(m, m[0].units + align);
71555+ } else {
71556+ clear_slob_page(sp);
71557+ free_slob_page(sp);
71558+ sp->size = 0;
71559 put_page(&sp->page);
71560+ }
71561 }
71562 EXPORT_SYMBOL(kfree);
71563
71564+void check_object_size(const void *ptr, unsigned long n, bool to)
71565+{
71566+
71567+#ifdef CONFIG_PAX_USERCOPY
71568+ struct slob_page *sp;
71569+ const slob_t *free;
71570+ const void *base;
71571+ unsigned long flags;
71572+ const char *type;
71573+
71574+ if (!n)
71575+ return;
71576+
71577+ type = "<null>";
71578+ if (ZERO_OR_NULL_PTR(ptr))
71579+ goto report;
71580+
71581+ if (!virt_addr_valid(ptr))
71582+ return;
71583+
71584+ type = "<process stack>";
71585+ sp = slob_page(ptr);
71586+ if (!PageSlab((struct page*)sp)) {
71587+ if (object_is_on_stack(ptr, n) == -1)
71588+ goto report;
71589+ return;
71590+ }
71591+
71592+ type = "<slob>";
71593+ if (sp->size) {
71594+ base = page_address(&sp->page);
71595+ if (base <= ptr && n <= sp->size - (ptr - base))
71596+ return;
71597+ goto report;
71598+ }
71599+
71600+ /* some tricky double walking to find the chunk */
71601+ spin_lock_irqsave(&slob_lock, flags);
71602+ base = (void *)((unsigned long)ptr & PAGE_MASK);
71603+ free = sp->free;
71604+
71605+ while (!slob_last(free) && (void *)free <= ptr) {
71606+ base = free + slob_units(free);
71607+ free = slob_next(free);
71608+ }
71609+
71610+ while (base < (void *)free) {
71611+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
71612+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
71613+ int offset;
71614+
71615+ if (ptr < base + align)
71616+ break;
71617+
71618+ offset = ptr - base - align;
71619+ if (offset >= m) {
71620+ base += size;
71621+ continue;
71622+ }
71623+
71624+ if (n > m - offset)
71625+ break;
71626+
71627+ spin_unlock_irqrestore(&slob_lock, flags);
71628+ return;
71629+ }
71630+
71631+ spin_unlock_irqrestore(&slob_lock, flags);
71632+report:
71633+ pax_report_usercopy(ptr, n, to, type);
71634+#endif
71635+
71636+}
71637+EXPORT_SYMBOL(check_object_size);
71638+
71639 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
71640 size_t ksize(const void *block)
71641 {
71642@@ -552,10 +645,10 @@ size_t ksize(const void *block)
71643 sp = slob_page(block);
71644 if (is_slob_page(sp)) {
71645 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71646- unsigned int *m = (unsigned int *)(block - align);
71647- return SLOB_UNITS(*m) * SLOB_UNIT;
71648+ slob_t *m = (slob_t *)(block - align);
71649+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
71650 } else
71651- return sp->page.private;
71652+ return sp->size;
71653 }
71654 EXPORT_SYMBOL(ksize);
71655
71656@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(con
71657 {
71658 struct kmem_cache *c;
71659
71660+#ifdef CONFIG_PAX_USERCOPY
71661+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
71662+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
71663+#else
71664 c = slob_alloc(sizeof(struct kmem_cache),
71665 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
71666+#endif
71667
71668 if (c) {
71669 c->name = name;
71670@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_
71671
71672 lockdep_trace_alloc(flags);
71673
71674+#ifdef CONFIG_PAX_USERCOPY
71675+ b = __kmalloc_node_align(c->size, flags, node, c->align);
71676+#else
71677 if (c->size < PAGE_SIZE) {
71678 b = slob_alloc(c->size, flags, c->align, node);
71679 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71680 SLOB_UNITS(c->size) * SLOB_UNIT,
71681 flags, node);
71682 } else {
71683+ struct slob_page *sp;
71684+
71685 b = slob_new_pages(flags, get_order(c->size), node);
71686+ sp = slob_page(b);
71687+ sp->size = c->size;
71688 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71689 PAGE_SIZE << get_order(c->size),
71690 flags, node);
71691 }
71692+#endif
71693
71694 if (c->ctor)
71695 c->ctor(b);
71696@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71697
71698 static void __kmem_cache_free(void *b, int size)
71699 {
71700- if (size < PAGE_SIZE)
71701+ struct slob_page *sp = slob_page(b);
71702+
71703+ if (is_slob_page(sp))
71704 slob_free(b, size);
71705- else
71706+ else {
71707+ clear_slob_page(sp);
71708+ free_slob_page(sp);
71709+ sp->size = 0;
71710 slob_free_pages(b, get_order(size));
71711+ }
71712 }
71713
71714 static void kmem_rcu_free(struct rcu_head *head)
71715@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_hea
71716
71717 void kmem_cache_free(struct kmem_cache *c, void *b)
71718 {
71719+ int size = c->size;
71720+
71721+#ifdef CONFIG_PAX_USERCOPY
71722+ if (size + c->align < PAGE_SIZE) {
71723+ size += c->align;
71724+ b -= c->align;
71725+ }
71726+#endif
71727+
71728 kmemleak_free_recursive(b, c->flags);
71729 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71730 struct slob_rcu *slob_rcu;
71731- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71732- slob_rcu->size = c->size;
71733+ slob_rcu = b + (size - sizeof(struct slob_rcu));
71734+ slob_rcu->size = size;
71735 call_rcu(&slob_rcu->head, kmem_rcu_free);
71736 } else {
71737- __kmem_cache_free(b, c->size);
71738+ __kmem_cache_free(b, size);
71739 }
71740
71741+#ifdef CONFIG_PAX_USERCOPY
71742+ trace_kfree(_RET_IP_, b);
71743+#else
71744 trace_kmem_cache_free(_RET_IP_, b);
71745+#endif
71746+
71747 }
71748 EXPORT_SYMBOL(kmem_cache_free);
71749
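[editorial sketch, not part of the patch] The mm/slob.c hunks above replace the single unsigned int size prefix with two slob_t units, so both the requested size and the alignment can be read back later (m[0].units / m[1].units). The following is a rough userspace analogue of that header-before-payload layout, assuming a fixed minimum alignment large enough to hold the header; every name here is a stand-in.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define MIN_ALIGN 16	/* stands in for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN) */

struct blk_hdr { size_t size; size_t align; };	/* analogue of m[0].units / m[1].units */

static void *hdr_alloc(size_t size)
{
	char *raw = malloc(MIN_ALIGN + size);	/* header occupies the alignment slack */
	if (!raw)
		return NULL;
	((struct blk_hdr *)raw)->size  = size;		/* m[0].units = size  */
	((struct blk_hdr *)raw)->align = MIN_ALIGN;	/* m[1].units = align */
	return raw + MIN_ALIGN;			/* caller sees the aligned payload */
}

static size_t hdr_size(const void *p)		/* what ksize() reads back */
{
	return ((const struct blk_hdr *)((const char *)p - MIN_ALIGN))->size;
}

int main(void)
{
	void *p = hdr_alloc(100);
	assert(p && hdr_size(p) == 100);
	printf("stored size: %zu\n", hdr_size(p));
	free((char *)p - MIN_ALIGN);
	return 0;
}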
71750diff -urNp linux-3.1.4/mm/slub.c linux-3.1.4/mm/slub.c
71751--- linux-3.1.4/mm/slub.c 2011-11-11 15:19:27.000000000 -0500
71752+++ linux-3.1.4/mm/slub.c 2011-11-16 19:27:25.000000000 -0500
71753@@ -208,7 +208,7 @@ struct track {
71754
71755 enum track_item { TRACK_ALLOC, TRACK_FREE };
71756
71757-#ifdef CONFIG_SYSFS
71758+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71759 static int sysfs_slab_add(struct kmem_cache *);
71760 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71761 static void sysfs_slab_remove(struct kmem_cache *);
71762@@ -556,7 +556,7 @@ static void print_track(const char *s, s
71763 if (!t->addr)
71764 return;
71765
71766- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71767+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71768 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71769 #ifdef CONFIG_STACKTRACE
71770 {
71771@@ -2456,6 +2456,8 @@ void kmem_cache_free(struct kmem_cache *
71772
71773 page = virt_to_head_page(x);
71774
71775+ BUG_ON(!PageSlab(page));
71776+
71777 slab_free(s, page, x, _RET_IP_);
71778
71779 trace_kmem_cache_free(_RET_IP_, x);
71780@@ -2489,7 +2491,7 @@ static int slub_min_objects;
71781 * Merge control. If this is set then no merging of slab caches will occur.
71782 * (Could be removed. This was introduced to pacify the merge skeptics.)
71783 */
71784-static int slub_nomerge;
71785+static int slub_nomerge = 1;
71786
71787 /*
71788 * Calculate the order of allocation given an slab object size.
71789@@ -2912,7 +2914,7 @@ static int kmem_cache_open(struct kmem_c
71790 * list to avoid pounding the page allocator excessively.
71791 */
71792 set_min_partial(s, ilog2(s->size));
71793- s->refcount = 1;
71794+ atomic_set(&s->refcount, 1);
71795 #ifdef CONFIG_NUMA
71796 s->remote_node_defrag_ratio = 1000;
71797 #endif
71798@@ -3017,8 +3019,7 @@ static inline int kmem_cache_close(struc
71799 void kmem_cache_destroy(struct kmem_cache *s)
71800 {
71801 down_write(&slub_lock);
71802- s->refcount--;
71803- if (!s->refcount) {
71804+ if (atomic_dec_and_test(&s->refcount)) {
71805 list_del(&s->list);
71806 if (kmem_cache_close(s)) {
71807 printk(KERN_ERR "SLUB %s: %s called for cache that "
71808@@ -3228,6 +3229,50 @@ void *__kmalloc_node(size_t size, gfp_t
71809 EXPORT_SYMBOL(__kmalloc_node);
71810 #endif
71811
71812+void check_object_size(const void *ptr, unsigned long n, bool to)
71813+{
71814+
71815+#ifdef CONFIG_PAX_USERCOPY
71816+ struct page *page;
71817+ struct kmem_cache *s = NULL;
71818+ unsigned long offset;
71819+ const char *type;
71820+
71821+ if (!n)
71822+ return;
71823+
71824+ type = "<null>";
71825+ if (ZERO_OR_NULL_PTR(ptr))
71826+ goto report;
71827+
71828+ if (!virt_addr_valid(ptr))
71829+ return;
71830+
71831+ page = virt_to_head_page(ptr);
71832+
71833+ type = "<process stack>";
71834+ if (!PageSlab(page)) {
71835+ if (object_is_on_stack(ptr, n) == -1)
71836+ goto report;
71837+ return;
71838+ }
71839+
71840+ s = page->slab;
71841+ type = s->name;
71842+ if (!(s->flags & SLAB_USERCOPY))
71843+ goto report;
71844+
71845+ offset = (ptr - page_address(page)) % s->size;
71846+ if (offset <= s->objsize && n <= s->objsize - offset)
71847+ return;
71848+
71849+report:
71850+ pax_report_usercopy(ptr, n, to, type);
71851+#endif
71852+
71853+}
71854+EXPORT_SYMBOL(check_object_size);
71855+
71856 size_t ksize(const void *object)
71857 {
71858 struct page *page;
71859@@ -3502,7 +3547,7 @@ static void __init kmem_cache_bootstrap_
71860 int node;
71861
71862 list_add(&s->list, &slab_caches);
71863- s->refcount = -1;
71864+ atomic_set(&s->refcount, -1);
71865
71866 for_each_node_state(node, N_NORMAL_MEMORY) {
71867 struct kmem_cache_node *n = get_node(s, node);
71868@@ -3619,17 +3664,17 @@ void __init kmem_cache_init(void)
71869
71870 /* Caches that are not of the two-to-the-power-of size */
71871 if (KMALLOC_MIN_SIZE <= 32) {
71872- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71873+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71874 caches++;
71875 }
71876
71877 if (KMALLOC_MIN_SIZE <= 64) {
71878- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71879+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71880 caches++;
71881 }
71882
71883 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71884- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71885+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71886 caches++;
71887 }
71888
71889@@ -3697,7 +3742,7 @@ static int slab_unmergeable(struct kmem_
71890 /*
71891 * We may have set a slab to be unmergeable during bootstrap.
71892 */
71893- if (s->refcount < 0)
71894+ if (atomic_read(&s->refcount) < 0)
71895 return 1;
71896
71897 return 0;
71898@@ -3756,7 +3801,7 @@ struct kmem_cache *kmem_cache_create(con
71899 down_write(&slub_lock);
71900 s = find_mergeable(size, align, flags, name, ctor);
71901 if (s) {
71902- s->refcount++;
71903+ atomic_inc(&s->refcount);
71904 /*
71905 * Adjust the object sizes so that we clear
71906 * the complete object on kzalloc.
71907@@ -3765,7 +3810,7 @@ struct kmem_cache *kmem_cache_create(con
71908 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71909
71910 if (sysfs_slab_alias(s, name)) {
71911- s->refcount--;
71912+ atomic_dec(&s->refcount);
71913 goto err;
71914 }
71915 up_write(&slub_lock);
71916@@ -3893,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t
71917 }
71918 #endif
71919
71920-#ifdef CONFIG_SYSFS
71921+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71922 static int count_inuse(struct page *page)
71923 {
71924 return page->inuse;
71925@@ -4280,12 +4325,12 @@ static void resiliency_test(void)
71926 validate_slab_cache(kmalloc_caches[9]);
71927 }
71928 #else
71929-#ifdef CONFIG_SYSFS
71930+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71931 static void resiliency_test(void) {};
71932 #endif
71933 #endif
71934
71935-#ifdef CONFIG_SYSFS
71936+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71937 enum slab_stat_type {
71938 SL_ALL, /* All slabs */
71939 SL_PARTIAL, /* Only partially allocated slabs */
71940@@ -4495,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
71941
71942 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71943 {
71944- return sprintf(buf, "%d\n", s->refcount - 1);
71945+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71946 }
71947 SLAB_ATTR_RO(aliases);
71948
71949@@ -5025,6 +5070,7 @@ static char *create_unique_id(struct kme
71950 return name;
71951 }
71952
71953+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71954 static int sysfs_slab_add(struct kmem_cache *s)
71955 {
71956 int err;
71957@@ -5087,6 +5133,7 @@ static void sysfs_slab_remove(struct kme
71958 kobject_del(&s->kobj);
71959 kobject_put(&s->kobj);
71960 }
71961+#endif
71962
71963 /*
71964 * Need to buffer aliases during bootup until sysfs becomes
71965@@ -5100,6 +5147,7 @@ struct saved_alias {
71966
71967 static struct saved_alias *alias_list;
71968
71969+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71970 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71971 {
71972 struct saved_alias *al;
71973@@ -5122,6 +5170,7 @@ static int sysfs_slab_alias(struct kmem_
71974 alias_list = al;
71975 return 0;
71976 }
71977+#endif
71978
71979 static int __init slab_sysfs_init(void)
71980 {
71981@@ -5257,7 +5306,13 @@ static const struct file_operations proc
71982
71983 static int __init slab_proc_init(void)
71984 {
71985- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
71986+ mode_t gr_mode = S_IRUGO;
71987+
71988+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71989+ gr_mode = S_IRUSR;
71990+#endif
71991+
71992+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
71993 return 0;
71994 }
71995 module_init(slab_proc_init);
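[editorial sketch, not part of the patch] In the SLUB variant of check_object_size() above, objects are laid out at a fixed stride from the start of the slab page, so the offset inside the containing object is recovered with a modulo before the same fits-inside-the-object test is applied. A small sketch of that computation under the same assumption; stride, usable size and addresses are illustrative only.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool fits_in_packed_object(uintptr_t page_base, size_t stride,
                                  size_t objsize, uintptr_t ptr, size_t n)
{
	size_t offset = (ptr - page_base) % stride;	/* position inside one object */

	/* mirrors: offset <= s->objsize && n <= s->objsize - offset */
	return offset <= objsize && n <= objsize - offset;
}

int main(void)
{
	/* stride 64, usable size 64: a 32-byte copy at offset 40 would overflow */
	printf("%d\n", fits_in_packed_object(0x1000, 64, 64, 0x1000 + 64 * 3 + 40, 32));  /* 0 */
	printf("%d\n", fits_in_packed_object(0x1000, 64, 64, 0x1000 + 64 * 3 + 40, 16));  /* 1 */
	return 0;
}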
71996diff -urNp linux-3.1.4/mm/swap.c linux-3.1.4/mm/swap.c
71997--- linux-3.1.4/mm/swap.c 2011-11-11 15:19:27.000000000 -0500
71998+++ linux-3.1.4/mm/swap.c 2011-11-16 18:39:08.000000000 -0500
71999@@ -31,6 +31,7 @@
72000 #include <linux/backing-dev.h>
72001 #include <linux/memcontrol.h>
72002 #include <linux/gfp.h>
72003+#include <linux/hugetlb.h>
72004
72005 #include "internal.h"
72006
72007@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
72008
72009 __page_cache_release(page);
72010 dtor = get_compound_page_dtor(page);
72011+ if (!PageHuge(page))
72012+ BUG_ON(dtor != free_compound_page);
72013 (*dtor)(page);
72014 }
72015
72016diff -urNp linux-3.1.4/mm/swapfile.c linux-3.1.4/mm/swapfile.c
72017--- linux-3.1.4/mm/swapfile.c 2011-11-11 15:19:27.000000000 -0500
72018+++ linux-3.1.4/mm/swapfile.c 2011-11-16 18:39:08.000000000 -0500
72019@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
72020
72021 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
72022 /* Activity counter to indicate that a swapon or swapoff has occurred */
72023-static atomic_t proc_poll_event = ATOMIC_INIT(0);
72024+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
72025
72026 static inline unsigned char swap_count(unsigned char ent)
72027 {
72028@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
72029 }
72030 filp_close(swap_file, NULL);
72031 err = 0;
72032- atomic_inc(&proc_poll_event);
72033+ atomic_inc_unchecked(&proc_poll_event);
72034 wake_up_interruptible(&proc_poll_wait);
72035
72036 out_dput:
72037@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *
72038
72039 poll_wait(file, &proc_poll_wait, wait);
72040
72041- if (seq->poll_event != atomic_read(&proc_poll_event)) {
72042- seq->poll_event = atomic_read(&proc_poll_event);
72043+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
72044+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
72045 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
72046 }
72047
72048@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inod
72049 return ret;
72050
72051 seq = file->private_data;
72052- seq->poll_event = atomic_read(&proc_poll_event);
72053+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
72054 return 0;
72055 }
72056
72057@@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __use
72058 (p->flags & SWP_DISCARDABLE) ? "D" : "");
72059
72060 mutex_unlock(&swapon_mutex);
72061- atomic_inc(&proc_poll_event);
72062+ atomic_inc_unchecked(&proc_poll_event);
72063 wake_up_interruptible(&proc_poll_wait);
72064
72065 if (S_ISREG(inode->i_mode))
72066diff -urNp linux-3.1.4/mm/util.c linux-3.1.4/mm/util.c
72067--- linux-3.1.4/mm/util.c 2011-11-11 15:19:27.000000000 -0500
72068+++ linux-3.1.4/mm/util.c 2011-11-16 18:39:08.000000000 -0500
72069@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
72070 * allocated buffer. Use this if you don't want to free the buffer immediately
72071 * like, for example, with RCU.
72072 */
72073+#undef __krealloc
72074 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
72075 {
72076 void *ret;
72077@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
72078 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
72079 * %NULL pointer, the object pointed to is freed.
72080 */
72081+#undef krealloc
72082 void *krealloc(const void *p, size_t new_size, gfp_t flags)
72083 {
72084 void *ret;
72085@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
72086 void arch_pick_mmap_layout(struct mm_struct *mm)
72087 {
72088 mm->mmap_base = TASK_UNMAPPED_BASE;
72089+
72090+#ifdef CONFIG_PAX_RANDMMAP
72091+ if (mm->pax_flags & MF_PAX_RANDMMAP)
72092+ mm->mmap_base += mm->delta_mmap;
72093+#endif
72094+
72095 mm->get_unmapped_area = arch_get_unmapped_area;
72096 mm->unmap_area = arch_unmap_area;
72097 }
72098diff -urNp linux-3.1.4/mm/vmalloc.c linux-3.1.4/mm/vmalloc.c
72099--- linux-3.1.4/mm/vmalloc.c 2011-11-11 15:19:27.000000000 -0500
72100+++ linux-3.1.4/mm/vmalloc.c 2011-11-16 18:40:44.000000000 -0500
72101@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
72102
72103 pte = pte_offset_kernel(pmd, addr);
72104 do {
72105- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
72106- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
72107+
72108+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72109+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
72110+ BUG_ON(!pte_exec(*pte));
72111+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
72112+ continue;
72113+ }
72114+#endif
72115+
72116+ {
72117+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
72118+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
72119+ }
72120 } while (pte++, addr += PAGE_SIZE, addr != end);
72121 }
72122
72123@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
72124 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
72125 {
72126 pte_t *pte;
72127+ int ret = -ENOMEM;
72128
72129 /*
72130 * nr is a running index into the array which helps higher level
72131@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
72132 pte = pte_alloc_kernel(pmd, addr);
72133 if (!pte)
72134 return -ENOMEM;
72135+
72136+ pax_open_kernel();
72137 do {
72138 struct page *page = pages[*nr];
72139
72140- if (WARN_ON(!pte_none(*pte)))
72141- return -EBUSY;
72142- if (WARN_ON(!page))
72143- return -ENOMEM;
72144+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72145+ if (pgprot_val(prot) & _PAGE_NX)
72146+#endif
72147+
72148+ if (WARN_ON(!pte_none(*pte))) {
72149+ ret = -EBUSY;
72150+ goto out;
72151+ }
72152+ if (WARN_ON(!page)) {
72153+ ret = -ENOMEM;
72154+ goto out;
72155+ }
72156 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
72157 (*nr)++;
72158 } while (pte++, addr += PAGE_SIZE, addr != end);
72159- return 0;
72160+ ret = 0;
72161+out:
72162+ pax_close_kernel();
72163+ return ret;
72164 }
72165
72166 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
72167@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
72168 * and fall back on vmalloc() if that fails. Others
72169 * just put it in the vmalloc space.
72170 */
72171-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
72172+#ifdef CONFIG_MODULES
72173+#ifdef MODULES_VADDR
72174 unsigned long addr = (unsigned long)x;
72175 if (addr >= MODULES_VADDR && addr < MODULES_END)
72176 return 1;
72177 #endif
72178+
72179+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72180+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
72181+ return 1;
72182+#endif
72183+
72184+#endif
72185+
72186 return is_vmalloc_addr(x);
72187 }
72188
72189@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
72190
72191 if (!pgd_none(*pgd)) {
72192 pud_t *pud = pud_offset(pgd, addr);
72193+#ifdef CONFIG_X86
72194+ if (!pud_large(*pud))
72195+#endif
72196 if (!pud_none(*pud)) {
72197 pmd_t *pmd = pmd_offset(pud, addr);
72198+#ifdef CONFIG_X86
72199+ if (!pmd_large(*pmd))
72200+#endif
72201 if (!pmd_none(*pmd)) {
72202 pte_t *ptep, pte;
72203
72204@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_n
72205 struct vm_struct *area;
72206
72207 BUG_ON(in_interrupt());
72208+
72209+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72210+ if (flags & VM_KERNEXEC) {
72211+ if (start != VMALLOC_START || end != VMALLOC_END)
72212+ return NULL;
72213+ start = (unsigned long)MODULES_EXEC_VADDR;
72214+ end = (unsigned long)MODULES_EXEC_END;
72215+ }
72216+#endif
72217+
72218 if (flags & VM_IOREMAP) {
72219 int bit = fls(size);
72220
72221@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned
72222 if (count > totalram_pages)
72223 return NULL;
72224
72225+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72226+ if (!(pgprot_val(prot) & _PAGE_NX))
72227+ flags |= VM_KERNEXEC;
72228+#endif
72229+
72230 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
72231 __builtin_return_address(0));
72232 if (!area)
72233@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long
72234 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
72235 return NULL;
72236
72237+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72238+ if (!(pgprot_val(prot) & _PAGE_NX))
72239+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
72240+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
72241+ else
72242+#endif
72243+
72244 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
72245 start, end, node, gfp_mask, caller);
72246
72247@@ -1672,6 +1734,7 @@ static void *__vmalloc_node(unsigned lon
72248 gfp_mask, prot, node, caller);
72249 }
72250
72251+#undef __vmalloc
72252 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
72253 {
72254 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
72255@@ -1695,6 +1758,7 @@ static inline void *__vmalloc_node_flags
72256 * For tight control over page level allocator and protection flags
72257 * use __vmalloc() instead.
72258 */
72259+#undef vmalloc
72260 void *vmalloc(unsigned long size)
72261 {
72262 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
72263@@ -1711,6 +1775,7 @@ EXPORT_SYMBOL(vmalloc);
72264 * For tight control over page level allocator and protection flags
72265 * use __vmalloc() instead.
72266 */
72267+#undef vzalloc
72268 void *vzalloc(unsigned long size)
72269 {
72270 return __vmalloc_node_flags(size, -1,
72271@@ -1725,6 +1790,7 @@ EXPORT_SYMBOL(vzalloc);
72272 * The resulting memory area is zeroed so it can be mapped to userspace
72273 * without leaking data.
72274 */
72275+#undef vmalloc_user
72276 void *vmalloc_user(unsigned long size)
72277 {
72278 struct vm_struct *area;
72279@@ -1752,6 +1818,7 @@ EXPORT_SYMBOL(vmalloc_user);
72280 * For tight control over page level allocator and protection flags
72281 * use __vmalloc() instead.
72282 */
72283+#undef vmalloc_node
72284 void *vmalloc_node(unsigned long size, int node)
72285 {
72286 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
72287@@ -1771,6 +1838,7 @@ EXPORT_SYMBOL(vmalloc_node);
72288 * For tight control over page level allocator and protection flags
72289 * use __vmalloc_node() instead.
72290 */
72291+#undef vzalloc_node
72292 void *vzalloc_node(unsigned long size, int node)
72293 {
72294 return __vmalloc_node_flags(size, node,
72295@@ -1793,10 +1861,10 @@ EXPORT_SYMBOL(vzalloc_node);
72296 * For tight control over page level allocator and protection flags
72297 * use __vmalloc() instead.
72298 */
72299-
72300+#undef vmalloc_exec
72301 void *vmalloc_exec(unsigned long size)
72302 {
72303- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
72304+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
72305 -1, __builtin_return_address(0));
72306 }
72307
72308@@ -1815,6 +1883,7 @@ void *vmalloc_exec(unsigned long size)
72309 * Allocate enough 32bit PA addressable pages to cover @size from the
72310 * page level allocator and map them into contiguous kernel virtual space.
72311 */
72312+#undef vmalloc_32
72313 void *vmalloc_32(unsigned long size)
72314 {
72315 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
72316@@ -1829,6 +1898,7 @@ EXPORT_SYMBOL(vmalloc_32);
72317 * The resulting memory area is 32bit addressable and zeroed so it can be
72318 * mapped to userspace without leaking data.
72319 */
72320+#undef vmalloc_32_user
72321 void *vmalloc_32_user(unsigned long size)
72322 {
72323 struct vm_struct *area;
72324@@ -2091,6 +2161,8 @@ int remap_vmalloc_range(struct vm_area_s
72325 unsigned long uaddr = vma->vm_start;
72326 unsigned long usize = vma->vm_end - vma->vm_start;
72327
72328+ BUG_ON(vma->vm_mirror);
72329+
72330 if ((PAGE_SIZE-1) & (unsigned long)addr)
72331 return -EINVAL;
72332
72333diff -urNp linux-3.1.4/mm/vmstat.c linux-3.1.4/mm/vmstat.c
72334--- linux-3.1.4/mm/vmstat.c 2011-11-11 15:19:27.000000000 -0500
72335+++ linux-3.1.4/mm/vmstat.c 2011-11-16 18:40:44.000000000 -0500
72336@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
72337 *
72338 * vm_stat contains the global counters
72339 */
72340-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72341+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72342 EXPORT_SYMBOL(vm_stat);
72343
72344 #ifdef CONFIG_SMP
72345@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
72346 v = p->vm_stat_diff[i];
72347 p->vm_stat_diff[i] = 0;
72348 local_irq_restore(flags);
72349- atomic_long_add(v, &zone->vm_stat[i]);
72350+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
72351 global_diff[i] += v;
72352 #ifdef CONFIG_NUMA
72353 /* 3 seconds idle till flush */
72354@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
72355
72356 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
72357 if (global_diff[i])
72358- atomic_long_add(global_diff[i], &vm_stat[i]);
72359+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
72360 }
72361
72362 #endif
72363@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
72364 start_cpu_timer(cpu);
72365 #endif
72366 #ifdef CONFIG_PROC_FS
72367- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
72368- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
72369- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
72370- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
72371+ {
72372+ mode_t gr_mode = S_IRUGO;
72373+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72374+ gr_mode = S_IRUSR;
72375+#endif
72376+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
72377+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
72378+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72379+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
72380+#else
72381+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
72382+#endif
72383+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
72384+ }
72385 #endif
72386 return 0;
72387 }
72388diff -urNp linux-3.1.4/net/8021q/vlan.c linux-3.1.4/net/8021q/vlan.c
72389--- linux-3.1.4/net/8021q/vlan.c 2011-11-11 15:19:27.000000000 -0500
72390+++ linux-3.1.4/net/8021q/vlan.c 2011-11-16 18:39:08.000000000 -0500
72391@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net
72392 err = -EPERM;
72393 if (!capable(CAP_NET_ADMIN))
72394 break;
72395- if ((args.u.name_type >= 0) &&
72396- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
72397+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
72398 struct vlan_net *vn;
72399
72400 vn = net_generic(net, vlan_net_id);
72401diff -urNp linux-3.1.4/net/9p/trans_fd.c linux-3.1.4/net/9p/trans_fd.c
72402--- linux-3.1.4/net/9p/trans_fd.c 2011-11-11 15:19:27.000000000 -0500
72403+++ linux-3.1.4/net/9p/trans_fd.c 2011-11-16 18:39:08.000000000 -0500
72404@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
72405 oldfs = get_fs();
72406 set_fs(get_ds());
72407 /* The cast to a user pointer is valid due to the set_fs() */
72408- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
72409+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
72410 set_fs(oldfs);
72411
72412 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
72413diff -urNp linux-3.1.4/net/9p/trans_virtio.c linux-3.1.4/net/9p/trans_virtio.c
72414--- linux-3.1.4/net/9p/trans_virtio.c 2011-11-11 15:19:27.000000000 -0500
72415+++ linux-3.1.4/net/9p/trans_virtio.c 2011-11-16 18:39:08.000000000 -0500
72416@@ -327,7 +327,7 @@ req_retry_pinned:
72417 } else {
72418 char *pbuf;
72419 if (req->tc->pubuf)
72420- pbuf = (__force char *) req->tc->pubuf;
72421+ pbuf = (char __force_kernel *) req->tc->pubuf;
72422 else
72423 pbuf = req->tc->pkbuf;
72424 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
72425@@ -357,7 +357,7 @@ req_retry_pinned:
72426 } else {
72427 char *pbuf;
72428 if (req->tc->pubuf)
72429- pbuf = (__force char *) req->tc->pubuf;
72430+ pbuf = (char __force_kernel *) req->tc->pubuf;
72431 else
72432 pbuf = req->tc->pkbuf;
72433
72434diff -urNp linux-3.1.4/net/atm/atm_misc.c linux-3.1.4/net/atm/atm_misc.c
72435--- linux-3.1.4/net/atm/atm_misc.c 2011-11-11 15:19:27.000000000 -0500
72436+++ linux-3.1.4/net/atm/atm_misc.c 2011-11-16 18:39:08.000000000 -0500
72437@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
72438 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
72439 return 1;
72440 atm_return(vcc, truesize);
72441- atomic_inc(&vcc->stats->rx_drop);
72442+ atomic_inc_unchecked(&vcc->stats->rx_drop);
72443 return 0;
72444 }
72445 EXPORT_SYMBOL(atm_charge);
72446@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
72447 }
72448 }
72449 atm_return(vcc, guess);
72450- atomic_inc(&vcc->stats->rx_drop);
72451+ atomic_inc_unchecked(&vcc->stats->rx_drop);
72452 return NULL;
72453 }
72454 EXPORT_SYMBOL(atm_alloc_charge);
72455@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
72456
72457 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72458 {
72459-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72460+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72461 __SONET_ITEMS
72462 #undef __HANDLE_ITEM
72463 }
72464@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
72465
72466 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72467 {
72468-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72469+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
72470 __SONET_ITEMS
72471 #undef __HANDLE_ITEM
72472 }
72473diff -urNp linux-3.1.4/net/atm/lec.h linux-3.1.4/net/atm/lec.h
72474--- linux-3.1.4/net/atm/lec.h 2011-11-11 15:19:27.000000000 -0500
72475+++ linux-3.1.4/net/atm/lec.h 2011-11-16 18:39:08.000000000 -0500
72476@@ -48,7 +48,7 @@ struct lane2_ops {
72477 const u8 *tlvs, u32 sizeoftlvs);
72478 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
72479 const u8 *tlvs, u32 sizeoftlvs);
72480-};
72481+} __no_const;
72482
72483 /*
72484 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
72485diff -urNp linux-3.1.4/net/atm/mpc.h linux-3.1.4/net/atm/mpc.h
72486--- linux-3.1.4/net/atm/mpc.h 2011-11-11 15:19:27.000000000 -0500
72487+++ linux-3.1.4/net/atm/mpc.h 2011-11-16 18:39:08.000000000 -0500
72488@@ -33,7 +33,7 @@ struct mpoa_client {
72489 struct mpc_parameters parameters; /* parameters for this client */
72490
72491 const struct net_device_ops *old_ops;
72492- struct net_device_ops new_ops;
72493+ net_device_ops_no_const new_ops;
72494 };
72495
72496
72497diff -urNp linux-3.1.4/net/atm/mpoa_caches.c linux-3.1.4/net/atm/mpoa_caches.c
72498--- linux-3.1.4/net/atm/mpoa_caches.c 2011-11-11 15:19:27.000000000 -0500
72499+++ linux-3.1.4/net/atm/mpoa_caches.c 2011-11-16 18:40:44.000000000 -0500
72500@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
72501 struct timeval now;
72502 struct k_message msg;
72503
72504+ pax_track_stack();
72505+
72506 do_gettimeofday(&now);
72507
72508 read_lock_bh(&client->ingress_lock);
72509diff -urNp linux-3.1.4/net/atm/proc.c linux-3.1.4/net/atm/proc.c
72510--- linux-3.1.4/net/atm/proc.c 2011-11-11 15:19:27.000000000 -0500
72511+++ linux-3.1.4/net/atm/proc.c 2011-11-16 18:39:08.000000000 -0500
72512@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
72513 const struct k_atm_aal_stats *stats)
72514 {
72515 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
72516- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
72517- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
72518- atomic_read(&stats->rx_drop));
72519+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
72520+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
72521+ atomic_read_unchecked(&stats->rx_drop));
72522 }
72523
72524 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
72525diff -urNp linux-3.1.4/net/atm/resources.c linux-3.1.4/net/atm/resources.c
72526--- linux-3.1.4/net/atm/resources.c 2011-11-11 15:19:27.000000000 -0500
72527+++ linux-3.1.4/net/atm/resources.c 2011-11-16 18:39:08.000000000 -0500
72528@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
72529 static void copy_aal_stats(struct k_atm_aal_stats *from,
72530 struct atm_aal_stats *to)
72531 {
72532-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72533+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72534 __AAL_STAT_ITEMS
72535 #undef __HANDLE_ITEM
72536 }
72537@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
72538 static void subtract_aal_stats(struct k_atm_aal_stats *from,
72539 struct atm_aal_stats *to)
72540 {
72541-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72542+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
72543 __AAL_STAT_ITEMS
72544 #undef __HANDLE_ITEM
72545 }
72546diff -urNp linux-3.1.4/net/batman-adv/hard-interface.c linux-3.1.4/net/batman-adv/hard-interface.c
72547--- linux-3.1.4/net/batman-adv/hard-interface.c 2011-11-11 15:19:27.000000000 -0500
72548+++ linux-3.1.4/net/batman-adv/hard-interface.c 2011-11-16 18:39:08.000000000 -0500
72549@@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_
72550 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
72551 dev_add_pack(&hard_iface->batman_adv_ptype);
72552
72553- atomic_set(&hard_iface->seqno, 1);
72554- atomic_set(&hard_iface->frag_seqno, 1);
72555+ atomic_set_unchecked(&hard_iface->seqno, 1);
72556+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
72557 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
72558 hard_iface->net_dev->name);
72559
72560diff -urNp linux-3.1.4/net/batman-adv/routing.c linux-3.1.4/net/batman-adv/routing.c
72561--- linux-3.1.4/net/batman-adv/routing.c 2011-11-11 15:19:27.000000000 -0500
72562+++ linux-3.1.4/net/batman-adv/routing.c 2011-11-16 18:39:08.000000000 -0500
72563@@ -656,7 +656,7 @@ void receive_bat_packet(const struct eth
72564 return;
72565
72566 /* could be changed by schedule_own_packet() */
72567- if_incoming_seqno = atomic_read(&if_incoming->seqno);
72568+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
72569
72570 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
72571
72572diff -urNp linux-3.1.4/net/batman-adv/send.c linux-3.1.4/net/batman-adv/send.c
72573--- linux-3.1.4/net/batman-adv/send.c 2011-11-11 15:19:27.000000000 -0500
72574+++ linux-3.1.4/net/batman-adv/send.c 2011-11-16 18:39:08.000000000 -0500
72575@@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_ifa
72576
72577 /* change sequence number to network order */
72578 batman_packet->seqno =
72579- htonl((uint32_t)atomic_read(&hard_iface->seqno));
72580+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
72581
72582 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
72583 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
72584@@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_ifa
72585 else
72586 batman_packet->gw_flags = NO_FLAGS;
72587
72588- atomic_inc(&hard_iface->seqno);
72589+ atomic_inc_unchecked(&hard_iface->seqno);
72590
72591 slide_own_bcast_window(hard_iface);
72592 send_time = own_send_time(bat_priv);
72593diff -urNp linux-3.1.4/net/batman-adv/soft-interface.c linux-3.1.4/net/batman-adv/soft-interface.c
72594--- linux-3.1.4/net/batman-adv/soft-interface.c 2011-11-11 15:19:27.000000000 -0500
72595+++ linux-3.1.4/net/batman-adv/soft-interface.c 2011-11-16 18:39:08.000000000 -0500
72596@@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *
72597
72598 /* set broadcast sequence number */
72599 bcast_packet->seqno =
72600- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
72601+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
72602
72603 add_bcast_packet_to_list(bat_priv, skb, 1);
72604
72605@@ -824,7 +824,7 @@ struct net_device *softif_create(const c
72606 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
72607
72608 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
72609- atomic_set(&bat_priv->bcast_seqno, 1);
72610+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
72611 atomic_set(&bat_priv->ttvn, 0);
72612 atomic_set(&bat_priv->tt_local_changes, 0);
72613 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
72614diff -urNp linux-3.1.4/net/batman-adv/types.h linux-3.1.4/net/batman-adv/types.h
72615--- linux-3.1.4/net/batman-adv/types.h 2011-11-26 19:57:29.000000000 -0500
72616+++ linux-3.1.4/net/batman-adv/types.h 2011-11-26 20:00:43.000000000 -0500
72617@@ -38,8 +38,8 @@ struct hard_iface {
72618 int16_t if_num;
72619 char if_status;
72620 struct net_device *net_dev;
72621- atomic_t seqno;
72622- atomic_t frag_seqno;
72623+ atomic_unchecked_t seqno;
72624+ atomic_unchecked_t frag_seqno;
72625 unsigned char *packet_buff;
72626 int packet_len;
72627 struct kobject *hardif_obj;
72628@@ -153,7 +153,7 @@ struct bat_priv {
72629 atomic_t orig_interval; /* uint */
72630 atomic_t hop_penalty; /* uint */
72631 atomic_t log_level; /* uint */
72632- atomic_t bcast_seqno;
72633+ atomic_unchecked_t bcast_seqno;
72634 atomic_t bcast_queue_left;
72635 atomic_t batman_queue_left;
72636 atomic_t ttvn; /* tranlation table version number */
72637diff -urNp linux-3.1.4/net/batman-adv/unicast.c linux-3.1.4/net/batman-adv/unicast.c
72638--- linux-3.1.4/net/batman-adv/unicast.c 2011-11-11 15:19:27.000000000 -0500
72639+++ linux-3.1.4/net/batman-adv/unicast.c 2011-11-16 18:39:08.000000000 -0500
72640@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, s
72641 frag1->flags = UNI_FRAG_HEAD | large_tail;
72642 frag2->flags = large_tail;
72643
72644- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
72645+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
72646 frag1->seqno = htons(seqno - 1);
72647 frag2->seqno = htons(seqno);
72648
72649diff -urNp linux-3.1.4/net/bluetooth/hci_conn.c linux-3.1.4/net/bluetooth/hci_conn.c
72650--- linux-3.1.4/net/bluetooth/hci_conn.c 2011-11-11 15:19:27.000000000 -0500
72651+++ linux-3.1.4/net/bluetooth/hci_conn.c 2011-11-16 18:39:08.000000000 -0500
72652@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *c
72653 cp.handle = cpu_to_le16(conn->handle);
72654 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
72655 cp.ediv = ediv;
72656- memcpy(cp.rand, rand, sizeof(rand));
72657+ memcpy(cp.rand, rand, sizeof(cp.rand));
72658
72659 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
72660 }
72661@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *c
72662 memset(&cp, 0, sizeof(cp));
72663
72664 cp.handle = cpu_to_le16(conn->handle);
72665- memcpy(cp.ltk, ltk, sizeof(ltk));
72666+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
72667
72668 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
72669 }
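[editorial sketch, not part of the patch] The two hci_conn.c hunks above fix a classic mistake: taking sizeof() of an array function parameter, which has already decayed to a pointer, instead of sizeof() of the destination field. A tiny self-contained illustration of the difference; the struct and names below are made up for the example.

#include <stdio.h>
#include <string.h>

struct cmd { unsigned char ltk[16]; };

static void fill(struct cmd *cp, const unsigned char ltk[16])
{
	/* sizeof(ltk) == sizeof(unsigned char *): only 4 or 8 bytes would be copied */
	printf("sizeof(ltk)=%zu  sizeof(cp->ltk)=%zu\n", sizeof(ltk), sizeof(cp->ltk));
	memcpy(cp->ltk, ltk, sizeof(cp->ltk));	/* the corrected form used by the patch */
}

int main(void)
{
	struct cmd c;
	unsigned char key[16] = { 0 };

	fill(&c, key);
	return 0;
}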
72670diff -urNp linux-3.1.4/net/bridge/br_multicast.c linux-3.1.4/net/bridge/br_multicast.c
72671--- linux-3.1.4/net/bridge/br_multicast.c 2011-11-11 15:19:27.000000000 -0500
72672+++ linux-3.1.4/net/bridge/br_multicast.c 2011-11-16 18:39:08.000000000 -0500
72673@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
72674 nexthdr = ip6h->nexthdr;
72675 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
72676
72677- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
72678+ if (nexthdr != IPPROTO_ICMPV6)
72679 return 0;
72680
72681 /* Okay, we found ICMPv6 header */
72682diff -urNp linux-3.1.4/net/bridge/netfilter/ebtables.c linux-3.1.4/net/bridge/netfilter/ebtables.c
72683--- linux-3.1.4/net/bridge/netfilter/ebtables.c 2011-11-11 15:19:27.000000000 -0500
72684+++ linux-3.1.4/net/bridge/netfilter/ebtables.c 2011-11-16 18:40:44.000000000 -0500
72685@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *s
72686 tmp.valid_hooks = t->table->valid_hooks;
72687 }
72688 mutex_unlock(&ebt_mutex);
72689- if (copy_to_user(user, &tmp, *len) != 0){
72690+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
72691 BUGPRINT("c2u Didn't work\n");
72692 ret = -EFAULT;
72693 break;
72694@@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_use
72695 int ret;
72696 void __user *pos;
72697
72698+ pax_track_stack();
72699+
72700 memset(&tinfo, 0, sizeof(tinfo));
72701
72702 if (cmd == EBT_SO_GET_ENTRIES) {
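[editorial sketch, not part of the patch] The ebtables hunk above adds a "*len > sizeof(tmp)" guard so a user-supplied length can no longer make copy_to_user() read past the kernel-side structure. A hedged userspace sketch of the same cap-before-copy pattern; the types and sizes are stand-ins.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct reply { char name[32]; unsigned int valid_hooks; };

static bool copy_reply(void *dst, size_t user_len, const struct reply *tmp)
{
	if (user_len > sizeof(*tmp))	/* mirrors: *len > sizeof(tmp) -> -EFAULT */
		return false;
	memcpy(dst, tmp, user_len);	/* never reads beyond *tmp */
	return true;
}

int main(void)
{
	struct reply r = { "filter", 0x7 };
	char buf[sizeof(struct reply)];

	printf("ok=%d\n", copy_reply(buf, sizeof(buf), &r));		/* accepted */
	printf("ok=%d\n", copy_reply(buf, sizeof(buf) + 8, &r));	/* rejected */
	return 0;
}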
72703diff -urNp linux-3.1.4/net/caif/caif_socket.c linux-3.1.4/net/caif/caif_socket.c
72704--- linux-3.1.4/net/caif/caif_socket.c 2011-11-11 15:19:27.000000000 -0500
72705+++ linux-3.1.4/net/caif/caif_socket.c 2011-11-16 18:39:08.000000000 -0500
72706@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72707 #ifdef CONFIG_DEBUG_FS
72708 struct debug_fs_counter {
72709 atomic_t caif_nr_socks;
72710- atomic_t caif_sock_create;
72711- atomic_t num_connect_req;
72712- atomic_t num_connect_resp;
72713- atomic_t num_connect_fail_resp;
72714- atomic_t num_disconnect;
72715- atomic_t num_remote_shutdown_ind;
72716- atomic_t num_tx_flow_off_ind;
72717- atomic_t num_tx_flow_on_ind;
72718- atomic_t num_rx_flow_off;
72719- atomic_t num_rx_flow_on;
72720+ atomic_unchecked_t caif_sock_create;
72721+ atomic_unchecked_t num_connect_req;
72722+ atomic_unchecked_t num_connect_resp;
72723+ atomic_unchecked_t num_connect_fail_resp;
72724+ atomic_unchecked_t num_disconnect;
72725+ atomic_unchecked_t num_remote_shutdown_ind;
72726+ atomic_unchecked_t num_tx_flow_off_ind;
72727+ atomic_unchecked_t num_tx_flow_on_ind;
72728+ atomic_unchecked_t num_rx_flow_off;
72729+ atomic_unchecked_t num_rx_flow_on;
72730 };
72731 static struct debug_fs_counter cnt;
72732 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72733+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72734 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72735 #else
72736 #define dbfs_atomic_inc(v) 0
72737@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
72738 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72739 sk_rcvbuf_lowwater(cf_sk));
72740 set_rx_flow_off(cf_sk);
72741- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72742+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72743 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72744 }
72745
72746@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
72747 set_rx_flow_off(cf_sk);
72748 if (net_ratelimit())
72749 pr_debug("sending flow OFF due to rmem_schedule\n");
72750- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72751+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72752 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72753 }
72754 skb->dev = NULL;
72755@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
72756 switch (flow) {
72757 case CAIF_CTRLCMD_FLOW_ON_IND:
72758 /* OK from modem to start sending again */
72759- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72760+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72761 set_tx_flow_on(cf_sk);
72762 cf_sk->sk.sk_state_change(&cf_sk->sk);
72763 break;
72764
72765 case CAIF_CTRLCMD_FLOW_OFF_IND:
72766 /* Modem asks us to shut up */
72767- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72768+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72769 set_tx_flow_off(cf_sk);
72770 cf_sk->sk.sk_state_change(&cf_sk->sk);
72771 break;
72772@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
72773 /* We're now connected */
72774 caif_client_register_refcnt(&cf_sk->layer,
72775 cfsk_hold, cfsk_put);
72776- dbfs_atomic_inc(&cnt.num_connect_resp);
72777+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72778 cf_sk->sk.sk_state = CAIF_CONNECTED;
72779 set_tx_flow_on(cf_sk);
72780 cf_sk->sk.sk_state_change(&cf_sk->sk);
72781@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
72782
72783 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72784 /* Connect request failed */
72785- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72786+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72787 cf_sk->sk.sk_err = ECONNREFUSED;
72788 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72789 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72790@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
72791
72792 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72793 /* Modem has closed this connection, or device is down. */
72794- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72795+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72796 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72797 cf_sk->sk.sk_err = ECONNRESET;
72798 set_rx_flow_on(cf_sk);
72799@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
72800 return;
72801
72802 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72803- dbfs_atomic_inc(&cnt.num_rx_flow_on);
72804+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72805 set_rx_flow_on(cf_sk);
72806 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72807 }
72808@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
72809 /*ifindex = id of the interface.*/
72810 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72811
72812- dbfs_atomic_inc(&cnt.num_connect_req);
72813+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72814 cf_sk->layer.receive = caif_sktrecv_cb;
72815
72816 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72817@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
72818 spin_unlock_bh(&sk->sk_receive_queue.lock);
72819 sock->sk = NULL;
72820
72821- dbfs_atomic_inc(&cnt.num_disconnect);
72822+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72823
72824 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72825 if (cf_sk->debugfs_socket_dir != NULL)
72826@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
72827 cf_sk->conn_req.protocol = protocol;
72828 /* Increase the number of sockets created. */
72829 dbfs_atomic_inc(&cnt.caif_nr_socks);
72830- num = dbfs_atomic_inc(&cnt.caif_sock_create);
72831+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72832 #ifdef CONFIG_DEBUG_FS
72833 if (!IS_ERR(debugfsdir)) {
72834
72835diff -urNp linux-3.1.4/net/caif/cfctrl.c linux-3.1.4/net/caif/cfctrl.c
72836--- linux-3.1.4/net/caif/cfctrl.c 2011-11-11 15:19:27.000000000 -0500
72837+++ linux-3.1.4/net/caif/cfctrl.c 2011-11-16 18:40:44.000000000 -0500
72838@@ -9,6 +9,7 @@
72839 #include <linux/stddef.h>
72840 #include <linux/spinlock.h>
72841 #include <linux/slab.h>
72842+#include <linux/sched.h>
72843 #include <net/caif/caif_layer.h>
72844 #include <net/caif/cfpkt.h>
72845 #include <net/caif/cfctrl.h>
72846@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
72847 dev_info.id = 0xff;
72848 memset(this, 0, sizeof(*this));
72849 cfsrvl_init(&this->serv, 0, &dev_info, false);
72850- atomic_set(&this->req_seq_no, 1);
72851- atomic_set(&this->rsp_seq_no, 1);
72852+ atomic_set_unchecked(&this->req_seq_no, 1);
72853+ atomic_set_unchecked(&this->rsp_seq_no, 1);
72854 this->serv.layer.receive = cfctrl_recv;
72855 sprintf(this->serv.layer.name, "ctrl");
72856 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72857@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
72858 struct cfctrl_request_info *req)
72859 {
72860 spin_lock_bh(&ctrl->info_list_lock);
72861- atomic_inc(&ctrl->req_seq_no);
72862- req->sequence_no = atomic_read(&ctrl->req_seq_no);
72863+ atomic_inc_unchecked(&ctrl->req_seq_no);
72864+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72865 list_add_tail(&req->list, &ctrl->list);
72866 spin_unlock_bh(&ctrl->info_list_lock);
72867 }
72868@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
72869 if (p != first)
72870 pr_warn("Requests are not received in order\n");
72871
72872- atomic_set(&ctrl->rsp_seq_no,
72873+ atomic_set_unchecked(&ctrl->rsp_seq_no,
72874 p->sequence_no);
72875 list_del(&p->list);
72876 goto out;
72877@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
72878 struct cfctrl *cfctrl = container_obj(layer);
72879 struct cfctrl_request_info rsp, *req;
72880
72881+ pax_track_stack();
72882
72883 cfpkt_extr_head(pkt, &cmdrsp, 1);
72884 cmd = cmdrsp & CFCTRL_CMD_MASK;
72885diff -urNp linux-3.1.4/net/compat.c linux-3.1.4/net/compat.c
72886--- linux-3.1.4/net/compat.c 2011-11-11 15:19:27.000000000 -0500
72887+++ linux-3.1.4/net/compat.c 2011-11-16 18:39:08.000000000 -0500
72888@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
72889 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72890 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72891 return -EFAULT;
72892- kmsg->msg_name = compat_ptr(tmp1);
72893- kmsg->msg_iov = compat_ptr(tmp2);
72894- kmsg->msg_control = compat_ptr(tmp3);
72895+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72896+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72897+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72898 return 0;
72899 }
72900
72901@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
72902
72903 if (kern_msg->msg_namelen) {
72904 if (mode == VERIFY_READ) {
72905- int err = move_addr_to_kernel(kern_msg->msg_name,
72906+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72907 kern_msg->msg_namelen,
72908 kern_address);
72909 if (err < 0)
72910@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
72911 kern_msg->msg_name = NULL;
72912
72913 tot_len = iov_from_user_compat_to_kern(kern_iov,
72914- (struct compat_iovec __user *)kern_msg->msg_iov,
72915+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
72916 kern_msg->msg_iovlen);
72917 if (tot_len >= 0)
72918 kern_msg->msg_iov = kern_iov;
72919@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
72920
72921 #define CMSG_COMPAT_FIRSTHDR(msg) \
72922 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72923- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72924+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72925 (struct compat_cmsghdr __user *)NULL)
72926
72927 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72928 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72929 (ucmlen) <= (unsigned long) \
72930 ((mhdr)->msg_controllen - \
72931- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72932+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72933
72934 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72935 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72936 {
72937 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72938- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72939+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72940 msg->msg_controllen)
72941 return NULL;
72942 return (struct compat_cmsghdr __user *)ptr;
72943@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
72944 {
72945 struct compat_timeval ctv;
72946 struct compat_timespec cts[3];
72947- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72948+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72949 struct compat_cmsghdr cmhdr;
72950 int cmlen;
72951
72952@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
72953
72954 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72955 {
72956- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72957+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72958 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72959 int fdnum = scm->fp->count;
72960 struct file **fp = scm->fp->fp;
72961@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
72962 return -EFAULT;
72963 old_fs = get_fs();
72964 set_fs(KERNEL_DS);
72965- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72966+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72967 set_fs(old_fs);
72968
72969 return err;
72970@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
72971 len = sizeof(ktime);
72972 old_fs = get_fs();
72973 set_fs(KERNEL_DS);
72974- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72975+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72976 set_fs(old_fs);
72977
72978 if (!err) {
72979@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
72980 case MCAST_JOIN_GROUP:
72981 case MCAST_LEAVE_GROUP:
72982 {
72983- struct compat_group_req __user *gr32 = (void *)optval;
72984+ struct compat_group_req __user *gr32 = (void __user *)optval;
72985 struct group_req __user *kgr =
72986 compat_alloc_user_space(sizeof(struct group_req));
72987 u32 interface;
72988@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
72989 case MCAST_BLOCK_SOURCE:
72990 case MCAST_UNBLOCK_SOURCE:
72991 {
72992- struct compat_group_source_req __user *gsr32 = (void *)optval;
72993+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72994 struct group_source_req __user *kgsr = compat_alloc_user_space(
72995 sizeof(struct group_source_req));
72996 u32 interface;
72997@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
72998 }
72999 case MCAST_MSFILTER:
73000 {
73001- struct compat_group_filter __user *gf32 = (void *)optval;
73002+ struct compat_group_filter __user *gf32 = (void __user *)optval;
73003 struct group_filter __user *kgf;
73004 u32 interface, fmode, numsrc;
73005
73006@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
73007 char __user *optval, int __user *optlen,
73008 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
73009 {
73010- struct compat_group_filter __user *gf32 = (void *)optval;
73011+ struct compat_group_filter __user *gf32 = (void __user *)optval;
73012 struct group_filter __user *kgf;
73013 int __user *koptlen;
73014 u32 interface, fmode, numsrc;
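
The net/compat.c hunks above replace bare casts with __force_user/__force_kernel wherever a kernel buffer is handed to an interface typed for the other address space, typically inside a set_fs(KERNEL_DS) window. Assuming these markers behave like mainline's __force combined with __user/__kernel (sparse-only annotations with no effect on generated code), a compilable sketch of the pattern:

/* Under sparse (__CHECKER__), __user tags a pointer as belonging to the user
 * address space and __force silences the address-space mismatch for a
 * deliberate cast; with a normal compiler both expand to nothing. */
#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force_user   __attribute__((force)) __user
#else
# define __user
# define __force_user
#endif

#include <stddef.h>

/* Stand-in for sock_setsockopt(): its optval parameter is __user-typed. */
static int fake_setsockopt(char __user *optval, size_t optlen)
{
        return optval && optlen ? 0 : -1;
}

int main(void)
{
        long ktime = 0;

        /* A kernel-side buffer passed to a __user-typed parameter needs an
         * explicit forced cast, mirroring the set_fs(KERNEL_DS) call sites. */
        return fake_setsockopt((char __force_user *)&ktime, sizeof(ktime));
}
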
73015diff -urNp linux-3.1.4/net/core/datagram.c linux-3.1.4/net/core/datagram.c
73016--- linux-3.1.4/net/core/datagram.c 2011-11-11 15:19:27.000000000 -0500
73017+++ linux-3.1.4/net/core/datagram.c 2011-11-16 18:39:08.000000000 -0500
73018@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
73019 }
73020
73021 kfree_skb(skb);
73022- atomic_inc(&sk->sk_drops);
73023+ atomic_inc_unchecked(&sk->sk_drops);
73024 sk_mem_reclaim_partial(sk);
73025
73026 return err;
73027diff -urNp linux-3.1.4/net/core/dev.c linux-3.1.4/net/core/dev.c
73028--- linux-3.1.4/net/core/dev.c 2011-11-11 15:19:27.000000000 -0500
73029+++ linux-3.1.4/net/core/dev.c 2011-11-16 18:40:44.000000000 -0500
73030@@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const cha
73031 if (no_module && capable(CAP_NET_ADMIN))
73032 no_module = request_module("netdev-%s", name);
73033 if (no_module && capable(CAP_SYS_MODULE)) {
73034+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73035+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
73036+#else
73037 if (!request_module("%s", name))
73038 pr_err("Loading kernel module for a network device "
73039 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
73040 "instead\n", name);
73041+#endif
73042 }
73043 }
73044 EXPORT_SYMBOL(dev_load);
73045@@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_de
73046
73047 struct dev_gso_cb {
73048 void (*destructor)(struct sk_buff *skb);
73049-};
73050+} __no_const;
73051
73052 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
73053
73054@@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb)
73055 }
73056 EXPORT_SYMBOL(netif_rx_ni);
73057
73058-static void net_tx_action(struct softirq_action *h)
73059+static void net_tx_action(void)
73060 {
73061 struct softnet_data *sd = &__get_cpu_var(softnet_data);
73062
73063@@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *
73064 }
73065 EXPORT_SYMBOL(netif_napi_del);
73066
73067-static void net_rx_action(struct softirq_action *h)
73068+static void net_rx_action(void)
73069 {
73070 struct softnet_data *sd = &__get_cpu_var(softnet_data);
73071 unsigned long time_limit = jiffies + 2;
73072diff -urNp linux-3.1.4/net/core/flow.c linux-3.1.4/net/core/flow.c
73073--- linux-3.1.4/net/core/flow.c 2011-11-11 15:19:27.000000000 -0500
73074+++ linux-3.1.4/net/core/flow.c 2011-11-16 18:39:08.000000000 -0500
73075@@ -61,7 +61,7 @@ struct flow_cache {
73076 struct timer_list rnd_timer;
73077 };
73078
73079-atomic_t flow_cache_genid = ATOMIC_INIT(0);
73080+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
73081 EXPORT_SYMBOL(flow_cache_genid);
73082 static struct flow_cache flow_cache_global;
73083 static struct kmem_cache *flow_cachep __read_mostly;
73084@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsig
73085
73086 static int flow_entry_valid(struct flow_cache_entry *fle)
73087 {
73088- if (atomic_read(&flow_cache_genid) != fle->genid)
73089+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
73090 return 0;
73091 if (fle->object && !fle->object->ops->check(fle->object))
73092 return 0;
73093@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const
73094 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
73095 fcp->hash_count++;
73096 }
73097- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
73098+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
73099 flo = fle->object;
73100 if (!flo)
73101 goto ret_object;
73102@@ -280,7 +280,7 @@ nocache:
73103 }
73104 flo = resolver(net, key, family, dir, flo, ctx);
73105 if (fle) {
73106- fle->genid = atomic_read(&flow_cache_genid);
73107+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
73108 if (!IS_ERR(flo))
73109 fle->object = flo;
73110 else
73111diff -urNp linux-3.1.4/net/core/iovec.c linux-3.1.4/net/core/iovec.c
73112--- linux-3.1.4/net/core/iovec.c 2011-11-11 15:19:27.000000000 -0500
73113+++ linux-3.1.4/net/core/iovec.c 2011-11-16 18:39:08.000000000 -0500
73114@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
73115 if (m->msg_namelen) {
73116 if (mode == VERIFY_READ) {
73117 void __user *namep;
73118- namep = (void __user __force *) m->msg_name;
73119+ namep = (void __force_user *) m->msg_name;
73120 err = move_addr_to_kernel(namep, m->msg_namelen,
73121 address);
73122 if (err < 0)
73123@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
73124 }
73125
73126 size = m->msg_iovlen * sizeof(struct iovec);
73127- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
73128+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
73129 return -EFAULT;
73130
73131 m->msg_iov = iov;
73132diff -urNp linux-3.1.4/net/core/rtnetlink.c linux-3.1.4/net/core/rtnetlink.c
73133--- linux-3.1.4/net/core/rtnetlink.c 2011-11-11 15:19:27.000000000 -0500
73134+++ linux-3.1.4/net/core/rtnetlink.c 2011-11-16 18:39:08.000000000 -0500
73135@@ -57,7 +57,7 @@ struct rtnl_link {
73136 rtnl_doit_func doit;
73137 rtnl_dumpit_func dumpit;
73138 rtnl_calcit_func calcit;
73139-};
73140+} __no_const;
73141
73142 static DEFINE_MUTEX(rtnl_mutex);
73143 static u16 min_ifinfo_dump_size;
73144diff -urNp linux-3.1.4/net/core/scm.c linux-3.1.4/net/core/scm.c
73145--- linux-3.1.4/net/core/scm.c 2011-11-11 15:19:27.000000000 -0500
73146+++ linux-3.1.4/net/core/scm.c 2011-11-16 18:39:08.000000000 -0500
73147@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
73148 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
73149 {
73150 struct cmsghdr __user *cm
73151- = (__force struct cmsghdr __user *)msg->msg_control;
73152+ = (struct cmsghdr __force_user *)msg->msg_control;
73153 struct cmsghdr cmhdr;
73154 int cmlen = CMSG_LEN(len);
73155 int err;
73156@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
73157 err = -EFAULT;
73158 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
73159 goto out;
73160- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
73161+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
73162 goto out;
73163 cmlen = CMSG_SPACE(len);
73164 if (msg->msg_controllen < cmlen)
73165@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
73166 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
73167 {
73168 struct cmsghdr __user *cm
73169- = (__force struct cmsghdr __user*)msg->msg_control;
73170+ = (struct cmsghdr __force_user *)msg->msg_control;
73171
73172 int fdmax = 0;
73173 int fdnum = scm->fp->count;
73174@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
73175 if (fdnum < fdmax)
73176 fdmax = fdnum;
73177
73178- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
73179+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
73180 i++, cmfptr++)
73181 {
73182 int new_fd;
73183diff -urNp linux-3.1.4/net/core/skbuff.c linux-3.1.4/net/core/skbuff.c
73184--- linux-3.1.4/net/core/skbuff.c 2011-11-11 15:19:27.000000000 -0500
73185+++ linux-3.1.4/net/core/skbuff.c 2011-11-16 18:40:44.000000000 -0500
73186@@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb,
73187 struct sock *sk = skb->sk;
73188 int ret = 0;
73189
73190+ pax_track_stack();
73191+
73192 if (splice_grow_spd(pipe, &spd))
73193 return -ENOMEM;
73194
73195diff -urNp linux-3.1.4/net/core/sock.c linux-3.1.4/net/core/sock.c
73196--- linux-3.1.4/net/core/sock.c 2011-11-11 15:19:27.000000000 -0500
73197+++ linux-3.1.4/net/core/sock.c 2011-11-16 18:40:44.000000000 -0500
73198@@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk,
73199 */
73200 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
73201 (unsigned)sk->sk_rcvbuf) {
73202- atomic_inc(&sk->sk_drops);
73203+ atomic_inc_unchecked(&sk->sk_drops);
73204 trace_sock_rcvqueue_full(sk, skb);
73205 return -ENOMEM;
73206 }
73207@@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk,
73208 return err;
73209
73210 if (!sk_rmem_schedule(sk, skb->truesize)) {
73211- atomic_inc(&sk->sk_drops);
73212+ atomic_inc_unchecked(&sk->sk_drops);
73213 return -ENOBUFS;
73214 }
73215
73216@@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk,
73217 skb_dst_force(skb);
73218
73219 spin_lock_irqsave(&list->lock, flags);
73220- skb->dropcount = atomic_read(&sk->sk_drops);
73221+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
73222 __skb_queue_tail(list, skb);
73223 spin_unlock_irqrestore(&list->lock, flags);
73224
73225@@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, stru
73226 skb->dev = NULL;
73227
73228 if (sk_rcvqueues_full(sk, skb)) {
73229- atomic_inc(&sk->sk_drops);
73230+ atomic_inc_unchecked(&sk->sk_drops);
73231 goto discard_and_relse;
73232 }
73233 if (nested)
73234@@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, stru
73235 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
73236 } else if (sk_add_backlog(sk, skb)) {
73237 bh_unlock_sock(sk);
73238- atomic_inc(&sk->sk_drops);
73239+ atomic_inc_unchecked(&sk->sk_drops);
73240 goto discard_and_relse;
73241 }
73242
73243@@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock,
73244 if (len > sizeof(peercred))
73245 len = sizeof(peercred);
73246 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
73247- if (copy_to_user(optval, &peercred, len))
73248+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
73249 return -EFAULT;
73250 goto lenout;
73251 }
73252@@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock,
73253 return -ENOTCONN;
73254 if (lv < len)
73255 return -EINVAL;
73256- if (copy_to_user(optval, address, len))
73257+ if (len > sizeof(address) || copy_to_user(optval, address, len))
73258 return -EFAULT;
73259 goto lenout;
73260 }
73261@@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock,
73262
73263 if (len > lv)
73264 len = lv;
73265- if (copy_to_user(optval, &v, len))
73266+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
73267 return -EFAULT;
73268 lenout:
73269 if (put_user(len, optlen))
73270@@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock,
73271 */
73272 smp_wmb();
73273 atomic_set(&sk->sk_refcnt, 1);
73274- atomic_set(&sk->sk_drops, 0);
73275+ atomic_set_unchecked(&sk->sk_drops, 0);
73276 }
73277 EXPORT_SYMBOL(sock_init_data);
73278
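
A pattern repeated in the sock_getsockopt() hunks above (and again in the decnet sysctl and tcp_probe changes later in this patch) is to fail the copy when the requested length exceeds the kernel-side object, rather than relying solely on earlier clamping. A userspace sketch with memcpy standing in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy at most src_size bytes out; a larger request is rejected instead of
 * over-reading the kernel-side object. */
static int bounded_copy_out(void *dst, size_t len, const void *src, size_t src_size)
{
        if (len > src_size)
                return -EFAULT;
        memcpy(dst, src, len);          /* stands in for copy_to_user() */
        return 0;
}

int main(void)
{
        int v = 42;
        char out[64];
        size_t want = sizeof(out);      /* over-long request a caller might pass */

        if (bounded_copy_out(out, want, &v, sizeof(v)) != 0)
                printf("rejected a %zu-byte request for a %zu-byte object\n",
                       want, sizeof(v));
        return 0;
}
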
73279diff -urNp linux-3.1.4/net/decnet/sysctl_net_decnet.c linux-3.1.4/net/decnet/sysctl_net_decnet.c
73280--- linux-3.1.4/net/decnet/sysctl_net_decnet.c 2011-11-11 15:19:27.000000000 -0500
73281+++ linux-3.1.4/net/decnet/sysctl_net_decnet.c 2011-11-16 18:39:08.000000000 -0500
73282@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_t
73283
73284 if (len > *lenp) len = *lenp;
73285
73286- if (copy_to_user(buffer, addr, len))
73287+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
73288 return -EFAULT;
73289
73290 *lenp = len;
73291@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table
73292
73293 if (len > *lenp) len = *lenp;
73294
73295- if (copy_to_user(buffer, devname, len))
73296+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
73297 return -EFAULT;
73298
73299 *lenp = len;
73300diff -urNp linux-3.1.4/net/econet/Kconfig linux-3.1.4/net/econet/Kconfig
73301--- linux-3.1.4/net/econet/Kconfig 2011-11-11 15:19:27.000000000 -0500
73302+++ linux-3.1.4/net/econet/Kconfig 2011-11-16 18:40:44.000000000 -0500
73303@@ -4,7 +4,7 @@
73304
73305 config ECONET
73306 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
73307- depends on EXPERIMENTAL && INET
73308+ depends on EXPERIMENTAL && INET && BROKEN
73309 ---help---
73310 Econet is a fairly old and slow networking protocol mainly used by
73311 Acorn computers to access file and print servers. It uses native
73312diff -urNp linux-3.1.4/net/ipv4/fib_frontend.c linux-3.1.4/net/ipv4/fib_frontend.c
73313--- linux-3.1.4/net/ipv4/fib_frontend.c 2011-11-11 15:19:27.000000000 -0500
73314+++ linux-3.1.4/net/ipv4/fib_frontend.c 2011-11-16 18:39:08.000000000 -0500
73315@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
73316 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73317 fib_sync_up(dev);
73318 #endif
73319- atomic_inc(&net->ipv4.dev_addr_genid);
73320+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73321 rt_cache_flush(dev_net(dev), -1);
73322 break;
73323 case NETDEV_DOWN:
73324 fib_del_ifaddr(ifa, NULL);
73325- atomic_inc(&net->ipv4.dev_addr_genid);
73326+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73327 if (ifa->ifa_dev->ifa_list == NULL) {
73328 /* Last address was deleted from this interface.
73329 * Disable IP.
73330@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
73331 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73332 fib_sync_up(dev);
73333 #endif
73334- atomic_inc(&net->ipv4.dev_addr_genid);
73335+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73336 rt_cache_flush(dev_net(dev), -1);
73337 break;
73338 case NETDEV_DOWN:
73339diff -urNp linux-3.1.4/net/ipv4/fib_semantics.c linux-3.1.4/net/ipv4/fib_semantics.c
73340--- linux-3.1.4/net/ipv4/fib_semantics.c 2011-11-11 15:19:27.000000000 -0500
73341+++ linux-3.1.4/net/ipv4/fib_semantics.c 2011-11-16 18:39:08.000000000 -0500
73342@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct n
73343 nh->nh_saddr = inet_select_addr(nh->nh_dev,
73344 nh->nh_gw,
73345 nh->nh_parent->fib_scope);
73346- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
73347+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
73348
73349 return nh->nh_saddr;
73350 }
73351diff -urNp linux-3.1.4/net/ipv4/inet_diag.c linux-3.1.4/net/ipv4/inet_diag.c
73352--- linux-3.1.4/net/ipv4/inet_diag.c 2011-11-11 15:19:27.000000000 -0500
73353+++ linux-3.1.4/net/ipv4/inet_diag.c 2011-11-16 18:40:44.000000000 -0500
73354@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
73355 r->idiag_retrans = 0;
73356
73357 r->id.idiag_if = sk->sk_bound_dev_if;
73358+
73359+#ifdef CONFIG_GRKERNSEC_HIDESYM
73360+ r->id.idiag_cookie[0] = 0;
73361+ r->id.idiag_cookie[1] = 0;
73362+#else
73363 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
73364 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
73365+#endif
73366
73367 r->id.idiag_sport = inet->inet_sport;
73368 r->id.idiag_dport = inet->inet_dport;
73369@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
73370 r->idiag_family = tw->tw_family;
73371 r->idiag_retrans = 0;
73372 r->id.idiag_if = tw->tw_bound_dev_if;
73373+
73374+#ifdef CONFIG_GRKERNSEC_HIDESYM
73375+ r->id.idiag_cookie[0] = 0;
73376+ r->id.idiag_cookie[1] = 0;
73377+#else
73378 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
73379 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
73380+#endif
73381+
73382 r->id.idiag_sport = tw->tw_sport;
73383 r->id.idiag_dport = tw->tw_dport;
73384 r->id.idiag_src[0] = tw->tw_rcv_saddr;
73385@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
73386 if (sk == NULL)
73387 goto unlock;
73388
73389+#ifndef CONFIG_GRKERNSEC_HIDESYM
73390 err = -ESTALE;
73391 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
73392 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
73393 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
73394 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
73395 goto out;
73396+#endif
73397
73398 err = -ENOMEM;
73399 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
73400@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
73401 r->idiag_retrans = req->retrans;
73402
73403 r->id.idiag_if = sk->sk_bound_dev_if;
73404+
73405+#ifdef CONFIG_GRKERNSEC_HIDESYM
73406+ r->id.idiag_cookie[0] = 0;
73407+ r->id.idiag_cookie[1] = 0;
73408+#else
73409 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
73410 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
73411+#endif
73412
73413 tmo = req->expires - jiffies;
73414 if (tmo < 0)
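
The inet_diag.c hunks above belong to GRKERNSEC_HIDESYM: the idiag cookie normally encodes the socket's kernel address, and with the option enabled it is zeroed (and the matching cookie check in inet_diag_get_exact() compiled out) so kernel pointers are not exported to userspace. A simplified sketch with invented structure names:

#include <stdint.h>
#include <stdio.h>

struct diag_id {
        uint32_t cookie[2];
};

/* Fill the cookie the way the patched inet_csk_diag_fill() does: either a
 * split kernel address, or zeroes when pointer hiding is enabled. */
static void fill_cookie(struct diag_id *id, const void *sk, int hidesym)
{
        if (hidesym) {
                id->cookie[0] = 0;
                id->cookie[1] = 0;
        } else {
                uintptr_t addr = (uintptr_t)sk;

                id->cookie[0] = (uint32_t)addr;
                id->cookie[1] = (uint32_t)((addr >> 31) >> 1);
        }
}

int main(void)
{
        struct diag_id id;
        int dummy;

        fill_cookie(&id, &dummy, 1);
        printf("hidden cookie: %08x %08x\n",
               (unsigned int)id.cookie[0], (unsigned int)id.cookie[1]);
        return 0;
}
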
73415diff -urNp linux-3.1.4/net/ipv4/inet_hashtables.c linux-3.1.4/net/ipv4/inet_hashtables.c
73416--- linux-3.1.4/net/ipv4/inet_hashtables.c 2011-11-11 15:19:27.000000000 -0500
73417+++ linux-3.1.4/net/ipv4/inet_hashtables.c 2011-11-16 18:40:44.000000000 -0500
73418@@ -18,12 +18,15 @@
73419 #include <linux/sched.h>
73420 #include <linux/slab.h>
73421 #include <linux/wait.h>
73422+#include <linux/security.h>
73423
73424 #include <net/inet_connection_sock.h>
73425 #include <net/inet_hashtables.h>
73426 #include <net/secure_seq.h>
73427 #include <net/ip.h>
73428
73429+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
73430+
73431 /*
73432 * Allocate and initialize a new local port bind bucket.
73433 * The bindhash mutex for snum's hash chain must be held here.
73434@@ -530,6 +533,8 @@ ok:
73435 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
73436 spin_unlock(&head->lock);
73437
73438+ gr_update_task_in_ip_table(current, inet_sk(sk));
73439+
73440 if (tw) {
73441 inet_twsk_deschedule(tw, death_row);
73442 while (twrefcnt) {
73443diff -urNp linux-3.1.4/net/ipv4/inetpeer.c linux-3.1.4/net/ipv4/inetpeer.c
73444--- linux-3.1.4/net/ipv4/inetpeer.c 2011-11-11 15:19:27.000000000 -0500
73445+++ linux-3.1.4/net/ipv4/inetpeer.c 2011-11-16 19:18:22.000000000 -0500
73446@@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const str
73447 unsigned int sequence;
73448 int invalidated, gccnt = 0;
73449
73450+ pax_track_stack();
73451+
73452 /* Attempt a lockless lookup first.
73453 * Because of a concurrent writer, we might not find an existing entry.
73454 */
73455@@ -436,8 +438,8 @@ relookup:
73456 if (p) {
73457 p->daddr = *daddr;
73458 atomic_set(&p->refcnt, 1);
73459- atomic_set(&p->rid, 0);
73460- atomic_set(&p->ip_id_count,
73461+ atomic_set_unchecked(&p->rid, 0);
73462+ atomic_set_unchecked(&p->ip_id_count,
73463 (daddr->family == AF_INET) ?
73464 secure_ip_id(daddr->addr.a4) :
73465 secure_ipv6_id(daddr->addr.a6));
73466diff -urNp linux-3.1.4/net/ipv4/ipconfig.c linux-3.1.4/net/ipv4/ipconfig.c
73467--- linux-3.1.4/net/ipv4/ipconfig.c 2011-11-11 15:19:27.000000000 -0500
73468+++ linux-3.1.4/net/ipv4/ipconfig.c 2011-11-16 18:39:08.000000000 -0500
73469@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
73470
73471 mm_segment_t oldfs = get_fs();
73472 set_fs(get_ds());
73473- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73474+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73475 set_fs(oldfs);
73476 return res;
73477 }
73478@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
73479
73480 mm_segment_t oldfs = get_fs();
73481 set_fs(get_ds());
73482- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73483+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73484 set_fs(oldfs);
73485 return res;
73486 }
73487@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
73488
73489 mm_segment_t oldfs = get_fs();
73490 set_fs(get_ds());
73491- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
73492+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
73493 set_fs(oldfs);
73494 return res;
73495 }
73496diff -urNp linux-3.1.4/net/ipv4/ip_fragment.c linux-3.1.4/net/ipv4/ip_fragment.c
73497--- linux-3.1.4/net/ipv4/ip_fragment.c 2011-11-11 15:19:27.000000000 -0500
73498+++ linux-3.1.4/net/ipv4/ip_fragment.c 2011-11-16 18:39:08.000000000 -0500
73499@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct
73500 return 0;
73501
73502 start = qp->rid;
73503- end = atomic_inc_return(&peer->rid);
73504+ end = atomic_inc_return_unchecked(&peer->rid);
73505 qp->rid = end;
73506
73507 rc = qp->q.fragments && (end - start) > max;
73508diff -urNp linux-3.1.4/net/ipv4/ip_sockglue.c linux-3.1.4/net/ipv4/ip_sockglue.c
73509--- linux-3.1.4/net/ipv4/ip_sockglue.c 2011-11-11 15:19:27.000000000 -0500
73510+++ linux-3.1.4/net/ipv4/ip_sockglue.c 2011-11-16 18:40:44.000000000 -0500
73511@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
73512 int val;
73513 int len;
73514
73515+ pax_track_stack();
73516+
73517 if (level != SOL_IP)
73518 return -EOPNOTSUPP;
73519
73520@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
73521 len = min_t(unsigned int, len, opt->optlen);
73522 if (put_user(len, optlen))
73523 return -EFAULT;
73524- if (copy_to_user(optval, opt->__data, len))
73525+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
73526+ copy_to_user(optval, opt->__data, len))
73527 return -EFAULT;
73528 return 0;
73529 }
73530@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
73531 if (sk->sk_type != SOCK_STREAM)
73532 return -ENOPROTOOPT;
73533
73534- msg.msg_control = optval;
73535+ msg.msg_control = (void __force_kernel *)optval;
73536 msg.msg_controllen = len;
73537 msg.msg_flags = flags;
73538
73539diff -urNp linux-3.1.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.1.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
73540--- linux-3.1.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-11 15:19:27.000000000 -0500
73541+++ linux-3.1.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-16 18:39:08.000000000 -0500
73542@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
73543
73544 *len = 0;
73545
73546- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
73547+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
73548 if (*octets == NULL) {
73549 if (net_ratelimit())
73550 pr_notice("OOM in bsalg (%d)\n", __LINE__);
73551diff -urNp linux-3.1.4/net/ipv4/ping.c linux-3.1.4/net/ipv4/ping.c
73552--- linux-3.1.4/net/ipv4/ping.c 2011-11-11 15:19:27.000000000 -0500
73553+++ linux-3.1.4/net/ipv4/ping.c 2011-11-16 18:39:08.000000000 -0500
73554@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
73555 sk_rmem_alloc_get(sp),
73556 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73557 atomic_read(&sp->sk_refcnt), sp,
73558- atomic_read(&sp->sk_drops), len);
73559+ atomic_read_unchecked(&sp->sk_drops), len);
73560 }
73561
73562 static int ping_seq_show(struct seq_file *seq, void *v)
73563diff -urNp linux-3.1.4/net/ipv4/raw.c linux-3.1.4/net/ipv4/raw.c
73564--- linux-3.1.4/net/ipv4/raw.c 2011-11-11 15:19:27.000000000 -0500
73565+++ linux-3.1.4/net/ipv4/raw.c 2011-11-17 18:58:40.000000000 -0500
73566@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
73567 int raw_rcv(struct sock *sk, struct sk_buff *skb)
73568 {
73569 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
73570- atomic_inc(&sk->sk_drops);
73571+ atomic_inc_unchecked(&sk->sk_drops);
73572 kfree_skb(skb);
73573 return NET_RX_DROP;
73574 }
73575@@ -737,16 +737,20 @@ static int raw_init(struct sock *sk)
73576
73577 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
73578 {
73579+ struct icmp_filter filter;
73580+
73581 if (optlen > sizeof(struct icmp_filter))
73582 optlen = sizeof(struct icmp_filter);
73583- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
73584+ if (copy_from_user(&filter, optval, optlen))
73585 return -EFAULT;
73586+ raw_sk(sk)->filter = filter;
73587 return 0;
73588 }
73589
73590 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
73591 {
73592 int len, ret = -EFAULT;
73593+ struct icmp_filter filter;
73594
73595 if (get_user(len, optlen))
73596 goto out;
73597@@ -756,8 +760,8 @@ static int raw_geticmpfilter(struct sock
73598 if (len > sizeof(struct icmp_filter))
73599 len = sizeof(struct icmp_filter);
73600 ret = -EFAULT;
73601- if (put_user(len, optlen) ||
73602- copy_to_user(optval, &raw_sk(sk)->filter, len))
73603+ filter = raw_sk(sk)->filter;
73604+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
73605 goto out;
73606 ret = 0;
73607 out: return ret;
73608@@ -985,7 +989,13 @@ static void raw_sock_seq_show(struct seq
73609 sk_wmem_alloc_get(sp),
73610 sk_rmem_alloc_get(sp),
73611 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73612- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73613+ atomic_read(&sp->sk_refcnt),
73614+#ifdef CONFIG_GRKERNSEC_HIDESYM
73615+ NULL,
73616+#else
73617+ sp,
73618+#endif
73619+ atomic_read_unchecked(&sp->sk_drops));
73620 }
73621
73622 static int raw_seq_show(struct seq_file *seq, void *v)
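
In raw_seticmpfilter()/raw_geticmpfilter() above, the user copy no longer targets the live socket's filter directly; it goes through a stack-local struct icmp_filter, and the get path additionally refuses lengths larger than that local. A userspace sketch of the set path, using memcpy in place of copy_from_user() and a simplified filter layout (seeding the local from the current value is this sketch's own precaution, not something the patch does):

#include <string.h>

/* Simplified stand-in for struct icmp_filter. */
struct icmp_filter {
        unsigned int data;
};

struct raw_sock {
        struct icmp_filter filter;
};

static int set_icmp_filter(struct raw_sock *rs, const void *optval, size_t optlen)
{
        /* seed so a short copy cannot publish uninitialized stack bytes */
        struct icmp_filter filter = rs->filter;

        if (optlen > sizeof(filter))
                optlen = sizeof(filter);
        memcpy(&filter, optval, optlen);        /* stands in for copy_from_user() */
        rs->filter = filter;                    /* publish only after the copy succeeded */
        return 0;
}

int main(void)
{
        struct raw_sock rs = { { 0 } };
        struct icmp_filter requested = { 0xffffffffu };

        return set_icmp_filter(&rs, &requested, sizeof(requested));
}
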
73623diff -urNp linux-3.1.4/net/ipv4/route.c linux-3.1.4/net/ipv4/route.c
73624--- linux-3.1.4/net/ipv4/route.c 2011-11-11 15:19:27.000000000 -0500
73625+++ linux-3.1.4/net/ipv4/route.c 2011-11-16 18:39:08.000000000 -0500
73626@@ -308,7 +308,7 @@ static inline unsigned int rt_hash(__be3
73627
73628 static inline int rt_genid(struct net *net)
73629 {
73630- return atomic_read(&net->ipv4.rt_genid);
73631+ return atomic_read_unchecked(&net->ipv4.rt_genid);
73632 }
73633
73634 #ifdef CONFIG_PROC_FS
73635@@ -837,7 +837,7 @@ static void rt_cache_invalidate(struct n
73636 unsigned char shuffle;
73637
73638 get_random_bytes(&shuffle, sizeof(shuffle));
73639- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
73640+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
73641 }
73642
73643 /*
73644@@ -2872,7 +2872,7 @@ static int rt_fill_info(struct net *net,
73645 error = rt->dst.error;
73646 if (peer) {
73647 inet_peer_refcheck(rt->peer);
73648- id = atomic_read(&peer->ip_id_count) & 0xffff;
73649+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
73650 if (peer->tcp_ts_stamp) {
73651 ts = peer->tcp_ts;
73652 tsage = get_seconds() - peer->tcp_ts_stamp;
73653diff -urNp linux-3.1.4/net/ipv4/tcp.c linux-3.1.4/net/ipv4/tcp.c
73654--- linux-3.1.4/net/ipv4/tcp.c 2011-11-11 15:19:27.000000000 -0500
73655+++ linux-3.1.4/net/ipv4/tcp.c 2011-11-16 18:40:44.000000000 -0500
73656@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
73657 int val;
73658 int err = 0;
73659
73660+ pax_track_stack();
73661+
73662 /* These are data/string values, all the others are ints */
73663 switch (optname) {
73664 case TCP_CONGESTION: {
73665@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
73666 struct tcp_sock *tp = tcp_sk(sk);
73667 int val, len;
73668
73669+ pax_track_stack();
73670+
73671 if (get_user(len, optlen))
73672 return -EFAULT;
73673
73674diff -urNp linux-3.1.4/net/ipv4/tcp_ipv4.c linux-3.1.4/net/ipv4/tcp_ipv4.c
73675--- linux-3.1.4/net/ipv4/tcp_ipv4.c 2011-11-11 15:19:27.000000000 -0500
73676+++ linux-3.1.4/net/ipv4/tcp_ipv4.c 2011-11-16 18:40:44.000000000 -0500
73677@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73678 int sysctl_tcp_low_latency __read_mostly;
73679 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73680
73681+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73682+extern int grsec_enable_blackhole;
73683+#endif
73684
73685 #ifdef CONFIG_TCP_MD5SIG
73686 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73687@@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
73688 return 0;
73689
73690 reset:
73691+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73692+ if (!grsec_enable_blackhole)
73693+#endif
73694 tcp_v4_send_reset(rsk, skb);
73695 discard:
73696 kfree_skb(skb);
73697@@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73698 TCP_SKB_CB(skb)->sacked = 0;
73699
73700 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73701- if (!sk)
73702+ if (!sk) {
73703+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73704+ ret = 1;
73705+#endif
73706 goto no_tcp_socket;
73707-
73708+ }
73709 process:
73710- if (sk->sk_state == TCP_TIME_WAIT)
73711+ if (sk->sk_state == TCP_TIME_WAIT) {
73712+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73713+ ret = 2;
73714+#endif
73715 goto do_time_wait;
73716+ }
73717
73718 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73719 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73720@@ -1739,6 +1752,10 @@ no_tcp_socket:
73721 bad_packet:
73722 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73723 } else {
73724+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73725+ if (!grsec_enable_blackhole || (ret == 1 &&
73726+ (skb->dev->flags & IFF_LOOPBACK)))
73727+#endif
73728 tcp_v4_send_reset(NULL, skb);
73729 }
73730
73731@@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk
73732 0, /* non standard timer */
73733 0, /* open_requests have no inode */
73734 atomic_read(&sk->sk_refcnt),
73735+#ifdef CONFIG_GRKERNSEC_HIDESYM
73736+ NULL,
73737+#else
73738 req,
73739+#endif
73740 len);
73741 }
73742
73743@@ -2453,7 +2474,12 @@ static void get_tcp4_sock(struct sock *s
73744 sock_i_uid(sk),
73745 icsk->icsk_probes_out,
73746 sock_i_ino(sk),
73747- atomic_read(&sk->sk_refcnt), sk,
73748+ atomic_read(&sk->sk_refcnt),
73749+#ifdef CONFIG_GRKERNSEC_HIDESYM
73750+ NULL,
73751+#else
73752+ sk,
73753+#endif
73754 jiffies_to_clock_t(icsk->icsk_rto),
73755 jiffies_to_clock_t(icsk->icsk_ack.ato),
73756 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73757@@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct in
73758 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73759 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73760 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73761- atomic_read(&tw->tw_refcnt), tw, len);
73762+ atomic_read(&tw->tw_refcnt),
73763+#ifdef CONFIG_GRKERNSEC_HIDESYM
73764+ NULL,
73765+#else
73766+ tw,
73767+#endif
73768+ len);
73769 }
73770
73771 #define TMPSZ 150
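
The tcp_ipv4.c hunks above wire up GRKERNSEC_BLACKHOLE for TCP: when no socket matched the segment (ret == 1) and the sysctl is on, the RST that would normally answer the stray segment is suppressed unless the packet arrived on a loopback device. The decision reduces to a small predicate, sketched here on its own:

#include <stdbool.h>
#include <stdio.h>

static int grsec_enable_blackhole = 1; /* the sysctl the patch declares extern */

/* Mirror of the condition guarding tcp_v4_send_reset() in the no_tcp_socket
 * path: reply only if blackholing is off, or the lookup failure (ret == 1)
 * happened on a loopback interface. */
static bool should_send_reset(int ret, bool dev_is_loopback)
{
        return !grsec_enable_blackhole || (ret == 1 && dev_is_loopback);
}

int main(void)
{
        printf("external probe, no socket: %s\n",
               should_send_reset(1, false) ? "send RST" : "drop silently");
        printf("loopback probe, no socket: %s\n",
               should_send_reset(1, true) ? "send RST" : "drop silently");
        return 0;
}
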
73772diff -urNp linux-3.1.4/net/ipv4/tcp_minisocks.c linux-3.1.4/net/ipv4/tcp_minisocks.c
73773--- linux-3.1.4/net/ipv4/tcp_minisocks.c 2011-11-11 15:19:27.000000000 -0500
73774+++ linux-3.1.4/net/ipv4/tcp_minisocks.c 2011-11-16 18:40:44.000000000 -0500
73775@@ -27,6 +27,10 @@
73776 #include <net/inet_common.h>
73777 #include <net/xfrm.h>
73778
73779+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73780+extern int grsec_enable_blackhole;
73781+#endif
73782+
73783 int sysctl_tcp_syncookies __read_mostly = 1;
73784 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73785
73786@@ -750,6 +754,10 @@ listen_overflow:
73787
73788 embryonic_reset:
73789 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73790+
73791+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73792+ if (!grsec_enable_blackhole)
73793+#endif
73794 if (!(flg & TCP_FLAG_RST))
73795 req->rsk_ops->send_reset(sk, skb);
73796
73797diff -urNp linux-3.1.4/net/ipv4/tcp_output.c linux-3.1.4/net/ipv4/tcp_output.c
73798--- linux-3.1.4/net/ipv4/tcp_output.c 2011-11-11 15:19:27.000000000 -0500
73799+++ linux-3.1.4/net/ipv4/tcp_output.c 2011-11-16 18:40:44.000000000 -0500
73800@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
73801 int mss;
73802 int s_data_desired = 0;
73803
73804+ pax_track_stack();
73805+
73806 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
73807 s_data_desired = cvp->s_data_desired;
73808 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
73809diff -urNp linux-3.1.4/net/ipv4/tcp_probe.c linux-3.1.4/net/ipv4/tcp_probe.c
73810--- linux-3.1.4/net/ipv4/tcp_probe.c 2011-11-11 15:19:27.000000000 -0500
73811+++ linux-3.1.4/net/ipv4/tcp_probe.c 2011-11-16 18:39:08.000000000 -0500
73812@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
73813 if (cnt + width >= len)
73814 break;
73815
73816- if (copy_to_user(buf + cnt, tbuf, width))
73817+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73818 return -EFAULT;
73819 cnt += width;
73820 }
73821diff -urNp linux-3.1.4/net/ipv4/tcp_timer.c linux-3.1.4/net/ipv4/tcp_timer.c
73822--- linux-3.1.4/net/ipv4/tcp_timer.c 2011-11-11 15:19:27.000000000 -0500
73823+++ linux-3.1.4/net/ipv4/tcp_timer.c 2011-11-16 18:40:44.000000000 -0500
73824@@ -22,6 +22,10 @@
73825 #include <linux/gfp.h>
73826 #include <net/tcp.h>
73827
73828+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73829+extern int grsec_lastack_retries;
73830+#endif
73831+
73832 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73833 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73834 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73835@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
73836 }
73837 }
73838
73839+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73840+ if ((sk->sk_state == TCP_LAST_ACK) &&
73841+ (grsec_lastack_retries > 0) &&
73842+ (grsec_lastack_retries < retry_until))
73843+ retry_until = grsec_lastack_retries;
73844+#endif
73845+
73846 if (retransmits_timed_out(sk, retry_until,
73847 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73848 /* Has it gone just too far? */
73849diff -urNp linux-3.1.4/net/ipv4/udp.c linux-3.1.4/net/ipv4/udp.c
73850--- linux-3.1.4/net/ipv4/udp.c 2011-11-11 15:19:27.000000000 -0500
73851+++ linux-3.1.4/net/ipv4/udp.c 2011-11-16 19:17:54.000000000 -0500
73852@@ -86,6 +86,7 @@
73853 #include <linux/types.h>
73854 #include <linux/fcntl.h>
73855 #include <linux/module.h>
73856+#include <linux/security.h>
73857 #include <linux/socket.h>
73858 #include <linux/sockios.h>
73859 #include <linux/igmp.h>
73860@@ -108,6 +109,10 @@
73861 #include <trace/events/udp.h>
73862 #include "udp_impl.h"
73863
73864+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73865+extern int grsec_enable_blackhole;
73866+#endif
73867+
73868 struct udp_table udp_table __read_mostly;
73869 EXPORT_SYMBOL(udp_table);
73870
73871@@ -565,6 +570,9 @@ found:
73872 return s;
73873 }
73874
73875+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73876+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73877+
73878 /*
73879 * This routine is called by the ICMP module when it gets some
73880 * sort of error condition. If err < 0 then the socket should
73881@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
73882 dport = usin->sin_port;
73883 if (dport == 0)
73884 return -EINVAL;
73885+
73886+ err = gr_search_udp_sendmsg(sk, usin);
73887+ if (err)
73888+ return err;
73889 } else {
73890 if (sk->sk_state != TCP_ESTABLISHED)
73891 return -EDESTADDRREQ;
73892+
73893+ err = gr_search_udp_sendmsg(sk, NULL);
73894+ if (err)
73895+ return err;
73896+
73897 daddr = inet->inet_daddr;
73898 dport = inet->inet_dport;
73899 /* Open fast path for connected socket.
73900@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(
73901 udp_lib_checksum_complete(skb)) {
73902 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73903 IS_UDPLITE(sk));
73904- atomic_inc(&sk->sk_drops);
73905+ atomic_inc_unchecked(&sk->sk_drops);
73906 __skb_unlink(skb, rcvq);
73907 __skb_queue_tail(&list_kill, skb);
73908 }
73909@@ -1185,6 +1202,10 @@ try_again:
73910 if (!skb)
73911 goto out;
73912
73913+ err = gr_search_udp_recvmsg(sk, skb);
73914+ if (err)
73915+ goto out_free;
73916+
73917 ulen = skb->len - sizeof(struct udphdr);
73918 if (len > ulen)
73919 len = ulen;
73920@@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
73921
73922 drop:
73923 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73924- atomic_inc(&sk->sk_drops);
73925+ atomic_inc_unchecked(&sk->sk_drops);
73926 kfree_skb(skb);
73927 return -1;
73928 }
73929@@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **st
73930 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73931
73932 if (!skb1) {
73933- atomic_inc(&sk->sk_drops);
73934+ atomic_inc_unchecked(&sk->sk_drops);
73935 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73936 IS_UDPLITE(sk));
73937 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73938@@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
73939 goto csum_error;
73940
73941 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73942+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73943+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73944+#endif
73945 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73946
73947 /*
73948@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock
73949 sk_wmem_alloc_get(sp),
73950 sk_rmem_alloc_get(sp),
73951 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73952- atomic_read(&sp->sk_refcnt), sp,
73953- atomic_read(&sp->sk_drops), len);
73954+ atomic_read(&sp->sk_refcnt),
73955+#ifdef CONFIG_GRKERNSEC_HIDESYM
73956+ NULL,
73957+#else
73958+ sp,
73959+#endif
73960+ atomic_read_unchecked(&sp->sk_drops), len);
73961 }
73962
73963 int udp4_seq_show(struct seq_file *seq, void *v)
73964diff -urNp linux-3.1.4/net/ipv6/addrconf.c linux-3.1.4/net/ipv6/addrconf.c
73965--- linux-3.1.4/net/ipv6/addrconf.c 2011-11-11 15:19:27.000000000 -0500
73966+++ linux-3.1.4/net/ipv6/addrconf.c 2011-11-16 18:39:08.000000000 -0500
73967@@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net
73968 p.iph.ihl = 5;
73969 p.iph.protocol = IPPROTO_IPV6;
73970 p.iph.ttl = 64;
73971- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73972+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73973
73974 if (ops->ndo_do_ioctl) {
73975 mm_segment_t oldfs = get_fs();
73976diff -urNp linux-3.1.4/net/ipv6/inet6_connection_sock.c linux-3.1.4/net/ipv6/inet6_connection_sock.c
73977--- linux-3.1.4/net/ipv6/inet6_connection_sock.c 2011-11-11 15:19:27.000000000 -0500
73978+++ linux-3.1.4/net/ipv6/inet6_connection_sock.c 2011-11-16 18:39:08.000000000 -0500
73979@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
73980 #ifdef CONFIG_XFRM
73981 {
73982 struct rt6_info *rt = (struct rt6_info *)dst;
73983- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73984+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73985 }
73986 #endif
73987 }
73988@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
73989 #ifdef CONFIG_XFRM
73990 if (dst) {
73991 struct rt6_info *rt = (struct rt6_info *)dst;
73992- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73993+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73994 __sk_dst_reset(sk);
73995 dst = NULL;
73996 }
73997diff -urNp linux-3.1.4/net/ipv6/ipv6_sockglue.c linux-3.1.4/net/ipv6/ipv6_sockglue.c
73998--- linux-3.1.4/net/ipv6/ipv6_sockglue.c 2011-11-11 15:19:27.000000000 -0500
73999+++ linux-3.1.4/net/ipv6/ipv6_sockglue.c 2011-11-16 18:40:44.000000000 -0500
74000@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
74001 int val, valbool;
74002 int retv = -ENOPROTOOPT;
74003
74004+ pax_track_stack();
74005+
74006 if (optval == NULL)
74007 val=0;
74008 else {
74009@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
74010 int len;
74011 int val;
74012
74013+ pax_track_stack();
74014+
74015 if (ip6_mroute_opt(optname))
74016 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
74017
74018@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
74019 if (sk->sk_type != SOCK_STREAM)
74020 return -ENOPROTOOPT;
74021
74022- msg.msg_control = optval;
74023+ msg.msg_control = (void __force_kernel *)optval;
74024 msg.msg_controllen = len;
74025 msg.msg_flags = flags;
74026
74027diff -urNp linux-3.1.4/net/ipv6/raw.c linux-3.1.4/net/ipv6/raw.c
74028--- linux-3.1.4/net/ipv6/raw.c 2011-11-11 15:19:27.000000000 -0500
74029+++ linux-3.1.4/net/ipv6/raw.c 2011-11-16 18:40:44.000000000 -0500
74030@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
74031 {
74032 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
74033 skb_checksum_complete(skb)) {
74034- atomic_inc(&sk->sk_drops);
74035+ atomic_inc_unchecked(&sk->sk_drops);
74036 kfree_skb(skb);
74037 return NET_RX_DROP;
74038 }
74039@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
74040 struct raw6_sock *rp = raw6_sk(sk);
74041
74042 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
74043- atomic_inc(&sk->sk_drops);
74044+ atomic_inc_unchecked(&sk->sk_drops);
74045 kfree_skb(skb);
74046 return NET_RX_DROP;
74047 }
74048@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
74049
74050 if (inet->hdrincl) {
74051 if (skb_checksum_complete(skb)) {
74052- atomic_inc(&sk->sk_drops);
74053+ atomic_inc_unchecked(&sk->sk_drops);
74054 kfree_skb(skb);
74055 return NET_RX_DROP;
74056 }
74057@@ -601,7 +601,7 @@ out:
74058 return err;
74059 }
74060
74061-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
74062+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
74063 struct flowi6 *fl6, struct dst_entry **dstp,
74064 unsigned int flags)
74065 {
74066@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
74067 u16 proto;
74068 int err;
74069
74070+ pax_track_stack();
74071+
74072 /* Rough check on arithmetic overflow,
74073 better check is made in ip6_append_data().
74074 */
74075@@ -909,12 +911,15 @@ do_confirm:
74076 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
74077 char __user *optval, int optlen)
74078 {
74079+ struct icmp6_filter filter;
74080+
74081 switch (optname) {
74082 case ICMPV6_FILTER:
74083 if (optlen > sizeof(struct icmp6_filter))
74084 optlen = sizeof(struct icmp6_filter);
74085- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
74086+ if (copy_from_user(&filter, optval, optlen))
74087 return -EFAULT;
74088+ raw6_sk(sk)->filter = filter;
74089 return 0;
74090 default:
74091 return -ENOPROTOOPT;
74092@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
74093 char __user *optval, int __user *optlen)
74094 {
74095 int len;
74096+ struct icmp6_filter filter;
74097
74098 switch (optname) {
74099 case ICMPV6_FILTER:
74100@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
74101 len = sizeof(struct icmp6_filter);
74102 if (put_user(len, optlen))
74103 return -EFAULT;
74104- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
74105+ filter = raw6_sk(sk)->filter;
74106+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
74107 return -EFAULT;
74108 return 0;
74109 default:
74110@@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct se
74111 0, 0L, 0,
74112 sock_i_uid(sp), 0,
74113 sock_i_ino(sp),
74114- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74115+ atomic_read(&sp->sk_refcnt),
74116+#ifdef CONFIG_GRKERNSEC_HIDESYM
74117+ NULL,
74118+#else
74119+ sp,
74120+#endif
74121+ atomic_read_unchecked(&sp->sk_drops));
74122 }
74123
74124 static int raw6_seq_show(struct seq_file *seq, void *v)
74125diff -urNp linux-3.1.4/net/ipv6/tcp_ipv6.c linux-3.1.4/net/ipv6/tcp_ipv6.c
74126--- linux-3.1.4/net/ipv6/tcp_ipv6.c 2011-11-11 15:19:27.000000000 -0500
74127+++ linux-3.1.4/net/ipv6/tcp_ipv6.c 2011-11-16 18:40:44.000000000 -0500
74128@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
74129 }
74130 #endif
74131
74132+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74133+extern int grsec_enable_blackhole;
74134+#endif
74135+
74136 static void tcp_v6_hash(struct sock *sk)
74137 {
74138 if (sk->sk_state != TCP_CLOSE) {
74139@@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk
74140 return 0;
74141
74142 reset:
74143+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74144+ if (!grsec_enable_blackhole)
74145+#endif
74146 tcp_v6_send_reset(sk, skb);
74147 discard:
74148 if (opt_skb)
74149@@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
74150 TCP_SKB_CB(skb)->sacked = 0;
74151
74152 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74153- if (!sk)
74154+ if (!sk) {
74155+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74156+ ret = 1;
74157+#endif
74158 goto no_tcp_socket;
74159+ }
74160
74161 process:
74162- if (sk->sk_state == TCP_TIME_WAIT)
74163+ if (sk->sk_state == TCP_TIME_WAIT) {
74164+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74165+ ret = 2;
74166+#endif
74167 goto do_time_wait;
74168+ }
74169
74170 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
74171 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74172@@ -1779,6 +1794,10 @@ no_tcp_socket:
74173 bad_packet:
74174 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74175 } else {
74176+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74177+ if (!grsec_enable_blackhole || (ret == 1 &&
74178+ (skb->dev->flags & IFF_LOOPBACK)))
74179+#endif
74180 tcp_v6_send_reset(NULL, skb);
74181 }
74182
74183@@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file
74184 uid,
74185 0, /* non standard timer */
74186 0, /* open_requests have no inode */
74187- 0, req);
74188+ 0,
74189+#ifdef CONFIG_GRKERNSEC_HIDESYM
74190+ NULL
74191+#else
74192+ req
74193+#endif
74194+ );
74195 }
74196
74197 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
74198@@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_fil
74199 sock_i_uid(sp),
74200 icsk->icsk_probes_out,
74201 sock_i_ino(sp),
74202- atomic_read(&sp->sk_refcnt), sp,
74203+ atomic_read(&sp->sk_refcnt),
74204+#ifdef CONFIG_GRKERNSEC_HIDESYM
74205+ NULL,
74206+#else
74207+ sp,
74208+#endif
74209 jiffies_to_clock_t(icsk->icsk_rto),
74210 jiffies_to_clock_t(icsk->icsk_ack.ato),
74211 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
74212@@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct se
74213 dest->s6_addr32[2], dest->s6_addr32[3], destp,
74214 tw->tw_substate, 0, 0,
74215 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74216- atomic_read(&tw->tw_refcnt), tw);
74217+ atomic_read(&tw->tw_refcnt),
74218+#ifdef CONFIG_GRKERNSEC_HIDESYM
74219+ NULL
74220+#else
74221+ tw
74222+#endif
74223+ );
74224 }
74225
74226 static int tcp6_seq_show(struct seq_file *seq, void *v)
74227diff -urNp linux-3.1.4/net/ipv6/udp.c linux-3.1.4/net/ipv6/udp.c
74228--- linux-3.1.4/net/ipv6/udp.c 2011-11-11 15:19:27.000000000 -0500
74229+++ linux-3.1.4/net/ipv6/udp.c 2011-11-16 18:40:44.000000000 -0500
74230@@ -50,6 +50,10 @@
74231 #include <linux/seq_file.h>
74232 #include "udp_impl.h"
74233
74234+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74235+extern int grsec_enable_blackhole;
74236+#endif
74237+
74238 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
74239 {
74240 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
74241@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
74242
74243 return 0;
74244 drop:
74245- atomic_inc(&sk->sk_drops);
74246+ atomic_inc_unchecked(&sk->sk_drops);
74247 drop_no_sk_drops_inc:
74248 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
74249 kfree_skb(skb);
74250@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
74251 continue;
74252 }
74253 drop:
74254- atomic_inc(&sk->sk_drops);
74255+ atomic_inc_unchecked(&sk->sk_drops);
74256 UDP6_INC_STATS_BH(sock_net(sk),
74257 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
74258 UDP6_INC_STATS_BH(sock_net(sk),
74259@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
74260 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
74261 proto == IPPROTO_UDPLITE);
74262
74263+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74264+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
74265+#endif
74266 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
74267
74268 kfree_skb(skb);
74269@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
74270 if (!sock_owned_by_user(sk))
74271 udpv6_queue_rcv_skb(sk, skb);
74272 else if (sk_add_backlog(sk, skb)) {
74273- atomic_inc(&sk->sk_drops);
74274+ atomic_inc_unchecked(&sk->sk_drops);
74275 bh_unlock_sock(sk);
74276 sock_put(sk);
74277 goto discard;
74278@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
74279 0, 0L, 0,
74280 sock_i_uid(sp), 0,
74281 sock_i_ino(sp),
74282- atomic_read(&sp->sk_refcnt), sp,
74283- atomic_read(&sp->sk_drops));
74284+ atomic_read(&sp->sk_refcnt),
74285+#ifdef CONFIG_GRKERNSEC_HIDESYM
74286+ NULL,
74287+#else
74288+ sp,
74289+#endif
74290+ atomic_read_unchecked(&sp->sk_drops));
74291 }
74292
74293 int udp6_seq_show(struct seq_file *seq, void *v)
74294diff -urNp linux-3.1.4/net/irda/ircomm/ircomm_tty.c linux-3.1.4/net/irda/ircomm/ircomm_tty.c
74295--- linux-3.1.4/net/irda/ircomm/ircomm_tty.c 2011-11-11 15:19:27.000000000 -0500
74296+++ linux-3.1.4/net/irda/ircomm/ircomm_tty.c 2011-11-16 18:39:08.000000000 -0500
74297@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
74298 add_wait_queue(&self->open_wait, &wait);
74299
74300 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
74301- __FILE__,__LINE__, tty->driver->name, self->open_count );
74302+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
74303
74304 /* As far as I can see, we protect open_count - Jean II */
74305 spin_lock_irqsave(&self->spinlock, flags);
74306 if (!tty_hung_up_p(filp)) {
74307 extra_count = 1;
74308- self->open_count--;
74309+ local_dec(&self->open_count);
74310 }
74311 spin_unlock_irqrestore(&self->spinlock, flags);
74312- self->blocked_open++;
74313+ local_inc(&self->blocked_open);
74314
74315 while (1) {
74316 if (tty->termios->c_cflag & CBAUD) {
74317@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
74318 }
74319
74320 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
74321- __FILE__,__LINE__, tty->driver->name, self->open_count );
74322+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
74323
74324 schedule();
74325 }
74326@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
74327 if (extra_count) {
74328 /* ++ is not atomic, so this should be protected - Jean II */
74329 spin_lock_irqsave(&self->spinlock, flags);
74330- self->open_count++;
74331+ local_inc(&self->open_count);
74332 spin_unlock_irqrestore(&self->spinlock, flags);
74333 }
74334- self->blocked_open--;
74335+ local_dec(&self->blocked_open);
74336
74337 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
74338- __FILE__,__LINE__, tty->driver->name, self->open_count);
74339+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
74340
74341 if (!retval)
74342 self->flags |= ASYNC_NORMAL_ACTIVE;
74343@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
74344 }
74345 /* ++ is not atomic, so this should be protected - Jean II */
74346 spin_lock_irqsave(&self->spinlock, flags);
74347- self->open_count++;
74348+ local_inc(&self->open_count);
74349
74350 tty->driver_data = self;
74351 self->tty = tty;
74352 spin_unlock_irqrestore(&self->spinlock, flags);
74353
74354 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
74355- self->line, self->open_count);
74356+ self->line, local_read(&self->open_count));
74357
74358 /* Not really used by us, but lets do it anyway */
74359 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
74360@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
74361 return;
74362 }
74363
74364- if ((tty->count == 1) && (self->open_count != 1)) {
74365+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
74366 /*
74367 * Uh, oh. tty->count is 1, which means that the tty
74368 * structure will be freed. state->count should always
74369@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
74370 */
74371 IRDA_DEBUG(0, "%s(), bad serial port count; "
74372 "tty->count is 1, state->count is %d\n", __func__ ,
74373- self->open_count);
74374- self->open_count = 1;
74375+ local_read(&self->open_count));
74376+ local_set(&self->open_count, 1);
74377 }
74378
74379- if (--self->open_count < 0) {
74380+ if (local_dec_return(&self->open_count) < 0) {
74381 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
74382- __func__, self->line, self->open_count);
74383- self->open_count = 0;
74384+ __func__, self->line, local_read(&self->open_count));
74385+ local_set(&self->open_count, 0);
74386 }
74387- if (self->open_count) {
74388+ if (local_read(&self->open_count)) {
74389 spin_unlock_irqrestore(&self->spinlock, flags);
74390
74391 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
74392@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
74393 tty->closing = 0;
74394 self->tty = NULL;
74395
74396- if (self->blocked_open) {
74397+ if (local_read(&self->blocked_open)) {
74398 if (self->close_delay)
74399 schedule_timeout_interruptible(self->close_delay);
74400 wake_up_interruptible(&self->open_wait);
74401@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
74402 spin_lock_irqsave(&self->spinlock, flags);
74403 self->flags &= ~ASYNC_NORMAL_ACTIVE;
74404 self->tty = NULL;
74405- self->open_count = 0;
74406+ local_set(&self->open_count, 0);
74407 spin_unlock_irqrestore(&self->spinlock, flags);
74408
74409 wake_up_interruptible(&self->open_wait);
74410@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
74411 seq_putc(m, '\n');
74412
74413 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
74414- seq_printf(m, "Open count: %d\n", self->open_count);
74415+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
74416 seq_printf(m, "Max data size: %d\n", self->max_data_size);
74417 seq_printf(m, "Max header size: %d\n", self->max_header_size);
74418
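
The ircomm_tty.c hunks above convert open_count and blocked_open from plain ints to local_read()/local_inc()/local_dec_return()/local_set() accessors. Assuming those behave like single-variable atomic counters, a userspace analogue of the open/close accounting, including the underflow repair done in ircomm_tty_close():

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int v; } local_t;       /* stand-in for the kernel type */

static void local_set(local_t *l, int i)        { atomic_store(&l->v, i); }
static int  local_read(local_t *l)              { return atomic_load(&l->v); }
static void local_inc(local_t *l)               { atomic_fetch_add(&l->v, 1); }
static int  local_dec_return(local_t *l)        { return atomic_fetch_sub(&l->v, 1) - 1; }

int main(void)
{
        local_t open_count;

        local_set(&open_count, 1);
        local_inc(&open_count);                 /* ircomm_tty_open() */

        /* ircomm_tty_close(): detect and repair an underflowed count, as the
         * patched code does. */
        if (local_dec_return(&open_count) < 0)
                local_set(&open_count, 0);

        printf("open_count = %d\n", local_read(&open_count));
        return 0;
}
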
74419diff -urNp linux-3.1.4/net/iucv/af_iucv.c linux-3.1.4/net/iucv/af_iucv.c
74420--- linux-3.1.4/net/iucv/af_iucv.c 2011-11-11 15:19:27.000000000 -0500
74421+++ linux-3.1.4/net/iucv/af_iucv.c 2011-11-16 18:39:08.000000000 -0500
74422@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
74423
74424 write_lock_bh(&iucv_sk_list.lock);
74425
74426- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
74427+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74428 while (__iucv_get_sock_by_name(name)) {
74429 sprintf(name, "%08x",
74430- atomic_inc_return(&iucv_sk_list.autobind_name));
74431+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74432 }
74433
74434 write_unlock_bh(&iucv_sk_list.lock);
74435diff -urNp linux-3.1.4/net/key/af_key.c linux-3.1.4/net/key/af_key.c
74436--- linux-3.1.4/net/key/af_key.c 2011-11-11 15:19:27.000000000 -0500
74437+++ linux-3.1.4/net/key/af_key.c 2011-11-16 18:40:44.000000000 -0500
74438@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
74439 struct xfrm_migrate m[XFRM_MAX_DEPTH];
74440 struct xfrm_kmaddress k;
74441
74442+ pax_track_stack();
74443+
74444 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
74445 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
74446 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
74447@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
74448 static u32 get_acqseq(void)
74449 {
74450 u32 res;
74451- static atomic_t acqseq;
74452+ static atomic_unchecked_t acqseq;
74453
74454 do {
74455- res = atomic_inc_return(&acqseq);
74456+ res = atomic_inc_return_unchecked(&acqseq);
74457 } while (!res);
74458 return res;
74459 }
74460diff -urNp linux-3.1.4/net/lapb/lapb_iface.c linux-3.1.4/net/lapb/lapb_iface.c
74461--- linux-3.1.4/net/lapb/lapb_iface.c 2011-11-11 15:19:27.000000000 -0500
74462+++ linux-3.1.4/net/lapb/lapb_iface.c 2011-11-16 18:39:08.000000000 -0500
74463@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
74464 goto out;
74465
74466 lapb->dev = dev;
74467- lapb->callbacks = *callbacks;
74468+ lapb->callbacks = callbacks;
74469
74470 __lapb_insert_cb(lapb);
74471
74472@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
74473
74474 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
74475 {
74476- if (lapb->callbacks.connect_confirmation)
74477- lapb->callbacks.connect_confirmation(lapb->dev, reason);
74478+ if (lapb->callbacks->connect_confirmation)
74479+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
74480 }
74481
74482 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
74483 {
74484- if (lapb->callbacks.connect_indication)
74485- lapb->callbacks.connect_indication(lapb->dev, reason);
74486+ if (lapb->callbacks->connect_indication)
74487+ lapb->callbacks->connect_indication(lapb->dev, reason);
74488 }
74489
74490 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
74491 {
74492- if (lapb->callbacks.disconnect_confirmation)
74493- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
74494+ if (lapb->callbacks->disconnect_confirmation)
74495+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
74496 }
74497
74498 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
74499 {
74500- if (lapb->callbacks.disconnect_indication)
74501- lapb->callbacks.disconnect_indication(lapb->dev, reason);
74502+ if (lapb->callbacks->disconnect_indication)
74503+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
74504 }
74505
74506 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
74507 {
74508- if (lapb->callbacks.data_indication)
74509- return lapb->callbacks.data_indication(lapb->dev, skb);
74510+ if (lapb->callbacks->data_indication)
74511+ return lapb->callbacks->data_indication(lapb->dev, skb);
74512
74513 kfree_skb(skb);
74514 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
74515@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
74516 {
74517 int used = 0;
74518
74519- if (lapb->callbacks.data_transmit) {
74520- lapb->callbacks.data_transmit(lapb->dev, skb);
74521+ if (lapb->callbacks->data_transmit) {
74522+ lapb->callbacks->data_transmit(lapb->dev, skb);
74523 used = 1;
74524 }
74525
74526diff -urNp linux-3.1.4/net/mac80211/debugfs_sta.c linux-3.1.4/net/mac80211/debugfs_sta.c
74527--- linux-3.1.4/net/mac80211/debugfs_sta.c 2011-11-11 15:19:27.000000000 -0500
74528+++ linux-3.1.4/net/mac80211/debugfs_sta.c 2011-11-16 18:40:44.000000000 -0500
74529@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
74530 struct tid_ampdu_rx *tid_rx;
74531 struct tid_ampdu_tx *tid_tx;
74532
74533+ pax_track_stack();
74534+
74535 rcu_read_lock();
74536
74537 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
74538@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
74539 struct sta_info *sta = file->private_data;
74540 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
74541
74542+ pax_track_stack();
74543+
74544 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
74545 htc->ht_supported ? "" : "not ");
74546 if (htc->ht_supported) {
74547diff -urNp linux-3.1.4/net/mac80211/ieee80211_i.h linux-3.1.4/net/mac80211/ieee80211_i.h
74548--- linux-3.1.4/net/mac80211/ieee80211_i.h 2011-11-11 15:19:27.000000000 -0500
74549+++ linux-3.1.4/net/mac80211/ieee80211_i.h 2011-11-16 18:39:08.000000000 -0500
74550@@ -27,6 +27,7 @@
74551 #include <net/ieee80211_radiotap.h>
74552 #include <net/cfg80211.h>
74553 #include <net/mac80211.h>
74554+#include <asm/local.h>
74555 #include "key.h"
74556 #include "sta_info.h"
74557
74558@@ -754,7 +755,7 @@ struct ieee80211_local {
74559 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
74560 spinlock_t queue_stop_reason_lock;
74561
74562- int open_count;
74563+ local_t open_count;
74564 int monitors, cooked_mntrs;
74565 /* number of interfaces with corresponding FIF_ flags */
74566 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
74567diff -urNp linux-3.1.4/net/mac80211/iface.c linux-3.1.4/net/mac80211/iface.c
74568--- linux-3.1.4/net/mac80211/iface.c 2011-11-11 15:19:27.000000000 -0500
74569+++ linux-3.1.4/net/mac80211/iface.c 2011-11-16 18:39:08.000000000 -0500
74570@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
74571 break;
74572 }
74573
74574- if (local->open_count == 0) {
74575+ if (local_read(&local->open_count) == 0) {
74576 res = drv_start(local);
74577 if (res)
74578 goto err_del_bss;
74579@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
74580 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
74581
74582 if (!is_valid_ether_addr(dev->dev_addr)) {
74583- if (!local->open_count)
74584+ if (!local_read(&local->open_count))
74585 drv_stop(local);
74586 return -EADDRNOTAVAIL;
74587 }
74588@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
74589 mutex_unlock(&local->mtx);
74590
74591 if (coming_up)
74592- local->open_count++;
74593+ local_inc(&local->open_count);
74594
74595 if (hw_reconf_flags) {
74596 ieee80211_hw_config(local, hw_reconf_flags);
74597@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
74598 err_del_interface:
74599 drv_remove_interface(local, &sdata->vif);
74600 err_stop:
74601- if (!local->open_count)
74602+ if (!local_read(&local->open_count))
74603 drv_stop(local);
74604 err_del_bss:
74605 sdata->bss = NULL;
74606@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
74607 }
74608
74609 if (going_down)
74610- local->open_count--;
74611+ local_dec(&local->open_count);
74612
74613 switch (sdata->vif.type) {
74614 case NL80211_IFTYPE_AP_VLAN:
74615@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
74616
74617 ieee80211_recalc_ps(local, -1);
74618
74619- if (local->open_count == 0) {
74620+ if (local_read(&local->open_count) == 0) {
74621 if (local->ops->napi_poll)
74622 napi_disable(&local->napi);
74623 ieee80211_clear_tx_pending(local);
74624diff -urNp linux-3.1.4/net/mac80211/main.c linux-3.1.4/net/mac80211/main.c
74625--- linux-3.1.4/net/mac80211/main.c 2011-11-11 15:19:27.000000000 -0500
74626+++ linux-3.1.4/net/mac80211/main.c 2011-11-16 18:39:08.000000000 -0500
74627@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
74628 local->hw.conf.power_level = power;
74629 }
74630
74631- if (changed && local->open_count) {
74632+ if (changed && local_read(&local->open_count)) {
74633 ret = drv_config(local, changed);
74634 /*
74635 * Goal:
74636diff -urNp linux-3.1.4/net/mac80211/mlme.c linux-3.1.4/net/mac80211/mlme.c
74637--- linux-3.1.4/net/mac80211/mlme.c 2011-11-11 15:19:27.000000000 -0500
74638+++ linux-3.1.4/net/mac80211/mlme.c 2011-11-16 18:40:44.000000000 -0500
74639@@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(stru
74640 bool have_higher_than_11mbit = false;
74641 u16 ap_ht_cap_flags;
74642
74643+ pax_track_stack();
74644+
74645 /* AssocResp and ReassocResp have identical structure */
74646
74647 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
74648diff -urNp linux-3.1.4/net/mac80211/pm.c linux-3.1.4/net/mac80211/pm.c
74649--- linux-3.1.4/net/mac80211/pm.c 2011-11-11 15:19:27.000000000 -0500
74650+++ linux-3.1.4/net/mac80211/pm.c 2011-11-16 18:39:08.000000000 -0500
74651@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211
74652 struct ieee80211_sub_if_data *sdata;
74653 struct sta_info *sta;
74654
74655- if (!local->open_count)
74656+ if (!local_read(&local->open_count))
74657 goto suspend;
74658
74659 ieee80211_scan_cancel(local);
74660@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211
74661 cancel_work_sync(&local->dynamic_ps_enable_work);
74662 del_timer_sync(&local->dynamic_ps_timer);
74663
74664- local->wowlan = wowlan && local->open_count;
74665+ local->wowlan = wowlan && local_read(&local->open_count);
74666 if (local->wowlan) {
74667 int err = drv_suspend(local, wowlan);
74668 if (err < 0) {
74669@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211
74670 }
74671
74672 /* stop hardware - this must stop RX */
74673- if (local->open_count)
74674+ if (local_read(&local->open_count))
74675 ieee80211_stop_device(local);
74676
74677 suspend:
74678diff -urNp linux-3.1.4/net/mac80211/rate.c linux-3.1.4/net/mac80211/rate.c
74679--- linux-3.1.4/net/mac80211/rate.c 2011-11-11 15:19:27.000000000 -0500
74680+++ linux-3.1.4/net/mac80211/rate.c 2011-11-16 18:39:08.000000000 -0500
74681@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
74682
74683 ASSERT_RTNL();
74684
74685- if (local->open_count)
74686+ if (local_read(&local->open_count))
74687 return -EBUSY;
74688
74689 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
74690diff -urNp linux-3.1.4/net/mac80211/rc80211_pid_debugfs.c linux-3.1.4/net/mac80211/rc80211_pid_debugfs.c
74691--- linux-3.1.4/net/mac80211/rc80211_pid_debugfs.c 2011-11-11 15:19:27.000000000 -0500
74692+++ linux-3.1.4/net/mac80211/rc80211_pid_debugfs.c 2011-11-16 18:39:08.000000000 -0500
74693@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
74694
74695 spin_unlock_irqrestore(&events->lock, status);
74696
74697- if (copy_to_user(buf, pb, p))
74698+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
74699 return -EFAULT;
74700
74701 return p;
74702diff -urNp linux-3.1.4/net/mac80211/util.c linux-3.1.4/net/mac80211/util.c
74703--- linux-3.1.4/net/mac80211/util.c 2011-11-26 19:57:29.000000000 -0500
74704+++ linux-3.1.4/net/mac80211/util.c 2011-11-26 20:00:43.000000000 -0500
74705@@ -1170,7 +1170,7 @@ int ieee80211_reconfig(struct ieee80211_
74706 drv_set_coverage_class(local, hw->wiphy->coverage_class);
74707
74708 /* everything else happens only if HW was up & running */
74709- if (!local->open_count)
74710+ if (!local_read(&local->open_count))
74711 goto wake_up;
74712
74713 /*
74714diff -urNp linux-3.1.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.1.4/net/netfilter/ipvs/ip_vs_conn.c
74715--- linux-3.1.4/net/netfilter/ipvs/ip_vs_conn.c 2011-11-11 15:19:27.000000000 -0500
74716+++ linux-3.1.4/net/netfilter/ipvs/ip_vs_conn.c 2011-11-16 18:39:08.000000000 -0500
74717@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
74718 /* Increase the refcnt counter of the dest */
74719 atomic_inc(&dest->refcnt);
74720
74721- conn_flags = atomic_read(&dest->conn_flags);
74722+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
74723 if (cp->protocol != IPPROTO_UDP)
74724 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
74725 /* Bind with the destination and its corresponding transmitter */
74726@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
74727 atomic_set(&cp->refcnt, 1);
74728
74729 atomic_set(&cp->n_control, 0);
74730- atomic_set(&cp->in_pkts, 0);
74731+ atomic_set_unchecked(&cp->in_pkts, 0);
74732
74733 atomic_inc(&ipvs->conn_count);
74734 if (flags & IP_VS_CONN_F_NO_CPORT)
74735@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
74736
74737 /* Don't drop the entry if its number of incoming packets is not
74738 located in [0, 8] */
74739- i = atomic_read(&cp->in_pkts);
74740+ i = atomic_read_unchecked(&cp->in_pkts);
74741 if (i > 8 || i < 0) return 0;
74742
74743 if (!todrop_rate[i]) return 0;
74744diff -urNp linux-3.1.4/net/netfilter/ipvs/ip_vs_core.c linux-3.1.4/net/netfilter/ipvs/ip_vs_core.c
74745--- linux-3.1.4/net/netfilter/ipvs/ip_vs_core.c 2011-11-11 15:19:27.000000000 -0500
74746+++ linux-3.1.4/net/netfilter/ipvs/ip_vs_core.c 2011-11-16 18:39:08.000000000 -0500
74747@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
74748 ret = cp->packet_xmit(skb, cp, pd->pp);
74749 /* do not touch skb anymore */
74750
74751- atomic_inc(&cp->in_pkts);
74752+ atomic_inc_unchecked(&cp->in_pkts);
74753 ip_vs_conn_put(cp);
74754 return ret;
74755 }
74756@@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk
74757 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
74758 pkts = sysctl_sync_threshold(ipvs);
74759 else
74760- pkts = atomic_add_return(1, &cp->in_pkts);
74761+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74762
74763 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
74764 cp->protocol == IPPROTO_SCTP) {
74765diff -urNp linux-3.1.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.1.4/net/netfilter/ipvs/ip_vs_ctl.c
74766--- linux-3.1.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-11 15:19:27.000000000 -0500
74767+++ linux-3.1.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-16 19:13:12.000000000 -0500
74768@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
74769 ip_vs_rs_hash(ipvs, dest);
74770 write_unlock_bh(&ipvs->rs_lock);
74771 }
74772- atomic_set(&dest->conn_flags, conn_flags);
74773+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
74774
74775 /* bind the service */
74776 if (!dest->svc) {
74777@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
74778 " %-7s %-6d %-10d %-10d\n",
74779 &dest->addr.in6,
74780 ntohs(dest->port),
74781- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74782+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74783 atomic_read(&dest->weight),
74784 atomic_read(&dest->activeconns),
74785 atomic_read(&dest->inactconns));
74786@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
74787 "%-7s %-6d %-10d %-10d\n",
74788 ntohl(dest->addr.ip),
74789 ntohs(dest->port),
74790- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74791+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74792 atomic_read(&dest->weight),
74793 atomic_read(&dest->activeconns),
74794 atomic_read(&dest->inactconns));
74795@@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
74796 struct ip_vs_dest_user_kern udest;
74797 struct netns_ipvs *ipvs = net_ipvs(net);
74798
74799+ pax_track_stack();
74800+
74801 if (!capable(CAP_NET_ADMIN))
74802 return -EPERM;
74803
74804@@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net
74805
74806 entry.addr = dest->addr.ip;
74807 entry.port = dest->port;
74808- entry.conn_flags = atomic_read(&dest->conn_flags);
74809+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74810 entry.weight = atomic_read(&dest->weight);
74811 entry.u_threshold = dest->u_threshold;
74812 entry.l_threshold = dest->l_threshold;
74813@@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct s
74814 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74815
74816 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74817- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74818+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74819 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74820 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74821 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74822diff -urNp linux-3.1.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.1.4/net/netfilter/ipvs/ip_vs_sync.c
74823--- linux-3.1.4/net/netfilter/ipvs/ip_vs_sync.c 2011-11-11 15:19:27.000000000 -0500
74824+++ linux-3.1.4/net/netfilter/ipvs/ip_vs_sync.c 2011-11-16 18:39:08.000000000 -0500
74825@@ -649,7 +649,7 @@ control:
74826 * i.e only increment in_pkts for Templates.
74827 */
74828 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74829- int pkts = atomic_add_return(1, &cp->in_pkts);
74830+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74831
74832 if (pkts % sysctl_sync_period(ipvs) != 1)
74833 return;
74834@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *
74835
74836 if (opt)
74837 memcpy(&cp->in_seq, opt, sizeof(*opt));
74838- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74839+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74840 cp->state = state;
74841 cp->old_state = cp->state;
74842 /*
74843diff -urNp linux-3.1.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.1.4/net/netfilter/ipvs/ip_vs_xmit.c
74844--- linux-3.1.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-11 15:19:27.000000000 -0500
74845+++ linux-3.1.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-16 18:39:08.000000000 -0500
74846@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
74847 else
74848 rc = NF_ACCEPT;
74849 /* do not touch skb anymore */
74850- atomic_inc(&cp->in_pkts);
74851+ atomic_inc_unchecked(&cp->in_pkts);
74852 goto out;
74853 }
74854
74855@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
74856 else
74857 rc = NF_ACCEPT;
74858 /* do not touch skb anymore */
74859- atomic_inc(&cp->in_pkts);
74860+ atomic_inc_unchecked(&cp->in_pkts);
74861 goto out;
74862 }
74863
74864diff -urNp linux-3.1.4/net/netfilter/Kconfig linux-3.1.4/net/netfilter/Kconfig
74865--- linux-3.1.4/net/netfilter/Kconfig 2011-11-11 15:19:27.000000000 -0500
74866+++ linux-3.1.4/net/netfilter/Kconfig 2011-11-16 18:40:44.000000000 -0500
74867@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
74868
74869 To compile it as a module, choose M here. If unsure, say N.
74870
74871+config NETFILTER_XT_MATCH_GRADM
74872+ tristate '"gradm" match support'
74873+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
74874+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
74875+ ---help---
74876+ The gradm match allows to match on grsecurity RBAC being enabled.
74877+ It is useful when iptables rules are applied early on bootup to
74878+ prevent connections to the machine (except from a trusted host)
74879+ while the RBAC system is disabled.
74880+
74881 config NETFILTER_XT_MATCH_HASHLIMIT
74882 tristate '"hashlimit" match support'
74883 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
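(Usage sketch for the gradm help text above — not part of the patch. The kernel match needs a companion userspace iptables extension, normally shipped alongside gradm; assuming that extension exposes --enabled/--disabled options, the early-boot policy the help text describes could look like:

	# illustrative only: the option name and trusted address are assumptions, not taken from this patch
	iptables -A INPUT ! -s 192.168.1.10 -m gradm --disabled -j DROP

i.e. drop all inbound traffic except from one trusted host for as long as the RBAC system is still disabled.)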
74884diff -urNp linux-3.1.4/net/netfilter/Makefile linux-3.1.4/net/netfilter/Makefile
74885--- linux-3.1.4/net/netfilter/Makefile 2011-11-11 15:19:27.000000000 -0500
74886+++ linux-3.1.4/net/netfilter/Makefile 2011-11-16 18:40:44.000000000 -0500
74887@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
74888 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
74889 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
74890 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
74891+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
74892 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
74893 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
74894 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
74895diff -urNp linux-3.1.4/net/netfilter/nfnetlink_log.c linux-3.1.4/net/netfilter/nfnetlink_log.c
74896--- linux-3.1.4/net/netfilter/nfnetlink_log.c 2011-11-11 15:19:27.000000000 -0500
74897+++ linux-3.1.4/net/netfilter/nfnetlink_log.c 2011-11-16 18:39:08.000000000 -0500
74898@@ -70,7 +70,7 @@ struct nfulnl_instance {
74899 };
74900
74901 static DEFINE_SPINLOCK(instances_lock);
74902-static atomic_t global_seq;
74903+static atomic_unchecked_t global_seq;
74904
74905 #define INSTANCE_BUCKETS 16
74906 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74907@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
74908 /* global sequence number */
74909 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74910 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74911- htonl(atomic_inc_return(&global_seq)));
74912+ htonl(atomic_inc_return_unchecked(&global_seq)));
74913
74914 if (data_len) {
74915 struct nlattr *nla;
74916diff -urNp linux-3.1.4/net/netfilter/xt_gradm.c linux-3.1.4/net/netfilter/xt_gradm.c
74917--- linux-3.1.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
74918+++ linux-3.1.4/net/netfilter/xt_gradm.c 2011-11-16 18:40:44.000000000 -0500
74919@@ -0,0 +1,51 @@
74920+/*
74921+ * gradm match for netfilter
74922