grsecurity-2.2.2-3.1.1-201111171911.patch
1diff -urNp linux-3.1.1/arch/alpha/include/asm/elf.h linux-3.1.1/arch/alpha/include/asm/elf.h
2--- linux-3.1.1/arch/alpha/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
3+++ linux-3.1.1/arch/alpha/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.1.1/arch/alpha/include/asm/pgtable.h linux-3.1.1/arch/alpha/include/asm/pgtable.h
19--- linux-3.1.1/arch/alpha/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
20+++ linux-3.1.1/arch/alpha/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.1.1/arch/alpha/kernel/module.c linux-3.1.1/arch/alpha/kernel/module.c
40--- linux-3.1.1/arch/alpha/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
41+++ linux-3.1.1/arch/alpha/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
42@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.1.1/arch/alpha/kernel/osf_sys.c linux-3.1.1/arch/alpha/kernel/osf_sys.c
52--- linux-3.1.1/arch/alpha/kernel/osf_sys.c 2011-11-11 15:19:27.000000000 -0500
53+++ linux-3.1.1/arch/alpha/kernel/osf_sys.c 2011-11-16 18:39:07.000000000 -0500
54@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.1.1/arch/alpha/mm/fault.c linux-3.1.1/arch/alpha/mm/fault.c
86--- linux-3.1.1/arch/alpha/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
87+++ linux-3.1.1/arch/alpha/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.1.1/arch/arm/include/asm/elf.h linux-3.1.1/arch/arm/include/asm/elf.h
245--- linux-3.1.1/arch/arm/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
246+++ linux-3.1.1/arch/arm/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.1.1/arch/arm/include/asm/kmap_types.h linux-3.1.1/arch/arm/include/asm/kmap_types.h
275--- linux-3.1.1/arch/arm/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
276+++ linux-3.1.1/arch/arm/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.1.1/arch/arm/include/asm/uaccess.h linux-3.1.1/arch/arm/include/asm/uaccess.h
286--- linux-3.1.1/arch/arm/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
287+++ linux-3.1.1/arch/arm/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.1.1/arch/arm/kernel/armksyms.c linux-3.1.1/arch/arm/kernel/armksyms.c
344--- linux-3.1.1/arch/arm/kernel/armksyms.c 2011-11-11 15:19:27.000000000 -0500
345+++ linux-3.1.1/arch/arm/kernel/armksyms.c 2011-11-16 18:39:07.000000000 -0500
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.1.1/arch/arm/kernel/process.c linux-3.1.1/arch/arm/kernel/process.c
358--- linux-3.1.1/arch/arm/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
359+++ linux-3.1.1/arch/arm/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366 #include <linux/cpuidle.h>
367
368@@ -481,12 +480,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.1.1/arch/arm/kernel/traps.c linux-3.1.1/arch/arm/kernel/traps.c
382--- linux-3.1.1/arch/arm/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
383+++ linux-3.1.1/arch/arm/kernel/traps.c 2011-11-16 18:40:08.000000000 -0500
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.1.1/arch/arm/lib/copy_from_user.S linux-3.1.1/arch/arm/lib/copy_from_user.S
404--- linux-3.1.1/arch/arm/lib/copy_from_user.S 2011-11-11 15:19:27.000000000 -0500
405+++ linux-3.1.1/arch/arm/lib/copy_from_user.S 2011-11-16 18:39:07.000000000 -0500
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.1.1/arch/arm/lib/copy_to_user.S linux-3.1.1/arch/arm/lib/copy_to_user.S
430--- linux-3.1.1/arch/arm/lib/copy_to_user.S 2011-11-11 15:19:27.000000000 -0500
431+++ linux-3.1.1/arch/arm/lib/copy_to_user.S 2011-11-16 18:39:07.000000000 -0500
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.1.1/arch/arm/lib/uaccess.S linux-3.1.1/arch/arm/lib/uaccess.S
456--- linux-3.1.1/arch/arm/lib/uaccess.S 2011-11-11 15:19:27.000000000 -0500
457+++ linux-3.1.1/arch/arm/lib/uaccess.S 2011-11-16 18:39:07.000000000 -0500
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c 2011-11-11 15:19:27.000000000 -0500
513+++ linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c 2011-11-16 18:39:07.000000000 -0500
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c 2011-11-11 15:19:27.000000000 -0500
525+++ linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c 2011-11-16 18:40:08.000000000 -0500
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.1.1/arch/arm/mm/fault.c linux-3.1.1/arch/arm/mm/fault.c
536--- linux-3.1.1/arch/arm/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
537+++ linux-3.1.1/arch/arm/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.1.1/arch/arm/mm/mmap.c linux-3.1.1/arch/arm/mm/mmap.c
587--- linux-3.1.1/arch/arm/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
588+++ linux-3.1.1/arch/arm/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.1.1/arch/avr32/include/asm/elf.h linux-3.1.1/arch/avr32/include/asm/elf.h
639--- linux-3.1.1/arch/avr32/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
640+++ linux-3.1.1/arch/avr32/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.1.1/arch/avr32/include/asm/kmap_types.h linux-3.1.1/arch/avr32/include/asm/kmap_types.h
658--- linux-3.1.1/arch/avr32/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
659+++ linux-3.1.1/arch/avr32/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.1.1/arch/avr32/mm/fault.c linux-3.1.1/arch/avr32/mm/fault.c
671--- linux-3.1.1/arch/avr32/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
672+++ linux-3.1.1/arch/avr32/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.1.1/arch/frv/include/asm/kmap_types.h linux-3.1.1/arch/frv/include/asm/kmap_types.h
715--- linux-3.1.1/arch/frv/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
716+++ linux-3.1.1/arch/frv/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.1.1/arch/frv/mm/elf-fdpic.c linux-3.1.1/arch/frv/mm/elf-fdpic.c
726--- linux-3.1.1/arch/frv/mm/elf-fdpic.c 2011-11-11 15:19:27.000000000 -0500
727+++ linux-3.1.1/arch/frv/mm/elf-fdpic.c 2011-11-16 18:39:07.000000000 -0500
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.1.1/arch/ia64/include/asm/elf.h linux-3.1.1/arch/ia64/include/asm/elf.h
757--- linux-3.1.1/arch/ia64/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
758+++ linux-3.1.1/arch/ia64/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.1.1/arch/ia64/include/asm/pgtable.h linux-3.1.1/arch/ia64/include/asm/pgtable.h
774--- linux-3.1.1/arch/ia64/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
775+++ linux-3.1.1/arch/ia64/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.1.1/arch/ia64/include/asm/spinlock.h linux-3.1.1/arch/ia64/include/asm/spinlock.h
804--- linux-3.1.1/arch/ia64/include/asm/spinlock.h 2011-11-11 15:19:27.000000000 -0500
805+++ linux-3.1.1/arch/ia64/include/asm/spinlock.h 2011-11-16 18:39:07.000000000 -0500
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.1.1/arch/ia64/include/asm/uaccess.h linux-3.1.1/arch/ia64/include/asm/uaccess.h
816--- linux-3.1.1/arch/ia64/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
817+++ linux-3.1.1/arch/ia64/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.1.1/arch/ia64/kernel/module.c linux-3.1.1/arch/ia64/kernel/module.c
837--- linux-3.1.1/arch/ia64/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
838+++ linux-3.1.1/arch/ia64/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
839@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.1.1/arch/ia64/kernel/sys_ia64.c linux-3.1.1/arch/ia64/kernel/sys_ia64.c
928--- linux-3.1.1/arch/ia64/kernel/sys_ia64.c 2011-11-11 15:19:27.000000000 -0500
929+++ linux-3.1.1/arch/ia64/kernel/sys_ia64.c 2011-11-16 18:39:07.000000000 -0500
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S 2011-11-11 15:19:27.000000000 -0500
964+++ linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S 2011-11-16 18:39:07.000000000 -0500
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.1.1/arch/ia64/mm/fault.c linux-3.1.1/arch/ia64/mm/fault.c
975--- linux-3.1.1/arch/ia64/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
976+++ linux-3.1.1/arch/ia64/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.1.1/arch/ia64/mm/hugetlbpage.c linux-3.1.1/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.1.1/arch/ia64/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
1028+++ linux-3.1.1/arch/ia64/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.1.1/arch/ia64/mm/init.c linux-3.1.1/arch/ia64/mm/init.c
1039--- linux-3.1.1/arch/ia64/mm/init.c 2011-11-11 15:19:27.000000000 -0500
1040+++ linux-3.1.1/arch/ia64/mm/init.c 2011-11-16 18:39:07.000000000 -0500
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.1.1/arch/m32r/lib/usercopy.c linux-3.1.1/arch/m32r/lib/usercopy.c
1062--- linux-3.1.1/arch/m32r/lib/usercopy.c 2011-11-11 15:19:27.000000000 -0500
1063+++ linux-3.1.1/arch/m32r/lib/usercopy.c 2011-11-16 18:39:07.000000000 -0500
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.1.1/arch/mips/include/asm/elf.h linux-3.1.1/arch/mips/include/asm/elf.h
1085--- linux-3.1.1/arch/mips/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1086+++ linux-3.1.1/arch/mips/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.1.1/arch/mips/include/asm/page.h linux-3.1.1/arch/mips/include/asm/page.h
1109--- linux-3.1.1/arch/mips/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
1110+++ linux-3.1.1/arch/mips/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.1.1/arch/mips/include/asm/system.h linux-3.1.1/arch/mips/include/asm/system.h
1121--- linux-3.1.1/arch/mips/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
1122+++ linux-3.1.1/arch/mips/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c 2011-11-11 15:19:27.000000000 -0500
1133+++ linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c 2011-11-16 18:39:07.000000000 -0500
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c 2011-11-11 15:19:27.000000000 -0500
1150+++ linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c 2011-11-16 18:39:07.000000000 -0500
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.1.1/arch/mips/kernel/process.c linux-3.1.1/arch/mips/kernel/process.c
1166--- linux-3.1.1/arch/mips/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
1167+++ linux-3.1.1/arch/mips/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
1168@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.1.1/arch/mips/mm/fault.c linux-3.1.1/arch/mips/mm/fault.c
1185--- linux-3.1.1/arch/mips/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1186+++ linux-3.1.1/arch/mips/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.1.1/arch/mips/mm/mmap.c linux-3.1.1/arch/mips/mm/mmap.c
1212--- linux-3.1.1/arch/mips/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
1213+++ linux-3.1.1/arch/mips/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
1214@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_a
1215 do_color_align = 1;
1216
1217 /* requesting a specific address */
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_a
1227 addr = PAGE_ALIGN(addr);
1228
1229 vma = find_vma(mm, addr);
1230- if (TASK_SIZE - len >= addr &&
1231- (!vma || addr + len <= vma->vm_start))
1232+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1233 return addr;
1234 }
1235
1236@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_a
1237 /* At this point: (!vma || addr < vma->vm_end). */
1238 if (TASK_SIZE - len < addr)
1239 return -ENOMEM;
1240- if (!vma || addr + len <= vma->vm_start)
1241+ if (check_heap_stack_gap(vmm, addr, len))
1242 return addr;
1243 addr = vma->vm_end;
1244 if (do_color_align)
1245@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_a
1246 /* make sure it can fit in the remaining address space */
1247 if (likely(addr > len)) {
1248 vma = find_vma(mm, addr - len);
1249- if (!vma || addr <= vma->vm_start) {
1250+ if (check_heap_stack_gap(vmm, addr - len, len))
1251 /* cache the address as a hint for next time */
1252 return mm->free_area_cache = addr - len;
1253 }
1254@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_a
1255 * return with success:
1256 */
1257 vma = find_vma(mm, addr);
1258- if (likely(!vma || addr + len <= vma->vm_start)) {
1259+ if (check_heap_stack_gap(vmm, addr, len)) {
1260 /* cache the address as a hint for next time */
1261 return mm->free_area_cache = addr;
1262 }
1263@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_str
1264 mm->unmap_area = arch_unmap_area_topdown;
1265 }
1266 }
1267-
1268-static inline unsigned long brk_rnd(void)
1269-{
1270- unsigned long rnd = get_random_int();
1271-
1272- rnd = rnd << PAGE_SHIFT;
1273- /* 8MB for 32bit, 256MB for 64bit */
1274- if (TASK_IS_32BIT_ADDR)
1275- rnd = rnd & 0x7ffffful;
1276- else
1277- rnd = rnd & 0xffffffful;
1278-
1279- return rnd;
1280-}
1281-
1282-unsigned long arch_randomize_brk(struct mm_struct *mm)
1283-{
1284- unsigned long base = mm->brk;
1285- unsigned long ret;
1286-
1287- ret = PAGE_ALIGN(base + brk_rnd());
1288-
1289- if (ret < mm->brk)
1290- return mm->brk;
1291-
1292- return ret;
1293-}
1294diff -urNp linux-3.1.1/arch/parisc/include/asm/elf.h linux-3.1.1/arch/parisc/include/asm/elf.h
1295--- linux-3.1.1/arch/parisc/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1296+++ linux-3.1.1/arch/parisc/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1297@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1298
1299 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1300
1301+#ifdef CONFIG_PAX_ASLR
1302+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1303+
1304+#define PAX_DELTA_MMAP_LEN 16
1305+#define PAX_DELTA_STACK_LEN 16
1306+#endif
1307+
1308 /* This yields a mask that user programs can use to figure out what
1309 instruction set this CPU supports. This could be done in user space,
1310 but it's not easy, and we've already done it here. */
1311diff -urNp linux-3.1.1/arch/parisc/include/asm/pgtable.h linux-3.1.1/arch/parisc/include/asm/pgtable.h
1312--- linux-3.1.1/arch/parisc/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
1313+++ linux-3.1.1/arch/parisc/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
1314@@ -210,6 +210,17 @@ struct vm_area_struct;
1315 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1316 #define PAGE_COPY PAGE_EXECREAD
1317 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1318+
1319+#ifdef CONFIG_PAX_PAGEEXEC
1320+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1321+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1322+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1323+#else
1324+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1325+# define PAGE_COPY_NOEXEC PAGE_COPY
1326+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1327+#endif
1328+
1329 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1330 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1331 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1332diff -urNp linux-3.1.1/arch/parisc/kernel/module.c linux-3.1.1/arch/parisc/kernel/module.c
1333--- linux-3.1.1/arch/parisc/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
1334+++ linux-3.1.1/arch/parisc/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
1335@@ -98,16 +98,38 @@
1336
1337 /* three functions to determine where in the module core
1338 * or init pieces the location is */
1339+static inline int in_init_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_init_rx &&
1342+ loc < (me->module_init_rx + me->init_size_rx));
1343+}
1344+
1345+static inline int in_init_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_init_rw &&
1348+ loc < (me->module_init_rw + me->init_size_rw));
1349+}
1350+
1351 static inline int in_init(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_init &&
1354- loc <= (me->module_init + me->init_size));
1355+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1356+}
1357+
1358+static inline int in_core_rx(struct module *me, void *loc)
1359+{
1360+ return (loc >= me->module_core_rx &&
1361+ loc < (me->module_core_rx + me->core_size_rx));
1362+}
1363+
1364+static inline int in_core_rw(struct module *me, void *loc)
1365+{
1366+ return (loc >= me->module_core_rw &&
1367+ loc < (me->module_core_rw + me->core_size_rw));
1368 }
1369
1370 static inline int in_core(struct module *me, void *loc)
1371 {
1372- return (loc >= me->module_core &&
1373- loc <= (me->module_core + me->core_size));
1374+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1375 }
1376
1377 static inline int in_local(struct module *me, void *loc)
1378@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1379 }
1380
1381 /* align things a bit */
1382- me->core_size = ALIGN(me->core_size, 16);
1383- me->arch.got_offset = me->core_size;
1384- me->core_size += gots * sizeof(struct got_entry);
1385-
1386- me->core_size = ALIGN(me->core_size, 16);
1387- me->arch.fdesc_offset = me->core_size;
1388- me->core_size += fdescs * sizeof(Elf_Fdesc);
1389+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1390+ me->arch.got_offset = me->core_size_rw;
1391+ me->core_size_rw += gots * sizeof(struct got_entry);
1392+
1393+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1394+ me->arch.fdesc_offset = me->core_size_rw;
1395+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1396
1397 me->arch.got_max = gots;
1398 me->arch.fdesc_max = fdescs;
1399@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1400
1401 BUG_ON(value == 0);
1402
1403- got = me->module_core + me->arch.got_offset;
1404+ got = me->module_core_rw + me->arch.got_offset;
1405 for (i = 0; got[i].addr; i++)
1406 if (got[i].addr == value)
1407 goto out;
1408@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1409 #ifdef CONFIG_64BIT
1410 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1411 {
1412- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1413+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1414
1415 if (!value) {
1416 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1417@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1418
1419 /* Create new one */
1420 fdesc->addr = value;
1421- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1422+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1423 return (Elf_Addr)fdesc;
1424 }
1425 #endif /* CONFIG_64BIT */
1426@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1427
1428 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1429 end = table + sechdrs[me->arch.unwind_section].sh_size;
1430- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1431+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1432
1433 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1434 me->arch.unwind_section, table, end, gp);
1435diff -urNp linux-3.1.1/arch/parisc/kernel/sys_parisc.c linux-3.1.1/arch/parisc/kernel/sys_parisc.c
1436--- linux-3.1.1/arch/parisc/kernel/sys_parisc.c 2011-11-11 15:19:27.000000000 -0500
1437+++ linux-3.1.1/arch/parisc/kernel/sys_parisc.c 2011-11-16 18:39:07.000000000 -0500
1438@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1439 /* At this point: (!vma || addr < vma->vm_end). */
1440 if (TASK_SIZE - len < addr)
1441 return -ENOMEM;
1442- if (!vma || addr + len <= vma->vm_start)
1443+ if (check_heap_stack_gap(vma, addr, len))
1444 return addr;
1445 addr = vma->vm_end;
1446 }
1447@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1448 /* At this point: (!vma || addr < vma->vm_end). */
1449 if (TASK_SIZE - len < addr)
1450 return -ENOMEM;
1451- if (!vma || addr + len <= vma->vm_start)
1452+ if (check_heap_stack_gap(vma, addr, len))
1453 return addr;
1454 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1455 if (addr < vma->vm_end) /* handle wraparound */
1456@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1457 if (flags & MAP_FIXED)
1458 return addr;
1459 if (!addr)
1460- addr = TASK_UNMAPPED_BASE;
1461+ addr = current->mm->mmap_base;
1462
1463 if (filp) {
1464 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1465diff -urNp linux-3.1.1/arch/parisc/kernel/traps.c linux-3.1.1/arch/parisc/kernel/traps.c
1466--- linux-3.1.1/arch/parisc/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
1467+++ linux-3.1.1/arch/parisc/kernel/traps.c 2011-11-16 18:39:07.000000000 -0500
1468@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1469
1470 down_read(&current->mm->mmap_sem);
1471 vma = find_vma(current->mm,regs->iaoq[0]);
1472- if (vma && (regs->iaoq[0] >= vma->vm_start)
1473- && (vma->vm_flags & VM_EXEC)) {
1474-
1475+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1476 fault_address = regs->iaoq[0];
1477 fault_space = regs->iasq[0];
1478
1479diff -urNp linux-3.1.1/arch/parisc/mm/fault.c linux-3.1.1/arch/parisc/mm/fault.c
1480--- linux-3.1.1/arch/parisc/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1481+++ linux-3.1.1/arch/parisc/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1482@@ -15,6 +15,7 @@
1483 #include <linux/sched.h>
1484 #include <linux/interrupt.h>
1485 #include <linux/module.h>
1486+#include <linux/unistd.h>
1487
1488 #include <asm/uaccess.h>
1489 #include <asm/traps.h>
1490@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1491 static unsigned long
1492 parisc_acctyp(unsigned long code, unsigned int inst)
1493 {
1494- if (code == 6 || code == 16)
1495+ if (code == 6 || code == 7 || code == 16)
1496 return VM_EXEC;
1497
1498 switch (inst & 0xf0000000) {
1499@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1500 }
1501 #endif
1502
1503+#ifdef CONFIG_PAX_PAGEEXEC
1504+/*
1505+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1506+ *
1507+ * returns 1 when task should be killed
1508+ * 2 when rt_sigreturn trampoline was detected
1509+ * 3 when unpatched PLT trampoline was detected
1510+ */
1511+static int pax_handle_fetch_fault(struct pt_regs *regs)
1512+{
1513+
1514+#ifdef CONFIG_PAX_EMUPLT
1515+ int err;
1516+
1517+ do { /* PaX: unpatched PLT emulation */
1518+ unsigned int bl, depwi;
1519+
1520+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1521+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1522+
1523+ if (err)
1524+ break;
1525+
1526+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1527+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1528+
1529+ err = get_user(ldw, (unsigned int *)addr);
1530+ err |= get_user(bv, (unsigned int *)(addr+4));
1531+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1532+
1533+ if (err)
1534+ break;
1535+
1536+ if (ldw == 0x0E801096U &&
1537+ bv == 0xEAC0C000U &&
1538+ ldw2 == 0x0E881095U)
1539+ {
1540+ unsigned int resolver, map;
1541+
1542+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1543+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1544+ if (err)
1545+ break;
1546+
1547+ regs->gr[20] = instruction_pointer(regs)+8;
1548+ regs->gr[21] = map;
1549+ regs->gr[22] = resolver;
1550+ regs->iaoq[0] = resolver | 3UL;
1551+ regs->iaoq[1] = regs->iaoq[0] + 4;
1552+ return 3;
1553+ }
1554+ }
1555+ } while (0);
1556+#endif
1557+
1558+#ifdef CONFIG_PAX_EMUTRAMP
1559+
1560+#ifndef CONFIG_PAX_EMUSIGRT
1561+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1562+ return 1;
1563+#endif
1564+
1565+ do { /* PaX: rt_sigreturn emulation */
1566+ unsigned int ldi1, ldi2, bel, nop;
1567+
1568+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1569+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1570+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1571+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1572+
1573+ if (err)
1574+ break;
1575+
1576+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1577+ ldi2 == 0x3414015AU &&
1578+ bel == 0xE4008200U &&
1579+ nop == 0x08000240U)
1580+ {
1581+ regs->gr[25] = (ldi1 & 2) >> 1;
1582+ regs->gr[20] = __NR_rt_sigreturn;
1583+ regs->gr[31] = regs->iaoq[1] + 16;
1584+ regs->sr[0] = regs->iasq[1];
1585+ regs->iaoq[0] = 0x100UL;
1586+ regs->iaoq[1] = regs->iaoq[0] + 4;
1587+ regs->iasq[0] = regs->sr[2];
1588+ regs->iasq[1] = regs->sr[2];
1589+ return 2;
1590+ }
1591+ } while (0);
1592+#endif
1593+
1594+ return 1;
1595+}
1596+
1597+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1598+{
1599+ unsigned long i;
1600+
1601+ printk(KERN_ERR "PAX: bytes at PC: ");
1602+ for (i = 0; i < 5; i++) {
1603+ unsigned int c;
1604+ if (get_user(c, (unsigned int *)pc+i))
1605+ printk(KERN_CONT "???????? ");
1606+ else
1607+ printk(KERN_CONT "%08x ", c);
1608+ }
1609+ printk("\n");
1610+}
1611+#endif
1612+
1613 int fixup_exception(struct pt_regs *regs)
1614 {
1615 const struct exception_table_entry *fix;
1616@@ -192,8 +303,33 @@ good_area:
1617
1618 acc_type = parisc_acctyp(code,regs->iir);
1619
1620- if ((vma->vm_flags & acc_type) != acc_type)
1621+ if ((vma->vm_flags & acc_type) != acc_type) {
1622+
1623+#ifdef CONFIG_PAX_PAGEEXEC
1624+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1625+ (address & ~3UL) == instruction_pointer(regs))
1626+ {
1627+ up_read(&mm->mmap_sem);
1628+ switch (pax_handle_fetch_fault(regs)) {
1629+
1630+#ifdef CONFIG_PAX_EMUPLT
1631+ case 3:
1632+ return;
1633+#endif
1634+
1635+#ifdef CONFIG_PAX_EMUTRAMP
1636+ case 2:
1637+ return;
1638+#endif
1639+
1640+ }
1641+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1642+ do_group_exit(SIGKILL);
1643+ }
1644+#endif
1645+
1646 goto bad_area;
1647+ }
1648
1649 /*
1650 * If for any reason at all we couldn't handle the fault, make
1651diff -urNp linux-3.1.1/arch/powerpc/include/asm/elf.h linux-3.1.1/arch/powerpc/include/asm/elf.h
1652--- linux-3.1.1/arch/powerpc/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1653+++ linux-3.1.1/arch/powerpc/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1654@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1655 the loader. We need to make sure that it is out of the way of the program
1656 that it will "exec", and that there is sufficient room for the brk. */
1657
1658-extern unsigned long randomize_et_dyn(unsigned long base);
1659-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1660+#define ELF_ET_DYN_BASE (0x20000000)
1661+
1662+#ifdef CONFIG_PAX_ASLR
1663+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1664+
1665+#ifdef __powerpc64__
1666+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1667+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1668+#else
1669+#define PAX_DELTA_MMAP_LEN 15
1670+#define PAX_DELTA_STACK_LEN 15
1671+#endif
1672+#endif
1673
1674 /*
1675 * Our registers are always unsigned longs, whether we're a 32 bit
1676@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1677 (0x7ff >> (PAGE_SHIFT - 12)) : \
1678 (0x3ffff >> (PAGE_SHIFT - 12)))
1679
1680-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1681-#define arch_randomize_brk arch_randomize_brk
1682-
1683 #endif /* __KERNEL__ */
1684
1685 /*
1686diff -urNp linux-3.1.1/arch/powerpc/include/asm/kmap_types.h linux-3.1.1/arch/powerpc/include/asm/kmap_types.h
1687--- linux-3.1.1/arch/powerpc/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
1688+++ linux-3.1.1/arch/powerpc/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
1689@@ -27,6 +27,7 @@ enum km_type {
1690 KM_PPC_SYNC_PAGE,
1691 KM_PPC_SYNC_ICACHE,
1692 KM_KDB,
1693+ KM_CLEARPAGE,
1694 KM_TYPE_NR
1695 };
1696
1697diff -urNp linux-3.1.1/arch/powerpc/include/asm/mman.h linux-3.1.1/arch/powerpc/include/asm/mman.h
1698--- linux-3.1.1/arch/powerpc/include/asm/mman.h 2011-11-11 15:19:27.000000000 -0500
1699+++ linux-3.1.1/arch/powerpc/include/asm/mman.h 2011-11-16 18:39:07.000000000 -0500
1700@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1701 }
1702 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1703
1704-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1705+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1706 {
1707 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1708 }
1709diff -urNp linux-3.1.1/arch/powerpc/include/asm/page_64.h linux-3.1.1/arch/powerpc/include/asm/page_64.h
1710--- linux-3.1.1/arch/powerpc/include/asm/page_64.h 2011-11-11 15:19:27.000000000 -0500
1711+++ linux-3.1.1/arch/powerpc/include/asm/page_64.h 2011-11-16 18:39:07.000000000 -0500
1712@@ -155,15 +155,18 @@ do { \
1713 * stack by default, so in the absence of a PT_GNU_STACK program header
1714 * we turn execute permission off.
1715 */
1716-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1717- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1718+#define VM_STACK_DEFAULT_FLAGS32 \
1719+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1720+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1721
1722 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1723 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1724
1725+#ifndef CONFIG_PAX_PAGEEXEC
1726 #define VM_STACK_DEFAULT_FLAGS \
1727 (is_32bit_task() ? \
1728 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1729+#endif
1730
1731 #include <asm-generic/getorder.h>
1732
1733diff -urNp linux-3.1.1/arch/powerpc/include/asm/page.h linux-3.1.1/arch/powerpc/include/asm/page.h
1734--- linux-3.1.1/arch/powerpc/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
1735+++ linux-3.1.1/arch/powerpc/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
1736@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1737 * and needs to be executable. This means the whole heap ends
1738 * up being executable.
1739 */
1740-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1741- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1742+#define VM_DATA_DEFAULT_FLAGS32 \
1743+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1744+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1745
1746 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1747 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1748@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1749 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1750 #endif
1751
1752+#define ktla_ktva(addr) (addr)
1753+#define ktva_ktla(addr) (addr)
1754+
1755 #ifndef __ASSEMBLY__
1756
1757 #undef STRICT_MM_TYPECHECKS
1758diff -urNp linux-3.1.1/arch/powerpc/include/asm/pgtable.h linux-3.1.1/arch/powerpc/include/asm/pgtable.h
1759--- linux-3.1.1/arch/powerpc/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
1760+++ linux-3.1.1/arch/powerpc/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
1761@@ -2,6 +2,7 @@
1762 #define _ASM_POWERPC_PGTABLE_H
1763 #ifdef __KERNEL__
1764
1765+#include <linux/const.h>
1766 #ifndef __ASSEMBLY__
1767 #include <asm/processor.h> /* For TASK_SIZE */
1768 #include <asm/mmu.h>
1769diff -urNp linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h
1770--- linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h 2011-11-11 15:19:27.000000000 -0500
1771+++ linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h 2011-11-16 18:39:07.000000000 -0500
1772@@ -21,6 +21,7 @@
1773 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1774 #define _PAGE_USER 0x004 /* usermode access allowed */
1775 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1776+#define _PAGE_EXEC _PAGE_GUARDED
1777 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1778 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1779 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1780diff -urNp linux-3.1.1/arch/powerpc/include/asm/reg.h linux-3.1.1/arch/powerpc/include/asm/reg.h
1781--- linux-3.1.1/arch/powerpc/include/asm/reg.h 2011-11-11 15:19:27.000000000 -0500
1782+++ linux-3.1.1/arch/powerpc/include/asm/reg.h 2011-11-16 18:39:07.000000000 -0500
1783@@ -212,6 +212,7 @@
1784 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1785 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1786 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1787+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1788 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1789 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1790 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1791diff -urNp linux-3.1.1/arch/powerpc/include/asm/system.h linux-3.1.1/arch/powerpc/include/asm/system.h
1792--- linux-3.1.1/arch/powerpc/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
1793+++ linux-3.1.1/arch/powerpc/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
1794@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1795 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1796 #endif
1797
1798-extern unsigned long arch_align_stack(unsigned long sp);
1799+#define arch_align_stack(x) ((x) & ~0xfUL)
1800
1801 /* Used in very early kernel initialization. */
1802 extern unsigned long reloc_offset(void);
1803diff -urNp linux-3.1.1/arch/powerpc/include/asm/uaccess.h linux-3.1.1/arch/powerpc/include/asm/uaccess.h
1804--- linux-3.1.1/arch/powerpc/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
1805+++ linux-3.1.1/arch/powerpc/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
1806@@ -13,6 +13,8 @@
1807 #define VERIFY_READ 0
1808 #define VERIFY_WRITE 1
1809
1810+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1811+
1812 /*
1813 * The fs value determines whether argument validity checking should be
1814 * performed or not. If get_fs() == USER_DS, checking is performed, with
1815@@ -327,52 +329,6 @@ do { \
1816 extern unsigned long __copy_tofrom_user(void __user *to,
1817 const void __user *from, unsigned long size);
1818
1819-#ifndef __powerpc64__
1820-
1821-static inline unsigned long copy_from_user(void *to,
1822- const void __user *from, unsigned long n)
1823-{
1824- unsigned long over;
1825-
1826- if (access_ok(VERIFY_READ, from, n))
1827- return __copy_tofrom_user((__force void __user *)to, from, n);
1828- if ((unsigned long)from < TASK_SIZE) {
1829- over = (unsigned long)from + n - TASK_SIZE;
1830- return __copy_tofrom_user((__force void __user *)to, from,
1831- n - over) + over;
1832- }
1833- return n;
1834-}
1835-
1836-static inline unsigned long copy_to_user(void __user *to,
1837- const void *from, unsigned long n)
1838-{
1839- unsigned long over;
1840-
1841- if (access_ok(VERIFY_WRITE, to, n))
1842- return __copy_tofrom_user(to, (__force void __user *)from, n);
1843- if ((unsigned long)to < TASK_SIZE) {
1844- over = (unsigned long)to + n - TASK_SIZE;
1845- return __copy_tofrom_user(to, (__force void __user *)from,
1846- n - over) + over;
1847- }
1848- return n;
1849-}
1850-
1851-#else /* __powerpc64__ */
1852-
1853-#define __copy_in_user(to, from, size) \
1854- __copy_tofrom_user((to), (from), (size))
1855-
1856-extern unsigned long copy_from_user(void *to, const void __user *from,
1857- unsigned long n);
1858-extern unsigned long copy_to_user(void __user *to, const void *from,
1859- unsigned long n);
1860-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1861- unsigned long n);
1862-
1863-#endif /* __powerpc64__ */
1864-
1865 static inline unsigned long __copy_from_user_inatomic(void *to,
1866 const void __user *from, unsigned long n)
1867 {
1868@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1869 if (ret == 0)
1870 return 0;
1871 }
1872+
1873+ if (!__builtin_constant_p(n))
1874+ check_object_size(to, n, false);
1875+
1876 return __copy_tofrom_user((__force void __user *)to, from, n);
1877 }
1878
1879@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1880 if (ret == 0)
1881 return 0;
1882 }
1883+
1884+ if (!__builtin_constant_p(n))
1885+ check_object_size(from, n, true);
1886+
1887 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1888 }
1889
1890@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1891 return __copy_to_user_inatomic(to, from, size);
1892 }
1893
1894+#ifndef __powerpc64__
1895+
1896+static inline unsigned long __must_check copy_from_user(void *to,
1897+ const void __user *from, unsigned long n)
1898+{
1899+ unsigned long over;
1900+
1901+ if ((long)n < 0)
1902+ return n;
1903+
1904+ if (access_ok(VERIFY_READ, from, n)) {
1905+ if (!__builtin_constant_p(n))
1906+ check_object_size(to, n, false);
1907+ return __copy_tofrom_user((__force void __user *)to, from, n);
1908+ }
1909+ if ((unsigned long)from < TASK_SIZE) {
1910+ over = (unsigned long)from + n - TASK_SIZE;
1911+ if (!__builtin_constant_p(n - over))
1912+ check_object_size(to, n - over, false);
1913+ return __copy_tofrom_user((__force void __user *)to, from,
1914+ n - over) + over;
1915+ }
1916+ return n;
1917+}
1918+
1919+static inline unsigned long __must_check copy_to_user(void __user *to,
1920+ const void *from, unsigned long n)
1921+{
1922+ unsigned long over;
1923+
1924+ if ((long)n < 0)
1925+ return n;
1926+
1927+ if (access_ok(VERIFY_WRITE, to, n)) {
1928+ if (!__builtin_constant_p(n))
1929+ check_object_size(from, n, true);
1930+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1931+ }
1932+ if ((unsigned long)to < TASK_SIZE) {
1933+ over = (unsigned long)to + n - TASK_SIZE;
1934+ if (!__builtin_constant_p(n))
1935+ check_object_size(from, n - over, true);
1936+ return __copy_tofrom_user(to, (__force void __user *)from,
1937+ n - over) + over;
1938+ }
1939+ return n;
1940+}
1941+
1942+#else /* __powerpc64__ */
1943+
1944+#define __copy_in_user(to, from, size) \
1945+ __copy_tofrom_user((to), (from), (size))
1946+
1947+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1948+{
1949+ if ((long)n < 0 || n > INT_MAX)
1950+ return n;
1951+
1952+ if (!__builtin_constant_p(n))
1953+ check_object_size(to, n, false);
1954+
1955+ if (likely(access_ok(VERIFY_READ, from, n)))
1956+ n = __copy_from_user(to, from, n);
1957+ else
1958+ memset(to, 0, n);
1959+ return n;
1960+}
1961+
1962+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1963+{
1964+ if ((long)n < 0 || n > INT_MAX)
1965+ return n;
1966+
1967+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1968+ if (!__builtin_constant_p(n))
1969+ check_object_size(from, n, true);
1970+ n = __copy_to_user(to, from, n);
1971+ }
1972+ return n;
1973+}
1974+
1975+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1976+ unsigned long n);
1977+
1978+#endif /* __powerpc64__ */
1979+
1980 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1981
1982 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1983diff -urNp linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S
1984--- linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S 2011-11-11 15:19:27.000000000 -0500
1985+++ linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S 2011-11-16 18:39:07.000000000 -0500
1986@@ -587,6 +587,7 @@ storage_fault_common:
1987 std r14,_DAR(r1)
1988 std r15,_DSISR(r1)
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1990+ bl .save_nvgprs
1991 mr r4,r14
1992 mr r5,r15
1993 ld r14,PACA_EXGEN+EX_R14(r13)
1994@@ -596,8 +597,7 @@ storage_fault_common:
1995 cmpdi r3,0
1996 bne- 1f
1997 b .ret_from_except_lite
1998-1: bl .save_nvgprs
1999- mr r5,r3
2000+1: mr r5,r3
2001 addi r3,r1,STACK_FRAME_OVERHEAD
2002 ld r4,_DAR(r1)
2003 bl .bad_page_fault
2004diff -urNp linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S
2005--- linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S 2011-11-11 15:19:27.000000000 -0500
2006+++ linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S 2011-11-16 18:39:07.000000000 -0500
2007@@ -1014,10 +1014,10 @@ handle_page_fault:
2008 11: ld r4,_DAR(r1)
2009 ld r5,_DSISR(r1)
2010 addi r3,r1,STACK_FRAME_OVERHEAD
2011+ bl .save_nvgprs
2012 bl .do_page_fault
2013 cmpdi r3,0
2014 beq+ 13f
2015- bl .save_nvgprs
2016 mr r5,r3
2017 addi r3,r1,STACK_FRAME_OVERHEAD
2018 lwz r4,_DAR(r1)
2019diff -urNp linux-3.1.1/arch/powerpc/kernel/module_32.c linux-3.1.1/arch/powerpc/kernel/module_32.c
2020--- linux-3.1.1/arch/powerpc/kernel/module_32.c 2011-11-11 15:19:27.000000000 -0500
2021+++ linux-3.1.1/arch/powerpc/kernel/module_32.c 2011-11-16 18:39:07.000000000 -0500
2022@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2023 me->arch.core_plt_section = i;
2024 }
2025 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2026- printk("Module doesn't contain .plt or .init.plt sections.\n");
2027+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2028 return -ENOEXEC;
2029 }
2030
2031@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *locati
2032
2033 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2034 /* Init, or core PLT? */
2035- if (location >= mod->module_core
2036- && location < mod->module_core + mod->core_size)
2037+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2038+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2039 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2040- else
2041+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2042+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2043 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2044+ else {
2045+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2046+ return ~0UL;
2047+ }
2048
2049 /* Find this entry, or if that fails, the next avail. entry */
2050 while (entry->jump[0]) {
2051diff -urNp linux-3.1.1/arch/powerpc/kernel/process.c linux-3.1.1/arch/powerpc/kernel/process.c
2052--- linux-3.1.1/arch/powerpc/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2053+++ linux-3.1.1/arch/powerpc/kernel/process.c 2011-11-16 18:40:08.000000000 -0500
2054@@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs)
2055 * Lookup NIP late so we have the best change of getting the
2056 * above info out without failing
2057 */
2058- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2059- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2060+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2061+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2062 #endif
2063 show_stack(current, (unsigned long *) regs->gpr[1]);
2064 if (!user_mode(regs))
2065@@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk,
2066 newsp = stack[0];
2067 ip = stack[STACK_FRAME_LR_SAVE];
2068 if (!firstframe || ip != lr) {
2069- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2070+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2071 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2072 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2073- printk(" (%pS)",
2074+ printk(" (%pA)",
2075 (void *)current->ret_stack[curr_frame].ret);
2076 curr_frame--;
2077 }
2078@@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk,
2079 struct pt_regs *regs = (struct pt_regs *)
2080 (sp + STACK_FRAME_OVERHEAD);
2081 lr = regs->link;
2082- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2083+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2084 regs->trap, (void *)regs->nip, (void *)lr);
2085 firstframe = 1;
2086 }
2087@@ -1285,58 +1285,3 @@ void thread_info_cache_init(void)
2088 }
2089
2090 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2091-
2092-unsigned long arch_align_stack(unsigned long sp)
2093-{
2094- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2095- sp -= get_random_int() & ~PAGE_MASK;
2096- return sp & ~0xf;
2097-}
2098-
2099-static inline unsigned long brk_rnd(void)
2100-{
2101- unsigned long rnd = 0;
2102-
2103- /* 8MB for 32bit, 1GB for 64bit */
2104- if (is_32bit_task())
2105- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2106- else
2107- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2108-
2109- return rnd << PAGE_SHIFT;
2110-}
2111-
2112-unsigned long arch_randomize_brk(struct mm_struct *mm)
2113-{
2114- unsigned long base = mm->brk;
2115- unsigned long ret;
2116-
2117-#ifdef CONFIG_PPC_STD_MMU_64
2118- /*
2119- * If we are using 1TB segments and we are allowed to randomise
2120- * the heap, we can put it above 1TB so it is backed by a 1TB
2121- * segment. Otherwise the heap will be in the bottom 1TB
2122- * which always uses 256MB segments and this may result in a
2123- * performance penalty.
2124- */
2125- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2126- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2127-#endif
2128-
2129- ret = PAGE_ALIGN(base + brk_rnd());
2130-
2131- if (ret < mm->brk)
2132- return mm->brk;
2133-
2134- return ret;
2135-}
2136-
2137-unsigned long randomize_et_dyn(unsigned long base)
2138-{
2139- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2140-
2141- if (ret < base)
2142- return base;
2143-
2144- return ret;
2145-}
2146diff -urNp linux-3.1.1/arch/powerpc/kernel/signal_32.c linux-3.1.1/arch/powerpc/kernel/signal_32.c
2147--- linux-3.1.1/arch/powerpc/kernel/signal_32.c 2011-11-11 15:19:27.000000000 -0500
2148+++ linux-3.1.1/arch/powerpc/kernel/signal_32.c 2011-11-16 18:39:07.000000000 -0500
2149@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2150 /* Save user registers on the stack */
2151 frame = &rt_sf->uc.uc_mcontext;
2152 addr = frame;
2153- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2154+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2155 if (save_user_regs(regs, frame, 0, 1))
2156 goto badframe;
2157 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2158diff -urNp linux-3.1.1/arch/powerpc/kernel/signal_64.c linux-3.1.1/arch/powerpc/kernel/signal_64.c
2159--- linux-3.1.1/arch/powerpc/kernel/signal_64.c 2011-11-11 15:19:27.000000000 -0500
2160+++ linux-3.1.1/arch/powerpc/kernel/signal_64.c 2011-11-16 18:39:07.000000000 -0500
2161@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2162 current->thread.fpscr.val = 0;
2163
2164 /* Set up to return from userspace. */
2165- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2166+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2167 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2168 } else {
2169 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2170diff -urNp linux-3.1.1/arch/powerpc/kernel/traps.c linux-3.1.1/arch/powerpc/kernel/traps.c
2171--- linux-3.1.1/arch/powerpc/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
2172+++ linux-3.1.1/arch/powerpc/kernel/traps.c 2011-11-16 18:40:08.000000000 -0500
2173@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2174 static inline void pmac_backlight_unblank(void) { }
2175 #endif
2176
2177+extern void gr_handle_kernel_exploit(void);
2178+
2179 int die(const char *str, struct pt_regs *regs, long err)
2180 {
2181 static struct {
2182@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2183 if (panic_on_oops)
2184 panic("Fatal exception");
2185
2186+ gr_handle_kernel_exploit();
2187+
2188 oops_exit();
2189 do_exit(err);
2190
2191diff -urNp linux-3.1.1/arch/powerpc/kernel/vdso.c linux-3.1.1/arch/powerpc/kernel/vdso.c
2192--- linux-3.1.1/arch/powerpc/kernel/vdso.c 2011-11-11 15:19:27.000000000 -0500
2193+++ linux-3.1.1/arch/powerpc/kernel/vdso.c 2011-11-16 18:39:07.000000000 -0500
2194@@ -36,6 +36,7 @@
2195 #include <asm/firmware.h>
2196 #include <asm/vdso.h>
2197 #include <asm/vdso_datapage.h>
2198+#include <asm/mman.h>
2199
2200 #include "setup.h"
2201
2202@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2203 vdso_base = VDSO32_MBASE;
2204 #endif
2205
2206- current->mm->context.vdso_base = 0;
2207+ current->mm->context.vdso_base = ~0UL;
2208
2209 /* vDSO has a problem and was disabled, just don't "enable" it for the
2210 * process
2211@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2212 vdso_base = get_unmapped_area(NULL, vdso_base,
2213 (vdso_pages << PAGE_SHIFT) +
2214 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2215- 0, 0);
2216+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2217 if (IS_ERR_VALUE(vdso_base)) {
2218 rc = vdso_base;
2219 goto fail_mmapsem;
2220diff -urNp linux-3.1.1/arch/powerpc/lib/usercopy_64.c linux-3.1.1/arch/powerpc/lib/usercopy_64.c
2221--- linux-3.1.1/arch/powerpc/lib/usercopy_64.c 2011-11-11 15:19:27.000000000 -0500
2222+++ linux-3.1.1/arch/powerpc/lib/usercopy_64.c 2011-11-16 18:39:07.000000000 -0500
2223@@ -9,22 +9,6 @@
2224 #include <linux/module.h>
2225 #include <asm/uaccess.h>
2226
2227-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2228-{
2229- if (likely(access_ok(VERIFY_READ, from, n)))
2230- n = __copy_from_user(to, from, n);
2231- else
2232- memset(to, 0, n);
2233- return n;
2234-}
2235-
2236-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2237-{
2238- if (likely(access_ok(VERIFY_WRITE, to, n)))
2239- n = __copy_to_user(to, from, n);
2240- return n;
2241-}
2242-
2243 unsigned long copy_in_user(void __user *to, const void __user *from,
2244 unsigned long n)
2245 {
2246@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2247 return n;
2248 }
2249
2250-EXPORT_SYMBOL(copy_from_user);
2251-EXPORT_SYMBOL(copy_to_user);
2252 EXPORT_SYMBOL(copy_in_user);
2253
2254diff -urNp linux-3.1.1/arch/powerpc/mm/fault.c linux-3.1.1/arch/powerpc/mm/fault.c
2255--- linux-3.1.1/arch/powerpc/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
2256+++ linux-3.1.1/arch/powerpc/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
2257@@ -32,6 +32,10 @@
2258 #include <linux/perf_event.h>
2259 #include <linux/magic.h>
2260 #include <linux/ratelimit.h>
2261+#include <linux/slab.h>
2262+#include <linux/pagemap.h>
2263+#include <linux/compiler.h>
2264+#include <linux/unistd.h>
2265
2266 #include <asm/firmware.h>
2267 #include <asm/page.h>
2268@@ -43,6 +47,7 @@
2269 #include <asm/tlbflush.h>
2270 #include <asm/siginfo.h>
2271 #include <mm/mmu_decl.h>
2272+#include <asm/ptrace.h>
2273
2274 #ifdef CONFIG_KPROBES
2275 static inline int notify_page_fault(struct pt_regs *regs)
2276@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2277 }
2278 #endif
2279
2280+#ifdef CONFIG_PAX_PAGEEXEC
2281+/*
2282+ * PaX: decide what to do with offenders (regs->nip = fault address)
2283+ *
2284+ * returns 1 when task should be killed
2285+ */
2286+static int pax_handle_fetch_fault(struct pt_regs *regs)
2287+{
2288+ return 1;
2289+}
2290+
2291+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2292+{
2293+ unsigned long i;
2294+
2295+ printk(KERN_ERR "PAX: bytes at PC: ");
2296+ for (i = 0; i < 5; i++) {
2297+ unsigned int c;
2298+ if (get_user(c, (unsigned int __user *)pc+i))
2299+ printk(KERN_CONT "???????? ");
2300+ else
2301+ printk(KERN_CONT "%08x ", c);
2302+ }
2303+ printk("\n");
2304+}
2305+#endif
2306+
2307 /*
2308 * Check whether the instruction at regs->nip is a store using
2309 * an update addressing form which will update r1.
2310@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2311 * indicate errors in DSISR but can validly be set in SRR1.
2312 */
2313 if (trap == 0x400)
2314- error_code &= 0x48200000;
2315+ error_code &= 0x58200000;
2316 else
2317 is_write = error_code & DSISR_ISSTORE;
2318 #else
2319@@ -259,7 +291,7 @@ good_area:
2320 * "undefined". Of those that can be set, this is the only
2321 * one which seems bad.
2322 */
2323- if (error_code & 0x10000000)
2324+ if (error_code & DSISR_GUARDED)
2325 /* Guarded storage error. */
2326 goto bad_area;
2327 #endif /* CONFIG_8xx */
2328@@ -274,7 +306,7 @@ good_area:
2329 * processors use the same I/D cache coherency mechanism
2330 * as embedded.
2331 */
2332- if (error_code & DSISR_PROTFAULT)
2333+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2334 goto bad_area;
2335 #endif /* CONFIG_PPC_STD_MMU */
2336
2337@@ -343,6 +375,23 @@ bad_area:
2338 bad_area_nosemaphore:
2339 /* User mode accesses cause a SIGSEGV */
2340 if (user_mode(regs)) {
2341+
2342+#ifdef CONFIG_PAX_PAGEEXEC
2343+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2344+#ifdef CONFIG_PPC_STD_MMU
2345+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2346+#else
2347+ if (is_exec && regs->nip == address) {
2348+#endif
2349+ switch (pax_handle_fetch_fault(regs)) {
2350+ }
2351+
2352+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2353+ do_group_exit(SIGKILL);
2354+ }
2355+ }
2356+#endif
2357+
2358 _exception(SIGSEGV, regs, code, address);
2359 return 0;
2360 }
2361diff -urNp linux-3.1.1/arch/powerpc/mm/mmap_64.c linux-3.1.1/arch/powerpc/mm/mmap_64.c
2362--- linux-3.1.1/arch/powerpc/mm/mmap_64.c 2011-11-11 15:19:27.000000000 -0500
2363+++ linux-3.1.1/arch/powerpc/mm/mmap_64.c 2011-11-16 18:39:07.000000000 -0500
2364@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2365 */
2366 if (mmap_is_legacy()) {
2367 mm->mmap_base = TASK_UNMAPPED_BASE;
2368+
2369+#ifdef CONFIG_PAX_RANDMMAP
2370+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2371+ mm->mmap_base += mm->delta_mmap;
2372+#endif
2373+
2374 mm->get_unmapped_area = arch_get_unmapped_area;
2375 mm->unmap_area = arch_unmap_area;
2376 } else {
2377 mm->mmap_base = mmap_base();
2378+
2379+#ifdef CONFIG_PAX_RANDMMAP
2380+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2381+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2382+#endif
2383+
2384 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2385 mm->unmap_area = arch_unmap_area_topdown;
2386 }
2387diff -urNp linux-3.1.1/arch/powerpc/mm/slice.c linux-3.1.1/arch/powerpc/mm/slice.c
2388--- linux-3.1.1/arch/powerpc/mm/slice.c 2011-11-11 15:19:27.000000000 -0500
2389+++ linux-3.1.1/arch/powerpc/mm/slice.c 2011-11-16 18:39:07.000000000 -0500
2390@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2391 if ((mm->task_size - len) < addr)
2392 return 0;
2393 vma = find_vma(mm, addr);
2394- return (!vma || (addr + len) <= vma->vm_start);
2395+ return check_heap_stack_gap(vma, addr, len);
2396 }
2397
2398 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2399@@ -256,7 +256,7 @@ full_search:
2400 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2401 continue;
2402 }
2403- if (!vma || addr + len <= vma->vm_start) {
2404+ if (check_heap_stack_gap(vma, addr, len)) {
2405 /*
2406 * Remember the place where we stopped the search:
2407 */
2408@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2409 }
2410 }
2411
2412- addr = mm->mmap_base;
2413- while (addr > len) {
2414+ if (mm->mmap_base < len)
2415+ addr = -ENOMEM;
2416+ else
2417+ addr = mm->mmap_base - len;
2418+
2419+ while (!IS_ERR_VALUE(addr)) {
2420 /* Go down by chunk size */
2421- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2422+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2423
2424 /* Check for hit with different page size */
2425 mask = slice_range_to_mask(addr, len);
2426@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2427 * return with success:
2428 */
2429 vma = find_vma(mm, addr);
2430- if (!vma || (addr + len) <= vma->vm_start) {
2431+ if (check_heap_stack_gap(vma, addr, len)) {
2432 /* remember the address as a hint for next time */
2433 if (use_cache)
2434 mm->free_area_cache = addr;
2435@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2436 mm->cached_hole_size = vma->vm_start - addr;
2437
2438 /* try just below the current vma->vm_start */
2439- addr = vma->vm_start;
2440+ addr = skip_heap_stack_gap(vma, len);
2441 }
2442
2443 /*
2444@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2445 if (fixed && addr > (mm->task_size - len))
2446 return -EINVAL;
2447
2448+#ifdef CONFIG_PAX_RANDMMAP
2449+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2450+ addr = 0;
2451+#endif
2452+
2453 /* If hint, make sure it matches our alignment restrictions */
2454 if (!fixed && addr) {
2455 addr = _ALIGN_UP(addr, 1ul << pshift);
2456diff -urNp linux-3.1.1/arch/s390/include/asm/elf.h linux-3.1.1/arch/s390/include/asm/elf.h
2457--- linux-3.1.1/arch/s390/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
2458+++ linux-3.1.1/arch/s390/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
2459@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2460 the loader. We need to make sure that it is out of the way of the program
2461 that it will "exec", and that there is sufficient room for the brk. */
2462
2463-extern unsigned long randomize_et_dyn(unsigned long base);
2464-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2465+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2466+
2467+#ifdef CONFIG_PAX_ASLR
2468+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2469+
2470+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2471+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2472+#endif
2473
2474 /* This yields a mask that user programs can use to figure out what
2475 instruction set this CPU supports. */
2476@@ -211,7 +217,4 @@ struct linux_binprm;
2477 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2478 int arch_setup_additional_pages(struct linux_binprm *, int);
2479
2480-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2481-#define arch_randomize_brk arch_randomize_brk
2482-
2483 #endif
2484diff -urNp linux-3.1.1/arch/s390/include/asm/system.h linux-3.1.1/arch/s390/include/asm/system.h
2485--- linux-3.1.1/arch/s390/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
2486+++ linux-3.1.1/arch/s390/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
2487@@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *co
2488 extern void (*_machine_halt)(void);
2489 extern void (*_machine_power_off)(void);
2490
2491-extern unsigned long arch_align_stack(unsigned long sp);
2492+#define arch_align_stack(x) ((x) & ~0xfUL)
2493
2494 static inline int tprot(unsigned long addr)
2495 {
2496diff -urNp linux-3.1.1/arch/s390/include/asm/uaccess.h linux-3.1.1/arch/s390/include/asm/uaccess.h
2497--- linux-3.1.1/arch/s390/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
2498+++ linux-3.1.1/arch/s390/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
2499@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2500 copy_to_user(void __user *to, const void *from, unsigned long n)
2501 {
2502 might_fault();
2503+
2504+ if ((long)n < 0)
2505+ return n;
2506+
2507 if (access_ok(VERIFY_WRITE, to, n))
2508 n = __copy_to_user(to, from, n);
2509 return n;
2510@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2511 static inline unsigned long __must_check
2512 __copy_from_user(void *to, const void __user *from, unsigned long n)
2513 {
2514+ if ((long)n < 0)
2515+ return n;
2516+
2517 if (__builtin_constant_p(n) && (n <= 256))
2518 return uaccess.copy_from_user_small(n, from, to);
2519 else
2520@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2521 unsigned int sz = __compiletime_object_size(to);
2522
2523 might_fault();
2524+
2525+ if ((long)n < 0)
2526+ return n;
2527+
2528 if (unlikely(sz != -1 && sz < n)) {
2529 copy_from_user_overflow();
2530 return n;
2531diff -urNp linux-3.1.1/arch/s390/kernel/module.c linux-3.1.1/arch/s390/kernel/module.c
2532--- linux-3.1.1/arch/s390/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
2533+++ linux-3.1.1/arch/s390/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
2534@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2535
2536 /* Increase core size by size of got & plt and set start
2537 offsets for got and plt. */
2538- me->core_size = ALIGN(me->core_size, 4);
2539- me->arch.got_offset = me->core_size;
2540- me->core_size += me->arch.got_size;
2541- me->arch.plt_offset = me->core_size;
2542- me->core_size += me->arch.plt_size;
2543+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2544+ me->arch.got_offset = me->core_size_rw;
2545+ me->core_size_rw += me->arch.got_size;
2546+ me->arch.plt_offset = me->core_size_rx;
2547+ me->core_size_rx += me->arch.plt_size;
2548 return 0;
2549 }
2550
2551@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2552 if (info->got_initialized == 0) {
2553 Elf_Addr *gotent;
2554
2555- gotent = me->module_core + me->arch.got_offset +
2556+ gotent = me->module_core_rw + me->arch.got_offset +
2557 info->got_offset;
2558 *gotent = val;
2559 info->got_initialized = 1;
2560@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2561 else if (r_type == R_390_GOTENT ||
2562 r_type == R_390_GOTPLTENT)
2563 *(unsigned int *) loc =
2564- (val + (Elf_Addr) me->module_core - loc) >> 1;
2565+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2566 else if (r_type == R_390_GOT64 ||
2567 r_type == R_390_GOTPLT64)
2568 *(unsigned long *) loc = val;
2569@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2570 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2571 if (info->plt_initialized == 0) {
2572 unsigned int *ip;
2573- ip = me->module_core + me->arch.plt_offset +
2574+ ip = me->module_core_rx + me->arch.plt_offset +
2575 info->plt_offset;
2576 #ifndef CONFIG_64BIT
2577 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2578@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2579 val - loc + 0xffffUL < 0x1ffffeUL) ||
2580 (r_type == R_390_PLT32DBL &&
2581 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2582- val = (Elf_Addr) me->module_core +
2583+ val = (Elf_Addr) me->module_core_rx +
2584 me->arch.plt_offset +
2585 info->plt_offset;
2586 val += rela->r_addend - loc;
2587@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2588 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2589 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2590 val = val + rela->r_addend -
2591- ((Elf_Addr) me->module_core + me->arch.got_offset);
2592+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2593 if (r_type == R_390_GOTOFF16)
2594 *(unsigned short *) loc = val;
2595 else if (r_type == R_390_GOTOFF32)
2596@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2597 break;
2598 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2599 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2600- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2601+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2602 rela->r_addend - loc;
2603 if (r_type == R_390_GOTPC)
2604 *(unsigned int *) loc = val;
2605diff -urNp linux-3.1.1/arch/s390/kernel/process.c linux-3.1.1/arch/s390/kernel/process.c
2606--- linux-3.1.1/arch/s390/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2607+++ linux-3.1.1/arch/s390/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
2608@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2609 }
2610 return 0;
2611 }
2612-
2613-unsigned long arch_align_stack(unsigned long sp)
2614-{
2615- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2616- sp -= get_random_int() & ~PAGE_MASK;
2617- return sp & ~0xf;
2618-}
2619-
2620-static inline unsigned long brk_rnd(void)
2621-{
2622- /* 8MB for 32bit, 1GB for 64bit */
2623- if (is_32bit_task())
2624- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2625- else
2626- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2627-}
2628-
2629-unsigned long arch_randomize_brk(struct mm_struct *mm)
2630-{
2631- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2632-
2633- if (ret < mm->brk)
2634- return mm->brk;
2635- return ret;
2636-}
2637-
2638-unsigned long randomize_et_dyn(unsigned long base)
2639-{
2640- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2641-
2642- if (!(current->flags & PF_RANDOMIZE))
2643- return base;
2644- if (ret < base)
2645- return base;
2646- return ret;
2647-}
2648diff -urNp linux-3.1.1/arch/s390/kernel/setup.c linux-3.1.1/arch/s390/kernel/setup.c
2649--- linux-3.1.1/arch/s390/kernel/setup.c 2011-11-11 15:19:27.000000000 -0500
2650+++ linux-3.1.1/arch/s390/kernel/setup.c 2011-11-16 18:39:07.000000000 -0500
2651@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2652 }
2653 early_param("mem", early_parse_mem);
2654
2655-unsigned int user_mode = HOME_SPACE_MODE;
2656+unsigned int user_mode = SECONDARY_SPACE_MODE;
2657 EXPORT_SYMBOL_GPL(user_mode);
2658
2659 static int set_amode_and_uaccess(unsigned long user_amode,
2660diff -urNp linux-3.1.1/arch/s390/mm/mmap.c linux-3.1.1/arch/s390/mm/mmap.c
2661--- linux-3.1.1/arch/s390/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
2662+++ linux-3.1.1/arch/s390/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
2663@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2664 */
2665 if (mmap_is_legacy()) {
2666 mm->mmap_base = TASK_UNMAPPED_BASE;
2667+
2668+#ifdef CONFIG_PAX_RANDMMAP
2669+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2670+ mm->mmap_base += mm->delta_mmap;
2671+#endif
2672+
2673 mm->get_unmapped_area = arch_get_unmapped_area;
2674 mm->unmap_area = arch_unmap_area;
2675 } else {
2676 mm->mmap_base = mmap_base();
2677+
2678+#ifdef CONFIG_PAX_RANDMMAP
2679+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2680+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2681+#endif
2682+
2683 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2684 mm->unmap_area = arch_unmap_area_topdown;
2685 }
2686@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = s390_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709diff -urNp linux-3.1.1/arch/score/include/asm/system.h linux-3.1.1/arch/score/include/asm/system.h
2710--- linux-3.1.1/arch/score/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
2711+++ linux-3.1.1/arch/score/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
2712@@ -17,7 +17,7 @@ do { \
2713 #define finish_arch_switch(prev) do {} while (0)
2714
2715 typedef void (*vi_handler_t)(void);
2716-extern unsigned long arch_align_stack(unsigned long sp);
2717+#define arch_align_stack(x) (x)
2718
2719 #define mb() barrier()
2720 #define rmb() barrier()
2721diff -urNp linux-3.1.1/arch/score/kernel/process.c linux-3.1.1/arch/score/kernel/process.c
2722--- linux-3.1.1/arch/score/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2723+++ linux-3.1.1/arch/score/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
2724@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2725
2726 return task_pt_regs(task)->cp0_epc;
2727 }
2728-
2729-unsigned long arch_align_stack(unsigned long sp)
2730-{
2731- return sp;
2732-}
2733diff -urNp linux-3.1.1/arch/sh/mm/mmap.c linux-3.1.1/arch/sh/mm/mmap.c
2734--- linux-3.1.1/arch/sh/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
2735+++ linux-3.1.1/arch/sh/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
2736@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2737 addr = PAGE_ALIGN(addr);
2738
2739 vma = find_vma(mm, addr);
2740- if (TASK_SIZE - len >= addr &&
2741- (!vma || addr + len <= vma->vm_start))
2742+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2743 return addr;
2744 }
2745
2746@@ -106,7 +105,7 @@ full_search:
2747 }
2748 return -ENOMEM;
2749 }
2750- if (likely(!vma || addr + len <= vma->vm_start)) {
2751+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2752 /*
2753 * Remember the place where we stopped the search:
2754 */
2755@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2756 addr = PAGE_ALIGN(addr);
2757
2758 vma = find_vma(mm, addr);
2759- if (TASK_SIZE - len >= addr &&
2760- (!vma || addr + len <= vma->vm_start))
2761+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2762 return addr;
2763 }
2764
2765@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2766 /* make sure it can fit in the remaining address space */
2767 if (likely(addr > len)) {
2768 vma = find_vma(mm, addr-len);
2769- if (!vma || addr <= vma->vm_start) {
2770+ if (check_heap_stack_gap(vma, addr - len, len)) {
2771 /* remember the address as a hint for next time */
2772 return (mm->free_area_cache = addr-len);
2773 }
2774@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2775 if (unlikely(mm->mmap_base < len))
2776 goto bottomup;
2777
2778- addr = mm->mmap_base-len;
2779- if (do_colour_align)
2780- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2781+ addr = mm->mmap_base - len;
2782
2783 do {
2784+ if (do_colour_align)
2785+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2786 /*
2787 * Lookup failure means no vma is above this address,
2788 * else if new region fits below vma->vm_start,
2789 * return with success:
2790 */
2791 vma = find_vma(mm, addr);
2792- if (likely(!vma || addr+len <= vma->vm_start)) {
2793+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr);
2796 }
2797@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2798 mm->cached_hole_size = vma->vm_start - addr;
2799
2800 /* try just below the current vma->vm_start */
2801- addr = vma->vm_start-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804- } while (likely(len < vma->vm_start));
2805+ addr = skip_heap_stack_gap(vma, len);
2806+ } while (!IS_ERR_VALUE(addr));
2807
2808 bottomup:
2809 /*
2810diff -urNp linux-3.1.1/arch/sparc/include/asm/atomic_64.h linux-3.1.1/arch/sparc/include/asm/atomic_64.h
2811--- linux-3.1.1/arch/sparc/include/asm/atomic_64.h 2011-11-11 15:19:27.000000000 -0500
2812+++ linux-3.1.1/arch/sparc/include/asm/atomic_64.h 2011-11-16 18:39:07.000000000 -0500
2813@@ -14,18 +14,40 @@
2814 #define ATOMIC64_INIT(i) { (i) }
2815
2816 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2817+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2818+{
2819+ return v->counter;
2820+}
2821 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2822+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2823+{
2824+ return v->counter;
2825+}
2826
2827 #define atomic_set(v, i) (((v)->counter) = i)
2828+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2829+{
2830+ v->counter = i;
2831+}
2832 #define atomic64_set(v, i) (((v)->counter) = i)
2833+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2834+{
2835+ v->counter = i;
2836+}
2837
2838 extern void atomic_add(int, atomic_t *);
2839+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2840 extern void atomic64_add(long, atomic64_t *);
2841+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2842 extern void atomic_sub(int, atomic_t *);
2843+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2844 extern void atomic64_sub(long, atomic64_t *);
2845+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2846
2847 extern int atomic_add_ret(int, atomic_t *);
2848+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2849 extern long atomic64_add_ret(long, atomic64_t *);
2850+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2851 extern int atomic_sub_ret(int, atomic_t *);
2852 extern long atomic64_sub_ret(long, atomic64_t *);
2853
2854@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2855 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2856
2857 #define atomic_inc_return(v) atomic_add_ret(1, v)
2858+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2859+{
2860+ return atomic_add_ret_unchecked(1, v);
2861+}
2862 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2863+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2864+{
2865+ return atomic64_add_ret_unchecked(1, v);
2866+}
2867
2868 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2869 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2870
2871 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2872+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2873+{
2874+ return atomic_add_ret_unchecked(i, v);
2875+}
2876 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2877+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2878+{
2879+ return atomic64_add_ret_unchecked(i, v);
2880+}
2881
2882 /*
2883 * atomic_inc_and_test - increment and test
2884@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2885 * other cases.
2886 */
2887 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2888+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2889+{
2890+ return atomic_inc_return_unchecked(v) == 0;
2891+}
2892 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2893
2894 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2895@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomi
2896 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2897
2898 #define atomic_inc(v) atomic_add(1, v)
2899+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2900+{
2901+ atomic_add_unchecked(1, v);
2902+}
2903 #define atomic64_inc(v) atomic64_add(1, v)
2904+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2905+{
2906+ atomic64_add_unchecked(1, v);
2907+}
2908
2909 #define atomic_dec(v) atomic_sub(1, v)
2910+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2911+{
2912+ atomic_sub_unchecked(1, v);
2913+}
2914 #define atomic64_dec(v) atomic64_sub(1, v)
2915+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2916+{
2917+ atomic64_sub_unchecked(1, v);
2918+}
2919
2920 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2921 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2922
2923 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2924+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2925+{
2926+ return cmpxchg(&v->counter, old, new);
2927+}
2928 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2929+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2930+{
2931+ return xchg(&v->counter, new);
2932+}
2933
2934 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
2935 {
2936- int c, old;
2937+ int c, old, new;
2938 c = atomic_read(v);
2939 for (;;) {
2940- if (unlikely(c == (u)))
2941+ if (unlikely(c == u))
2942 break;
2943- old = atomic_cmpxchg((v), c, c + (a));
2944+
2945+ asm volatile("addcc %2, %0, %0\n"
2946+
2947+#ifdef CONFIG_PAX_REFCOUNT
2948+ "tvs %%icc, 6\n"
2949+#endif
2950+
2951+ : "=r" (new)
2952+ : "0" (c), "ir" (a)
2953+ : "cc");
2954+
2955+ old = atomic_cmpxchg(v, c, new);
2956 if (likely(old == c))
2957 break;
2958 c = old;
2959@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(at
2960 #define atomic64_cmpxchg(v, o, n) \
2961 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2962 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2963+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2964+{
2965+ return xchg(&v->counter, new);
2966+}
2967
2968 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2969 {
2970- long c, old;
2971+ long c, old, new;
2972 c = atomic64_read(v);
2973 for (;;) {
2974- if (unlikely(c == (u)))
2975+ if (unlikely(c == u))
2976 break;
2977- old = atomic64_cmpxchg((v), c, c + (a));
2978+
2979+ asm volatile("addcc %2, %0, %0\n"
2980+
2981+#ifdef CONFIG_PAX_REFCOUNT
2982+ "tvs %%xcc, 6\n"
2983+#endif
2984+
2985+ : "=r" (new)
2986+ : "0" (c), "ir" (a)
2987+ : "cc");
2988+
2989+ old = atomic64_cmpxchg(v, c, new);
2990 if (likely(old == c))
2991 break;
2992 c = old;
2993 }
2994- return c != (u);
2995+ return c != u;
2996 }
2997
2998 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2999diff -urNp linux-3.1.1/arch/sparc/include/asm/cache.h linux-3.1.1/arch/sparc/include/asm/cache.h
3000--- linux-3.1.1/arch/sparc/include/asm/cache.h 2011-11-11 15:19:27.000000000 -0500
3001+++ linux-3.1.1/arch/sparc/include/asm/cache.h 2011-11-16 18:39:07.000000000 -0500
3002@@ -10,7 +10,7 @@
3003 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3004
3005 #define L1_CACHE_SHIFT 5
3006-#define L1_CACHE_BYTES 32
3007+#define L1_CACHE_BYTES 32UL
3008
3009 #ifdef CONFIG_SPARC32
3010 #define SMP_CACHE_BYTES_SHIFT 5
3011diff -urNp linux-3.1.1/arch/sparc/include/asm/elf_32.h linux-3.1.1/arch/sparc/include/asm/elf_32.h
3012--- linux-3.1.1/arch/sparc/include/asm/elf_32.h 2011-11-11 15:19:27.000000000 -0500
3013+++ linux-3.1.1/arch/sparc/include/asm/elf_32.h 2011-11-16 18:39:07.000000000 -0500
3014@@ -114,6 +114,13 @@ typedef struct {
3015
3016 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3017
3018+#ifdef CONFIG_PAX_ASLR
3019+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3020+
3021+#define PAX_DELTA_MMAP_LEN 16
3022+#define PAX_DELTA_STACK_LEN 16
3023+#endif
3024+
3025 /* This yields a mask that user programs can use to figure out what
3026 instruction set this cpu supports. This can NOT be done in userspace
3027 on Sparc. */
3028diff -urNp linux-3.1.1/arch/sparc/include/asm/elf_64.h linux-3.1.1/arch/sparc/include/asm/elf_64.h
3029--- linux-3.1.1/arch/sparc/include/asm/elf_64.h 2011-11-11 15:19:27.000000000 -0500
3030+++ linux-3.1.1/arch/sparc/include/asm/elf_64.h 2011-11-16 18:39:07.000000000 -0500
3031@@ -180,6 +180,13 @@ typedef struct {
3032 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3033 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3034
3035+#ifdef CONFIG_PAX_ASLR
3036+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3037+
3038+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3039+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3040+#endif
3041+
3042 extern unsigned long sparc64_elf_hwcap;
3043 #define ELF_HWCAP sparc64_elf_hwcap
3044
3045diff -urNp linux-3.1.1/arch/sparc/include/asm/pgtable_32.h linux-3.1.1/arch/sparc/include/asm/pgtable_32.h
3046--- linux-3.1.1/arch/sparc/include/asm/pgtable_32.h 2011-11-11 15:19:27.000000000 -0500
3047+++ linux-3.1.1/arch/sparc/include/asm/pgtable_32.h 2011-11-16 18:39:07.000000000 -0500
3048@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3049 BTFIXUPDEF_INT(page_none)
3050 BTFIXUPDEF_INT(page_copy)
3051 BTFIXUPDEF_INT(page_readonly)
3052+
3053+#ifdef CONFIG_PAX_PAGEEXEC
3054+BTFIXUPDEF_INT(page_shared_noexec)
3055+BTFIXUPDEF_INT(page_copy_noexec)
3056+BTFIXUPDEF_INT(page_readonly_noexec)
3057+#endif
3058+
3059 BTFIXUPDEF_INT(page_kernel)
3060
3061 #define PMD_SHIFT SUN4C_PMD_SHIFT
3062@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3063 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3064 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3065
3066+#ifdef CONFIG_PAX_PAGEEXEC
3067+extern pgprot_t PAGE_SHARED_NOEXEC;
3068+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3069+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3070+#else
3071+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3072+# define PAGE_COPY_NOEXEC PAGE_COPY
3073+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3074+#endif
3075+
3076 extern unsigned long page_kernel;
3077
3078 #ifdef MODULE
3079diff -urNp linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h
3080--- linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h 2011-11-11 15:19:27.000000000 -0500
3081+++ linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h 2011-11-16 18:39:07.000000000 -0500
3082@@ -115,6 +115,13 @@
3083 SRMMU_EXEC | SRMMU_REF)
3084 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3085 SRMMU_EXEC | SRMMU_REF)
3086+
3087+#ifdef CONFIG_PAX_PAGEEXEC
3088+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3089+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3090+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3091+#endif
3092+
3093 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3094 SRMMU_DIRTY | SRMMU_REF)
3095
3096diff -urNp linux-3.1.1/arch/sparc/include/asm/spinlock_64.h linux-3.1.1/arch/sparc/include/asm/spinlock_64.h
3097--- linux-3.1.1/arch/sparc/include/asm/spinlock_64.h 2011-11-11 15:19:27.000000000 -0500
3098+++ linux-3.1.1/arch/sparc/include/asm/spinlock_64.h 2011-11-16 18:39:07.000000000 -0500
3099@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3100
3101 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3102
3103-static void inline arch_read_lock(arch_rwlock_t *lock)
3104+static inline void arch_read_lock(arch_rwlock_t *lock)
3105 {
3106 unsigned long tmp1, tmp2;
3107
3108 __asm__ __volatile__ (
3109 "1: ldsw [%2], %0\n"
3110 " brlz,pn %0, 2f\n"
3111-"4: add %0, 1, %1\n"
3112+"4: addcc %0, 1, %1\n"
3113+
3114+#ifdef CONFIG_PAX_REFCOUNT
3115+" tvs %%icc, 6\n"
3116+#endif
3117+
3118 " cas [%2], %0, %1\n"
3119 " cmp %0, %1\n"
3120 " bne,pn %%icc, 1b\n"
3121@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3122 " .previous"
3123 : "=&r" (tmp1), "=&r" (tmp2)
3124 : "r" (lock)
3125- : "memory");
3126+ : "memory", "cc");
3127 }
3128
3129-static int inline arch_read_trylock(arch_rwlock_t *lock)
3130+static inline int arch_read_trylock(arch_rwlock_t *lock)
3131 {
3132 int tmp1, tmp2;
3133
3134@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3135 "1: ldsw [%2], %0\n"
3136 " brlz,a,pn %0, 2f\n"
3137 " mov 0, %0\n"
3138-" add %0, 1, %1\n"
3139+" addcc %0, 1, %1\n"
3140+
3141+#ifdef CONFIG_PAX_REFCOUNT
3142+" tvs %%icc, 6\n"
3143+#endif
3144+
3145 " cas [%2], %0, %1\n"
3146 " cmp %0, %1\n"
3147 " bne,pn %%icc, 1b\n"
3148@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3149 return tmp1;
3150 }
3151
3152-static void inline arch_read_unlock(arch_rwlock_t *lock)
3153+static inline void arch_read_unlock(arch_rwlock_t *lock)
3154 {
3155 unsigned long tmp1, tmp2;
3156
3157 __asm__ __volatile__(
3158 "1: lduw [%2], %0\n"
3159-" sub %0, 1, %1\n"
3160+" subcc %0, 1, %1\n"
3161+
3162+#ifdef CONFIG_PAX_REFCOUNT
3163+" tvs %%icc, 6\n"
3164+#endif
3165+
3166 " cas [%2], %0, %1\n"
3167 " cmp %0, %1\n"
3168 " bne,pn %%xcc, 1b\n"
3169@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3170 : "memory");
3171 }
3172
3173-static void inline arch_write_lock(arch_rwlock_t *lock)
3174+static inline void arch_write_lock(arch_rwlock_t *lock)
3175 {
3176 unsigned long mask, tmp1, tmp2;
3177
3178@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3179 : "memory");
3180 }
3181
3182-static void inline arch_write_unlock(arch_rwlock_t *lock)
3183+static inline void arch_write_unlock(arch_rwlock_t *lock)
3184 {
3185 __asm__ __volatile__(
3186 " stw %%g0, [%0]"
3187@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3188 : "memory");
3189 }
3190
3191-static int inline arch_write_trylock(arch_rwlock_t *lock)
3192+static inline int arch_write_trylock(arch_rwlock_t *lock)
3193 {
3194 unsigned long mask, tmp1, tmp2, result;
3195
3196diff -urNp linux-3.1.1/arch/sparc/include/asm/thread_info_32.h linux-3.1.1/arch/sparc/include/asm/thread_info_32.h
3197--- linux-3.1.1/arch/sparc/include/asm/thread_info_32.h 2011-11-11 15:19:27.000000000 -0500
3198+++ linux-3.1.1/arch/sparc/include/asm/thread_info_32.h 2011-11-16 18:39:07.000000000 -0500
3199@@ -50,6 +50,8 @@ struct thread_info {
3200 unsigned long w_saved;
3201
3202 struct restart_block restart_block;
3203+
3204+ unsigned long lowest_stack;
3205 };
3206
3207 /*
3208diff -urNp linux-3.1.1/arch/sparc/include/asm/thread_info_64.h linux-3.1.1/arch/sparc/include/asm/thread_info_64.h
3209--- linux-3.1.1/arch/sparc/include/asm/thread_info_64.h 2011-11-11 15:19:27.000000000 -0500
3210+++ linux-3.1.1/arch/sparc/include/asm/thread_info_64.h 2011-11-16 18:39:07.000000000 -0500
3211@@ -63,6 +63,8 @@ struct thread_info {
3212 struct pt_regs *kern_una_regs;
3213 unsigned int kern_una_insn;
3214
3215+ unsigned long lowest_stack;
3216+
3217 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3218 };
3219
3220diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess_32.h linux-3.1.1/arch/sparc/include/asm/uaccess_32.h
3221--- linux-3.1.1/arch/sparc/include/asm/uaccess_32.h 2011-11-11 15:19:27.000000000 -0500
3222+++ linux-3.1.1/arch/sparc/include/asm/uaccess_32.h 2011-11-16 18:39:07.000000000 -0500
3223@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3224
3225 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3226 {
3227- if (n && __access_ok((unsigned long) to, n))
3228+ if ((long)n < 0)
3229+ return n;
3230+
3231+ if (n && __access_ok((unsigned long) to, n)) {
3232+ if (!__builtin_constant_p(n))
3233+ check_object_size(from, n, true);
3234 return __copy_user(to, (__force void __user *) from, n);
3235- else
3236+ } else
3237 return n;
3238 }
3239
3240 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3241 {
3242+ if ((long)n < 0)
3243+ return n;
3244+
3245+ if (!__builtin_constant_p(n))
3246+ check_object_size(from, n, true);
3247+
3248 return __copy_user(to, (__force void __user *) from, n);
3249 }
3250
3251 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3252 {
3253- if (n && __access_ok((unsigned long) from, n))
3254+ if ((long)n < 0)
3255+ return n;
3256+
3257+ if (n && __access_ok((unsigned long) from, n)) {
3258+ if (!__builtin_constant_p(n))
3259+ check_object_size(to, n, false);
3260 return __copy_user((__force void __user *) to, from, n);
3261- else
3262+ } else
3263 return n;
3264 }
3265
3266 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3267 {
3268+ if ((long)n < 0)
3269+ return n;
3270+
3271 return __copy_user((__force void __user *) to, from, n);
3272 }
3273
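The (long)n < 0 guards added above reject a length whose sign bit is set before __copy_user() ever runs, returning it in the usual copy_*_user() "bytes not copied" convention. A userspace-only illustration of the failure mode being closed; guarded_copy() and the surrounding scaffolding are invented for the example:

#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
        if ((long)n < 0)        /* e.g. a negative ssize_t cast to unsigned long */
                return n;       /* report everything uncopied instead of copying ~4GB on 32-bit */
        /* __copy_user(to, from, n) would run here */
        return 0;
}

int main(void)
{
        long broken_len = -1;   /* a length calculation that underflowed */

        printf("left uncopied: %lu\n", guarded_copy((unsigned long)broken_len));
        return 0;
}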
3274diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess_64.h linux-3.1.1/arch/sparc/include/asm/uaccess_64.h
3275--- linux-3.1.1/arch/sparc/include/asm/uaccess_64.h 2011-11-11 15:19:27.000000000 -0500
3276+++ linux-3.1.1/arch/sparc/include/asm/uaccess_64.h 2011-11-16 18:39:07.000000000 -0500
3277@@ -10,6 +10,7 @@
3278 #include <linux/compiler.h>
3279 #include <linux/string.h>
3280 #include <linux/thread_info.h>
3281+#include <linux/kernel.h>
3282 #include <asm/asi.h>
3283 #include <asm/system.h>
3284 #include <asm/spitfire.h>
3285@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3286 static inline unsigned long __must_check
3287 copy_from_user(void *to, const void __user *from, unsigned long size)
3288 {
3289- unsigned long ret = ___copy_from_user(to, from, size);
3290+ unsigned long ret;
3291
3292+ if ((long)size < 0 || size > INT_MAX)
3293+ return size;
3294+
3295+ if (!__builtin_constant_p(size))
3296+ check_object_size(to, size, false);
3297+
3298+ ret = ___copy_from_user(to, from, size);
3299 if (unlikely(ret))
3300 ret = copy_from_user_fixup(to, from, size);
3301
3302@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3303 static inline unsigned long __must_check
3304 copy_to_user(void __user *to, const void *from, unsigned long size)
3305 {
3306- unsigned long ret = ___copy_to_user(to, from, size);
3307+ unsigned long ret;
3308+
3309+ if ((long)size < 0 || size > INT_MAX)
3310+ return size;
3311+
3312+ if (!__builtin_constant_p(size))
3313+ check_object_size(from, size, true);
3314
3315+ ret = ___copy_to_user(to, from, size);
3316 if (unlikely(ret))
3317 ret = copy_to_user_fixup(to, from, size);
3318 return ret;
3319diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess.h linux-3.1.1/arch/sparc/include/asm/uaccess.h
3320--- linux-3.1.1/arch/sparc/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
3321+++ linux-3.1.1/arch/sparc/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
3322@@ -1,5 +1,13 @@
3323 #ifndef ___ASM_SPARC_UACCESS_H
3324 #define ___ASM_SPARC_UACCESS_H
3325+
3326+#ifdef __KERNEL__
3327+#ifndef __ASSEMBLY__
3328+#include <linux/types.h>
3329+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3330+#endif
3331+#endif
3332+
3333 #if defined(__sparc__) && defined(__arch64__)
3334 #include <asm/uaccess_64.h>
3335 #else
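check_object_size(), declared above, is the hook this patch uses to validate the kernel-side object before a user copy; the uaccess_32.h/uaccess_64.h hunks call it only when the length is not a compile-time constant, so the constant-size fast path costs nothing. A small stand-in showing how __builtin_constant_p() makes that elision work; check_object_size_stub() and copy_checked() are invented for the demo:

#include <stdio.h>
#include <string.h>

static void check_object_size_stub(const void *ptr, unsigned long n, int to_user)
{
        printf("runtime check: %lu byte(s) %s user space\n",
               n, to_user ? "to" : "from");
        (void)ptr;
}

#define copy_checked(dst, src, n)                               \
do {                                                            \
        if (!__builtin_constant_p(n))                           \
                check_object_size_stub((src), (n), 1);          \
        memcpy((dst), (src), (n));                              \
} while (0)

int main(int argc, char **argv)
{
        char src[32] = "hello", dst[32];
        unsigned long n = (unsigned long)argc % sizeof(src);

        (void)argv;
        copy_checked(dst, src, sizeof(src));    /* constant size: check compiled out */
        copy_checked(dst, src, n);              /* runtime size: stub is called */
        return 0;
}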
3336diff -urNp linux-3.1.1/arch/sparc/kernel/Makefile linux-3.1.1/arch/sparc/kernel/Makefile
3337--- linux-3.1.1/arch/sparc/kernel/Makefile 2011-11-11 15:19:27.000000000 -0500
3338+++ linux-3.1.1/arch/sparc/kernel/Makefile 2011-11-16 18:39:07.000000000 -0500
3339@@ -3,7 +3,7 @@
3340 #
3341
3342 asflags-y := -ansi
3343-ccflags-y := -Werror
3344+#ccflags-y := -Werror
3345
3346 extra-y := head_$(BITS).o
3347 extra-y += init_task.o
3348diff -urNp linux-3.1.1/arch/sparc/kernel/process_32.c linux-3.1.1/arch/sparc/kernel/process_32.c
3349--- linux-3.1.1/arch/sparc/kernel/process_32.c 2011-11-11 15:19:27.000000000 -0500
3350+++ linux-3.1.1/arch/sparc/kernel/process_32.c 2011-11-16 18:40:08.000000000 -0500
3351@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3352 rw->ins[4], rw->ins[5],
3353 rw->ins[6],
3354 rw->ins[7]);
3355- printk("%pS\n", (void *) rw->ins[7]);
3356+ printk("%pA\n", (void *) rw->ins[7]);
3357 rw = (struct reg_window32 *) rw->ins[6];
3358 }
3359 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3360@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3361
3362 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3363 r->psr, r->pc, r->npc, r->y, print_tainted());
3364- printk("PC: <%pS>\n", (void *) r->pc);
3365+ printk("PC: <%pA>\n", (void *) r->pc);
3366 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3367 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3368 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3369 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3370 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3371 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3372- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3373+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3374
3375 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3376 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3377@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3378 rw = (struct reg_window32 *) fp;
3379 pc = rw->ins[7];
3380 printk("[%08lx : ", pc);
3381- printk("%pS ] ", (void *) pc);
3382+ printk("%pA ] ", (void *) pc);
3383 fp = rw->ins[6];
3384 } while (++count < 16);
3385 printk("\n");
3386diff -urNp linux-3.1.1/arch/sparc/kernel/process_64.c linux-3.1.1/arch/sparc/kernel/process_64.c
3387--- linux-3.1.1/arch/sparc/kernel/process_64.c 2011-11-11 15:19:27.000000000 -0500
3388+++ linux-3.1.1/arch/sparc/kernel/process_64.c 2011-11-16 18:40:08.000000000 -0500
3389@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3390 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3391 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3392 if (regs->tstate & TSTATE_PRIV)
3393- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3394+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3395 }
3396
3397 void show_regs(struct pt_regs *regs)
3398 {
3399 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3400 regs->tpc, regs->tnpc, regs->y, print_tainted());
3401- printk("TPC: <%pS>\n", (void *) regs->tpc);
3402+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3403 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3404 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3405 regs->u_regs[3]);
3406@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3407 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3408 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3409 regs->u_regs[15]);
3410- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3411+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3412 show_regwindow(regs);
3413 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3414 }
3415@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3416 ((tp && tp->task) ? tp->task->pid : -1));
3417
3418 if (gp->tstate & TSTATE_PRIV) {
3419- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3420+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3421 (void *) gp->tpc,
3422 (void *) gp->o7,
3423 (void *) gp->i7,
3424diff -urNp linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c
3425--- linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c 2011-11-11 15:19:27.000000000 -0500
3426+++ linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c 2011-11-16 18:39:07.000000000 -0500
3427@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3428 if (ARCH_SUN4C && len > 0x20000000)
3429 return -ENOMEM;
3430 if (!addr)
3431- addr = TASK_UNMAPPED_BASE;
3432+ addr = current->mm->mmap_base;
3433
3434 if (flags & MAP_SHARED)
3435 addr = COLOUR_ALIGN(addr);
3436@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3437 }
3438 if (TASK_SIZE - PAGE_SIZE - len < addr)
3439 return -ENOMEM;
3440- if (!vmm || addr + len <= vmm->vm_start)
3441+ if (check_heap_stack_gap(vmm, addr, len))
3442 return addr;
3443 addr = vmm->vm_end;
3444 if (flags & MAP_SHARED)
3445diff -urNp linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c
3446--- linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c 2011-11-11 15:19:27.000000000 -0500
3447+++ linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c 2011-11-16 18:39:07.000000000 -0500
3448@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3449 /* We do not accept a shared mapping if it would violate
3450 * cache aliasing constraints.
3451 */
3452- if ((flags & MAP_SHARED) &&
3453+ if ((filp || (flags & MAP_SHARED)) &&
3454 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3455 return -EINVAL;
3456 return addr;
3457@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3458 if (filp || (flags & MAP_SHARED))
3459 do_color_align = 1;
3460
3461+#ifdef CONFIG_PAX_RANDMMAP
3462+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3463+#endif
3464+
3465 if (addr) {
3466 if (do_color_align)
3467 addr = COLOUR_ALIGN(addr, pgoff);
3468@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3469 addr = PAGE_ALIGN(addr);
3470
3471 vma = find_vma(mm, addr);
3472- if (task_size - len >= addr &&
3473- (!vma || addr + len <= vma->vm_start))
3474+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3475 return addr;
3476 }
3477
3478 if (len > mm->cached_hole_size) {
3479- start_addr = addr = mm->free_area_cache;
3480+ start_addr = addr = mm->free_area_cache;
3481 } else {
3482- start_addr = addr = TASK_UNMAPPED_BASE;
3483+ start_addr = addr = mm->mmap_base;
3484 mm->cached_hole_size = 0;
3485 }
3486
3487@@ -174,14 +177,14 @@ full_search:
3488 vma = find_vma(mm, VA_EXCLUDE_END);
3489 }
3490 if (unlikely(task_size < addr)) {
3491- if (start_addr != TASK_UNMAPPED_BASE) {
3492- start_addr = addr = TASK_UNMAPPED_BASE;
3493+ if (start_addr != mm->mmap_base) {
3494+ start_addr = addr = mm->mmap_base;
3495 mm->cached_hole_size = 0;
3496 goto full_search;
3497 }
3498 return -ENOMEM;
3499 }
3500- if (likely(!vma || addr + len <= vma->vm_start)) {
3501+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3502 /*
3503 * Remember the place where we stopped the search:
3504 */
3505@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3506 /* We do not accept a shared mapping if it would violate
3507 * cache aliasing constraints.
3508 */
3509- if ((flags & MAP_SHARED) &&
3510+ if ((filp || (flags & MAP_SHARED)) &&
3511 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3512 return -EINVAL;
3513 return addr;
3514@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3515 addr = PAGE_ALIGN(addr);
3516
3517 vma = find_vma(mm, addr);
3518- if (task_size - len >= addr &&
3519- (!vma || addr + len <= vma->vm_start))
3520+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3521 return addr;
3522 }
3523
3524@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3525 /* make sure it can fit in the remaining address space */
3526 if (likely(addr > len)) {
3527 vma = find_vma(mm, addr-len);
3528- if (!vma || addr <= vma->vm_start) {
3529+ if (check_heap_stack_gap(vma, addr - len, len)) {
3530 /* remember the address as a hint for next time */
3531 return (mm->free_area_cache = addr-len);
3532 }
3533@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3534 if (unlikely(mm->mmap_base < len))
3535 goto bottomup;
3536
3537- addr = mm->mmap_base-len;
3538- if (do_color_align)
3539- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3540+ addr = mm->mmap_base - len;
3541
3542 do {
3543+ if (do_color_align)
3544+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3545 /*
3546 * Lookup failure means no vma is above this address,
3547 * else if new region fits below vma->vm_start,
3548 * return with success:
3549 */
3550 vma = find_vma(mm, addr);
3551- if (likely(!vma || addr+len <= vma->vm_start)) {
3552+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3553 /* remember the address as a hint for next time */
3554 return (mm->free_area_cache = addr);
3555 }
3556@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3557 mm->cached_hole_size = vma->vm_start - addr;
3558
3559 /* try just below the current vma->vm_start */
3560- addr = vma->vm_start-len;
3561- if (do_color_align)
3562- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3563- } while (likely(len < vma->vm_start));
3564+ addr = skip_heap_stack_gap(vma, len);
3565+ } while (!IS_ERR_VALUE(addr));
3566
3567 bottomup:
3568 /*
3569@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3570 gap == RLIM_INFINITY ||
3571 sysctl_legacy_va_layout) {
3572 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3573+
3574+#ifdef CONFIG_PAX_RANDMMAP
3575+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3576+ mm->mmap_base += mm->delta_mmap;
3577+#endif
3578+
3579 mm->get_unmapped_area = arch_get_unmapped_area;
3580 mm->unmap_area = arch_unmap_area;
3581 } else {
3582@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3583 gap = (task_size / 6 * 5);
3584
3585 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3586+
3587+#ifdef CONFIG_PAX_RANDMMAP
3588+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3589+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3590+#endif
3591+
3592 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3593 mm->unmap_area = arch_unmap_area_topdown;
3594 }
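Throughout these arch_get_unmapped_area() hunks the open-coded "!vma || addr + len <= vma->vm_start" test is replaced by check_heap_stack_gap(), and the PAX_RANDMMAP deltas are folded into mmap_base. check_heap_stack_gap() is defined elsewhere in this patch; as a rough, hypothetical sketch of the idea only (the real definition may differ), it keeps the old test but additionally demands a configurable guard gap when the neighbouring vma can grow down, i.e. is a stack:

/* stand-in types and values, for illustration only */
struct vma_sketch {
        unsigned long vm_start;
        unsigned long vm_flags;
};
#define VM_GROWSDOWN_SKETCH     0x0100UL
#define PAGE_SHIFT_SKETCH       13              /* sparc64 uses 8KB pages */

static unsigned long heap_stack_gap_pages = 64; /* would be tunable in the patch */

static int check_heap_stack_gap_sketch(const struct vma_sketch *vma,
                                       unsigned long addr, unsigned long len)
{
        if (!vma)
                return 1;
        if (addr + len > vma->vm_start)         /* old test: would overlap the next vma */
                return 0;
        if ((vma->vm_flags & VM_GROWSDOWN_SKETCH) &&
            vma->vm_start - (addr + len) < (heap_stack_gap_pages << PAGE_SHIFT_SKETCH))
                return 0;                       /* too close underneath a stack vma */
        return 1;
}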
3595diff -urNp linux-3.1.1/arch/sparc/kernel/traps_32.c linux-3.1.1/arch/sparc/kernel/traps_32.c
3596--- linux-3.1.1/arch/sparc/kernel/traps_32.c 2011-11-11 15:19:27.000000000 -0500
3597+++ linux-3.1.1/arch/sparc/kernel/traps_32.c 2011-11-16 18:40:08.000000000 -0500
3598@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3599 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3600 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3601
3602+extern void gr_handle_kernel_exploit(void);
3603+
3604 void die_if_kernel(char *str, struct pt_regs *regs)
3605 {
3606 static int die_counter;
3607@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3608 count++ < 30 &&
3609 (((unsigned long) rw) >= PAGE_OFFSET) &&
3610 !(((unsigned long) rw) & 0x7)) {
3611- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3612+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3613 (void *) rw->ins[7]);
3614 rw = (struct reg_window32 *)rw->ins[6];
3615 }
3616 }
3617 printk("Instruction DUMP:");
3618 instruction_dump ((unsigned long *) regs->pc);
3619- if(regs->psr & PSR_PS)
3620+ if(regs->psr & PSR_PS) {
3621+ gr_handle_kernel_exploit();
3622 do_exit(SIGKILL);
3623+ }
3624 do_exit(SIGSEGV);
3625 }
3626
3627diff -urNp linux-3.1.1/arch/sparc/kernel/traps_64.c linux-3.1.1/arch/sparc/kernel/traps_64.c
3628--- linux-3.1.1/arch/sparc/kernel/traps_64.c 2011-11-11 15:19:27.000000000 -0500
3629+++ linux-3.1.1/arch/sparc/kernel/traps_64.c 2011-11-16 18:40:08.000000000 -0500
3630@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3631 i + 1,
3632 p->trapstack[i].tstate, p->trapstack[i].tpc,
3633 p->trapstack[i].tnpc, p->trapstack[i].tt);
3634- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3635+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3636 }
3637 }
3638
3639@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3640
3641 lvl -= 0x100;
3642 if (regs->tstate & TSTATE_PRIV) {
3643+
3644+#ifdef CONFIG_PAX_REFCOUNT
3645+ if (lvl == 6)
3646+ pax_report_refcount_overflow(regs);
3647+#endif
3648+
3649 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3650 die_if_kernel(buffer, regs);
3651 }
3652@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3653 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3654 {
3655 char buffer[32];
3656-
3657+
3658 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3659 0, lvl, SIGTRAP) == NOTIFY_STOP)
3660 return;
3661
3662+#ifdef CONFIG_PAX_REFCOUNT
3663+ if (lvl == 6)
3664+ pax_report_refcount_overflow(regs);
3665+#endif
3666+
3667 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3668
3669 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3670@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3671 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3672 printk("%s" "ERROR(%d): ",
3673 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3674- printk("TPC<%pS>\n", (void *) regs->tpc);
3675+ printk("TPC<%pA>\n", (void *) regs->tpc);
3676 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3677 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3678 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3679@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3680 smp_processor_id(),
3681 (type & 0x1) ? 'I' : 'D',
3682 regs->tpc);
3683- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3684+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3685 panic("Irrecoverable Cheetah+ parity error.");
3686 }
3687
3688@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3689 smp_processor_id(),
3690 (type & 0x1) ? 'I' : 'D',
3691 regs->tpc);
3692- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3693+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3694 }
3695
3696 struct sun4v_error_entry {
3697@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3698
3699 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3700 regs->tpc, tl);
3701- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3702+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3703 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3704- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3705+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3706 (void *) regs->u_regs[UREG_I7]);
3707 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3708 "pte[%lx] error[%lx]\n",
3709@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3710
3711 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3712 regs->tpc, tl);
3713- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3714+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3715 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3716- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3717+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3718 (void *) regs->u_regs[UREG_I7]);
3719 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3720 "pte[%lx] error[%lx]\n",
3721@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3722 fp = (unsigned long)sf->fp + STACK_BIAS;
3723 }
3724
3725- printk(" [%016lx] %pS\n", pc, (void *) pc);
3726+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3727 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3728 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3729 int index = tsk->curr_ret_stack;
3730 if (tsk->ret_stack && index >= graph) {
3731 pc = tsk->ret_stack[index - graph].ret;
3732- printk(" [%016lx] %pS\n", pc, (void *) pc);
3733+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3734 graph++;
3735 }
3736 }
3737@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3738 return (struct reg_window *) (fp + STACK_BIAS);
3739 }
3740
3741+extern void gr_handle_kernel_exploit(void);
3742+
3743 void die_if_kernel(char *str, struct pt_regs *regs)
3744 {
3745 static int die_counter;
3746@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3747 while (rw &&
3748 count++ < 30 &&
3749 kstack_valid(tp, (unsigned long) rw)) {
3750- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3751+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3752 (void *) rw->ins[7]);
3753
3754 rw = kernel_stack_up(rw);
3755@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3756 }
3757 user_instruction_dump ((unsigned int __user *) regs->tpc);
3758 }
3759- if (regs->tstate & TSTATE_PRIV)
3760+ if (regs->tstate & TSTATE_PRIV) {
3761+ gr_handle_kernel_exploit();
3762 do_exit(SIGKILL);
3763+ }
3764 do_exit(SIGSEGV);
3765 }
3766 EXPORT_SYMBOL(die_if_kernel);
3767diff -urNp linux-3.1.1/arch/sparc/kernel/unaligned_64.c linux-3.1.1/arch/sparc/kernel/unaligned_64.c
3768--- linux-3.1.1/arch/sparc/kernel/unaligned_64.c 2011-11-11 15:19:27.000000000 -0500
3769+++ linux-3.1.1/arch/sparc/kernel/unaligned_64.c 2011-11-16 18:40:08.000000000 -0500
3770@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3771 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3772
3773 if (__ratelimit(&ratelimit)) {
3774- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3775+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3776 regs->tpc, (void *) regs->tpc);
3777 }
3778 }
3779diff -urNp linux-3.1.1/arch/sparc/lib/atomic_64.S linux-3.1.1/arch/sparc/lib/atomic_64.S
3780--- linux-3.1.1/arch/sparc/lib/atomic_64.S 2011-11-11 15:19:27.000000000 -0500
3781+++ linux-3.1.1/arch/sparc/lib/atomic_64.S 2011-11-16 18:39:07.000000000 -0500
3782@@ -18,7 +18,12 @@
3783 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3784 BACKOFF_SETUP(%o2)
3785 1: lduw [%o1], %g1
3786- add %g1, %o0, %g7
3787+ addcc %g1, %o0, %g7
3788+
3789+#ifdef CONFIG_PAX_REFCOUNT
3790+ tvs %icc, 6
3791+#endif
3792+
3793 cas [%o1], %g1, %g7
3794 cmp %g1, %g7
3795 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3796@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3797 2: BACKOFF_SPIN(%o2, %o3, 1b)
3798 .size atomic_add, .-atomic_add
3799
3800+ .globl atomic_add_unchecked
3801+ .type atomic_add_unchecked,#function
3802+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3803+ BACKOFF_SETUP(%o2)
3804+1: lduw [%o1], %g1
3805+ add %g1, %o0, %g7
3806+ cas [%o1], %g1, %g7
3807+ cmp %g1, %g7
3808+ bne,pn %icc, 2f
3809+ nop
3810+ retl
3811+ nop
3812+2: BACKOFF_SPIN(%o2, %o3, 1b)
3813+ .size atomic_add_unchecked, .-atomic_add_unchecked
3814+
3815 .globl atomic_sub
3816 .type atomic_sub,#function
3817 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3818 BACKOFF_SETUP(%o2)
3819 1: lduw [%o1], %g1
3820- sub %g1, %o0, %g7
3821+ subcc %g1, %o0, %g7
3822+
3823+#ifdef CONFIG_PAX_REFCOUNT
3824+ tvs %icc, 6
3825+#endif
3826+
3827 cas [%o1], %g1, %g7
3828 cmp %g1, %g7
3829 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3830@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3831 2: BACKOFF_SPIN(%o2, %o3, 1b)
3832 .size atomic_sub, .-atomic_sub
3833
3834+ .globl atomic_sub_unchecked
3835+ .type atomic_sub_unchecked,#function
3836+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3837+ BACKOFF_SETUP(%o2)
3838+1: lduw [%o1], %g1
3839+ sub %g1, %o0, %g7
3840+ cas [%o1], %g1, %g7
3841+ cmp %g1, %g7
3842+ bne,pn %icc, 2f
3843+ nop
3844+ retl
3845+ nop
3846+2: BACKOFF_SPIN(%o2, %o3, 1b)
3847+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3848+
3849 .globl atomic_add_ret
3850 .type atomic_add_ret,#function
3851 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3852 BACKOFF_SETUP(%o2)
3853 1: lduw [%o1], %g1
3854- add %g1, %o0, %g7
3855+ addcc %g1, %o0, %g7
3856+
3857+#ifdef CONFIG_PAX_REFCOUNT
3858+ tvs %icc, 6
3859+#endif
3860+
3861 cas [%o1], %g1, %g7
3862 cmp %g1, %g7
3863 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3864@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3865 2: BACKOFF_SPIN(%o2, %o3, 1b)
3866 .size atomic_add_ret, .-atomic_add_ret
3867
3868+ .globl atomic_add_ret_unchecked
3869+ .type atomic_add_ret_unchecked,#function
3870+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3871+ BACKOFF_SETUP(%o2)
3872+1: lduw [%o1], %g1
3873+ addcc %g1, %o0, %g7
3874+ cas [%o1], %g1, %g7
3875+ cmp %g1, %g7
3876+ bne,pn %icc, 2f
3877+ add %g7, %o0, %g7
3878+ sra %g7, 0, %o0
3879+ retl
3880+ nop
3881+2: BACKOFF_SPIN(%o2, %o3, 1b)
3882+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3883+
3884 .globl atomic_sub_ret
3885 .type atomic_sub_ret,#function
3886 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3887 BACKOFF_SETUP(%o2)
3888 1: lduw [%o1], %g1
3889- sub %g1, %o0, %g7
3890+ subcc %g1, %o0, %g7
3891+
3892+#ifdef CONFIG_PAX_REFCOUNT
3893+ tvs %icc, 6
3894+#endif
3895+
3896 cas [%o1], %g1, %g7
3897 cmp %g1, %g7
3898 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3899@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3900 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3901 BACKOFF_SETUP(%o2)
3902 1: ldx [%o1], %g1
3903- add %g1, %o0, %g7
3904+ addcc %g1, %o0, %g7
3905+
3906+#ifdef CONFIG_PAX_REFCOUNT
3907+ tvs %xcc, 6
3908+#endif
3909+
3910 casx [%o1], %g1, %g7
3911 cmp %g1, %g7
3912 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3913@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3914 2: BACKOFF_SPIN(%o2, %o3, 1b)
3915 .size atomic64_add, .-atomic64_add
3916
3917+ .globl atomic64_add_unchecked
3918+ .type atomic64_add_unchecked,#function
3919+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3920+ BACKOFF_SETUP(%o2)
3921+1: ldx [%o1], %g1
3922+ addcc %g1, %o0, %g7
3923+ casx [%o1], %g1, %g7
3924+ cmp %g1, %g7
3925+ bne,pn %xcc, 2f
3926+ nop
3927+ retl
3928+ nop
3929+2: BACKOFF_SPIN(%o2, %o3, 1b)
3930+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3931+
3932 .globl atomic64_sub
3933 .type atomic64_sub,#function
3934 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3935 BACKOFF_SETUP(%o2)
3936 1: ldx [%o1], %g1
3937- sub %g1, %o0, %g7
3938+ subcc %g1, %o0, %g7
3939+
3940+#ifdef CONFIG_PAX_REFCOUNT
3941+ tvs %xcc, 6
3942+#endif
3943+
3944 casx [%o1], %g1, %g7
3945 cmp %g1, %g7
3946 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3947@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3948 2: BACKOFF_SPIN(%o2, %o3, 1b)
3949 .size atomic64_sub, .-atomic64_sub
3950
3951+ .globl atomic64_sub_unchecked
3952+ .type atomic64_sub_unchecked,#function
3953+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3954+ BACKOFF_SETUP(%o2)
3955+1: ldx [%o1], %g1
3956+ subcc %g1, %o0, %g7
3957+ casx [%o1], %g1, %g7
3958+ cmp %g1, %g7
3959+ bne,pn %xcc, 2f
3960+ nop
3961+ retl
3962+ nop
3963+2: BACKOFF_SPIN(%o2, %o3, 1b)
3964+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3965+
3966 .globl atomic64_add_ret
3967 .type atomic64_add_ret,#function
3968 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3969 BACKOFF_SETUP(%o2)
3970 1: ldx [%o1], %g1
3971- add %g1, %o0, %g7
3972+ addcc %g1, %o0, %g7
3973+
3974+#ifdef CONFIG_PAX_REFCOUNT
3975+ tvs %xcc, 6
3976+#endif
3977+
3978 casx [%o1], %g1, %g7
3979 cmp %g1, %g7
3980 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3981@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
3982 2: BACKOFF_SPIN(%o2, %o3, 1b)
3983 .size atomic64_add_ret, .-atomic64_add_ret
3984
3985+ .globl atomic64_add_ret_unchecked
3986+ .type atomic64_add_ret_unchecked,#function
3987+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3988+ BACKOFF_SETUP(%o2)
3989+1: ldx [%o1], %g1
3990+ addcc %g1, %o0, %g7
3991+ casx [%o1], %g1, %g7
3992+ cmp %g1, %g7
3993+ bne,pn %xcc, 2f
3994+ add %g7, %o0, %g7
3995+ mov %g7, %o0
3996+ retl
3997+ nop
3998+2: BACKOFF_SPIN(%o2, %o3, 1b)
3999+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4000+
4001 .globl atomic64_sub_ret
4002 .type atomic64_sub_ret,#function
4003 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4004 BACKOFF_SETUP(%o2)
4005 1: ldx [%o1], %g1
4006- sub %g1, %o0, %g7
4007+ subcc %g1, %o0, %g7
4008+
4009+#ifdef CONFIG_PAX_REFCOUNT
4010+ tvs %xcc, 6
4011+#endif
4012+
4013 casx [%o1], %g1, %g7
4014 cmp %g1, %g7
4015 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
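The *_unchecked entry points added above keep the pre-patch wrap-around semantics for counters that are deliberately allowed to overflow (statistics, sequence numbers), while the plain atomic_* routines gain the addcc/tvs overflow trap. A hypothetical usage sketch with stand-in names; atomic_unchecked_t itself is introduced elsewhere in this patch, and rx_stats_sketch is invented for the example:

/* stand-in: the patch's atomic_unchecked_t is a counter wrapper comparable to atomic_t */
typedef struct { unsigned int counter; } atomic_unchecked_sketch_t;

struct rx_stats_sketch {
        atomic_unchecked_sketch_t packets;      /* free-running, wrap is harmless */
};

static void count_packet_sketch(struct rx_stats_sketch *s)
{
        /* atomic_add_unchecked(1, &s->packets) in the patch: no overflow trap */
        __atomic_fetch_add(&s->packets.counter, 1, __ATOMIC_RELAXED);
}

A reference count, by contrast, would stay on the checked atomic_* routines so a leak-driven overflow traps instead of wrapping to zero.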
4016diff -urNp linux-3.1.1/arch/sparc/lib/ksyms.c linux-3.1.1/arch/sparc/lib/ksyms.c
4017--- linux-3.1.1/arch/sparc/lib/ksyms.c 2011-11-11 15:19:27.000000000 -0500
4018+++ linux-3.1.1/arch/sparc/lib/ksyms.c 2011-11-16 18:39:07.000000000 -0500
4019@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4020
4021 /* Atomic counter implementation. */
4022 EXPORT_SYMBOL(atomic_add);
4023+EXPORT_SYMBOL(atomic_add_unchecked);
4024 EXPORT_SYMBOL(atomic_add_ret);
4025+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4026 EXPORT_SYMBOL(atomic_sub);
4027+EXPORT_SYMBOL(atomic_sub_unchecked);
4028 EXPORT_SYMBOL(atomic_sub_ret);
4029 EXPORT_SYMBOL(atomic64_add);
4030+EXPORT_SYMBOL(atomic64_add_unchecked);
4031 EXPORT_SYMBOL(atomic64_add_ret);
4032+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4033 EXPORT_SYMBOL(atomic64_sub);
4034+EXPORT_SYMBOL(atomic64_sub_unchecked);
4035 EXPORT_SYMBOL(atomic64_sub_ret);
4036
4037 /* Atomic bit operations. */
4038diff -urNp linux-3.1.1/arch/sparc/lib/Makefile linux-3.1.1/arch/sparc/lib/Makefile
4039--- linux-3.1.1/arch/sparc/lib/Makefile 2011-11-11 15:19:27.000000000 -0500
4040+++ linux-3.1.1/arch/sparc/lib/Makefile 2011-11-16 18:39:07.000000000 -0500
4041@@ -2,7 +2,7 @@
4042 #
4043
4044 asflags-y := -ansi -DST_DIV0=0x02
4045-ccflags-y := -Werror
4046+#ccflags-y := -Werror
4047
4048 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4049 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4050diff -urNp linux-3.1.1/arch/sparc/Makefile linux-3.1.1/arch/sparc/Makefile
4051--- linux-3.1.1/arch/sparc/Makefile 2011-11-11 15:19:27.000000000 -0500
4052+++ linux-3.1.1/arch/sparc/Makefile 2011-11-16 18:40:08.000000000 -0500
4053@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4054 # Export what is needed by arch/sparc/boot/Makefile
4055 export VMLINUX_INIT VMLINUX_MAIN
4056 VMLINUX_INIT := $(head-y) $(init-y)
4057-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4058+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4059 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4060 VMLINUX_MAIN += $(drivers-y) $(net-y)
4061
4062diff -urNp linux-3.1.1/arch/sparc/mm/fault_32.c linux-3.1.1/arch/sparc/mm/fault_32.c
4063--- linux-3.1.1/arch/sparc/mm/fault_32.c 2011-11-11 15:19:27.000000000 -0500
4064+++ linux-3.1.1/arch/sparc/mm/fault_32.c 2011-11-16 18:39:07.000000000 -0500
4065@@ -22,6 +22,9 @@
4066 #include <linux/interrupt.h>
4067 #include <linux/module.h>
4068 #include <linux/kdebug.h>
4069+#include <linux/slab.h>
4070+#include <linux/pagemap.h>
4071+#include <linux/compiler.h>
4072
4073 #include <asm/system.h>
4074 #include <asm/page.h>
4075@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4076 return safe_compute_effective_address(regs, insn);
4077 }
4078
4079+#ifdef CONFIG_PAX_PAGEEXEC
4080+#ifdef CONFIG_PAX_DLRESOLVE
4081+static void pax_emuplt_close(struct vm_area_struct *vma)
4082+{
4083+ vma->vm_mm->call_dl_resolve = 0UL;
4084+}
4085+
4086+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4087+{
4088+ unsigned int *kaddr;
4089+
4090+ vmf->page = alloc_page(GFP_HIGHUSER);
4091+ if (!vmf->page)
4092+ return VM_FAULT_OOM;
4093+
4094+ kaddr = kmap(vmf->page);
4095+ memset(kaddr, 0, PAGE_SIZE);
4096+ kaddr[0] = 0x9DE3BFA8U; /* save */
4097+ flush_dcache_page(vmf->page);
4098+ kunmap(vmf->page);
4099+ return VM_FAULT_MAJOR;
4100+}
4101+
4102+static const struct vm_operations_struct pax_vm_ops = {
4103+ .close = pax_emuplt_close,
4104+ .fault = pax_emuplt_fault
4105+};
4106+
4107+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4108+{
4109+ int ret;
4110+
4111+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4112+ vma->vm_mm = current->mm;
4113+ vma->vm_start = addr;
4114+ vma->vm_end = addr + PAGE_SIZE;
4115+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4116+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4117+ vma->vm_ops = &pax_vm_ops;
4118+
4119+ ret = insert_vm_struct(current->mm, vma);
4120+ if (ret)
4121+ return ret;
4122+
4123+ ++current->mm->total_vm;
4124+ return 0;
4125+}
4126+#endif
4127+
4128+/*
4129+ * PaX: decide what to do with offenders (regs->pc = fault address)
4130+ *
4131+ * returns 1 when task should be killed
4132+ * 2 when patched PLT trampoline was detected
4133+ * 3 when unpatched PLT trampoline was detected
4134+ */
4135+static int pax_handle_fetch_fault(struct pt_regs *regs)
4136+{
4137+
4138+#ifdef CONFIG_PAX_EMUPLT
4139+ int err;
4140+
4141+ do { /* PaX: patched PLT emulation #1 */
4142+ unsigned int sethi1, sethi2, jmpl;
4143+
4144+ err = get_user(sethi1, (unsigned int *)regs->pc);
4145+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4146+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4147+
4148+ if (err)
4149+ break;
4150+
4151+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4152+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4153+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4154+ {
4155+ unsigned int addr;
4156+
4157+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4158+ addr = regs->u_regs[UREG_G1];
4159+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4160+ regs->pc = addr;
4161+ regs->npc = addr+4;
4162+ return 2;
4163+ }
4164+ } while (0);
4165+
4166+ { /* PaX: patched PLT emulation #2 */
4167+ unsigned int ba;
4168+
4169+ err = get_user(ba, (unsigned int *)regs->pc);
4170+
4171+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4172+ unsigned int addr;
4173+
4174+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4175+ regs->pc = addr;
4176+ regs->npc = addr+4;
4177+ return 2;
4178+ }
4179+ }
4180+
4181+ do { /* PaX: patched PLT emulation #3 */
4182+ unsigned int sethi, jmpl, nop;
4183+
4184+ err = get_user(sethi, (unsigned int *)regs->pc);
4185+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4186+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4187+
4188+ if (err)
4189+ break;
4190+
4191+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4192+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4193+ nop == 0x01000000U)
4194+ {
4195+ unsigned int addr;
4196+
4197+ addr = (sethi & 0x003FFFFFU) << 10;
4198+ regs->u_regs[UREG_G1] = addr;
4199+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4200+ regs->pc = addr;
4201+ regs->npc = addr+4;
4202+ return 2;
4203+ }
4204+ } while (0);
4205+
4206+ do { /* PaX: unpatched PLT emulation step 1 */
4207+ unsigned int sethi, ba, nop;
4208+
4209+ err = get_user(sethi, (unsigned int *)regs->pc);
4210+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4211+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4212+
4213+ if (err)
4214+ break;
4215+
4216+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4217+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4218+ nop == 0x01000000U)
4219+ {
4220+ unsigned int addr, save, call;
4221+
4222+ if ((ba & 0xFFC00000U) == 0x30800000U)
4223+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4224+ else
4225+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4226+
4227+ err = get_user(save, (unsigned int *)addr);
4228+ err |= get_user(call, (unsigned int *)(addr+4));
4229+ err |= get_user(nop, (unsigned int *)(addr+8));
4230+ if (err)
4231+ break;
4232+
4233+#ifdef CONFIG_PAX_DLRESOLVE
4234+ if (save == 0x9DE3BFA8U &&
4235+ (call & 0xC0000000U) == 0x40000000U &&
4236+ nop == 0x01000000U)
4237+ {
4238+ struct vm_area_struct *vma;
4239+ unsigned long call_dl_resolve;
4240+
4241+ down_read(&current->mm->mmap_sem);
4242+ call_dl_resolve = current->mm->call_dl_resolve;
4243+ up_read(&current->mm->mmap_sem);
4244+ if (likely(call_dl_resolve))
4245+ goto emulate;
4246+
4247+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4248+
4249+ down_write(&current->mm->mmap_sem);
4250+ if (current->mm->call_dl_resolve) {
4251+ call_dl_resolve = current->mm->call_dl_resolve;
4252+ up_write(&current->mm->mmap_sem);
4253+ if (vma)
4254+ kmem_cache_free(vm_area_cachep, vma);
4255+ goto emulate;
4256+ }
4257+
4258+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4259+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4260+ up_write(&current->mm->mmap_sem);
4261+ if (vma)
4262+ kmem_cache_free(vm_area_cachep, vma);
4263+ return 1;
4264+ }
4265+
4266+ if (pax_insert_vma(vma, call_dl_resolve)) {
4267+ up_write(&current->mm->mmap_sem);
4268+ kmem_cache_free(vm_area_cachep, vma);
4269+ return 1;
4270+ }
4271+
4272+ current->mm->call_dl_resolve = call_dl_resolve;
4273+ up_write(&current->mm->mmap_sem);
4274+
4275+emulate:
4276+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4277+ regs->pc = call_dl_resolve;
4278+ regs->npc = addr+4;
4279+ return 3;
4280+ }
4281+#endif
4282+
4283+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4284+ if ((save & 0xFFC00000U) == 0x05000000U &&
4285+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4286+ nop == 0x01000000U)
4287+ {
4288+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4289+ regs->u_regs[UREG_G2] = addr + 4;
4290+ addr = (save & 0x003FFFFFU) << 10;
4291+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4292+ regs->pc = addr;
4293+ regs->npc = addr+4;
4294+ return 3;
4295+ }
4296+ }
4297+ } while (0);
4298+
4299+ do { /* PaX: unpatched PLT emulation step 2 */
4300+ unsigned int save, call, nop;
4301+
4302+ err = get_user(save, (unsigned int *)(regs->pc-4));
4303+ err |= get_user(call, (unsigned int *)regs->pc);
4304+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4305+ if (err)
4306+ break;
4307+
4308+ if (save == 0x9DE3BFA8U &&
4309+ (call & 0xC0000000U) == 0x40000000U &&
4310+ nop == 0x01000000U)
4311+ {
4312+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4313+
4314+ regs->u_regs[UREG_RETPC] = regs->pc;
4315+ regs->pc = dl_resolve;
4316+ regs->npc = dl_resolve+4;
4317+ return 3;
4318+ }
4319+ } while (0);
4320+#endif
4321+
4322+ return 1;
4323+}
4324+
4325+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4326+{
4327+ unsigned long i;
4328+
4329+ printk(KERN_ERR "PAX: bytes at PC: ");
4330+ for (i = 0; i < 8; i++) {
4331+ unsigned int c;
4332+ if (get_user(c, (unsigned int *)pc+i))
4333+ printk(KERN_CONT "???????? ");
4334+ else
4335+ printk(KERN_CONT "%08x ", c);
4336+ }
4337+ printk("\n");
4338+}
4339+#endif
4340+
4341 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4342 int text_fault)
4343 {
4344@@ -281,6 +546,24 @@ good_area:
4345 if(!(vma->vm_flags & VM_WRITE))
4346 goto bad_area;
4347 } else {
4348+
4349+#ifdef CONFIG_PAX_PAGEEXEC
4350+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4351+ up_read(&mm->mmap_sem);
4352+ switch (pax_handle_fetch_fault(regs)) {
4353+
4354+#ifdef CONFIG_PAX_EMUPLT
4355+ case 2:
4356+ case 3:
4357+ return;
4358+#endif
4359+
4360+ }
4361+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4362+ do_group_exit(SIGKILL);
4363+ }
4364+#endif
4365+
4366 /* Allow reads even for write-only mappings */
4367 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4368 goto bad_area;
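The PLT-emulation code above repeatedly matches instruction words against masks such as (sethi & 0xFFC00000U) == 0x03000000U and rebuilds a branch target from the 22-bit immediate. A small, stand-alone decoder for the sethi case (SPARC V8 format 2: op=00, rd, op2=100, imm22), runnable in userspace; the constants follow the checks used by the fault handler, while the helper itself is written just for this illustration:

#include <stdio.h>
#include <stdint.h>

static int decode_sethi(uint32_t insn, unsigned *rd, uint32_t *value)
{
        if ((insn & 0xC1C00000U) != 0x01000000U)        /* op=00, op2=100 -> sethi */
                return 0;
        *rd = (insn >> 25) & 0x1F;
        *value = (insn & 0x003FFFFFU) << 10;            /* imm22 fills bits 31..10 */
        return 1;
}

int main(void)
{
        unsigned rd;
        uint32_t value;

        /* 0x03000000 is "sethi %hi(0), %g1", the pattern matched above;
         * 0x01000000 ("sethi 0, %g0") is the canonical nop the handler also checks. */
        if (decode_sethi(0x03000000U, &rd, &value))
                printf("sethi -> reg %%g%u, value 0x%08x\n", rd, value);
        return 0;
}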
4369diff -urNp linux-3.1.1/arch/sparc/mm/fault_64.c linux-3.1.1/arch/sparc/mm/fault_64.c
4370--- linux-3.1.1/arch/sparc/mm/fault_64.c 2011-11-11 15:19:27.000000000 -0500
4371+++ linux-3.1.1/arch/sparc/mm/fault_64.c 2011-11-16 18:40:08.000000000 -0500
4372@@ -21,6 +21,9 @@
4373 #include <linux/kprobes.h>
4374 #include <linux/kdebug.h>
4375 #include <linux/percpu.h>
4376+#include <linux/slab.h>
4377+#include <linux/pagemap.h>
4378+#include <linux/compiler.h>
4379
4380 #include <asm/page.h>
4381 #include <asm/pgtable.h>
4382@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4383 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4384 regs->tpc);
4385 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4386- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4387+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4388 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4389 dump_stack();
4390 unhandled_fault(regs->tpc, current, regs);
4391@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4392 show_regs(regs);
4393 }
4394
4395+#ifdef CONFIG_PAX_PAGEEXEC
4396+#ifdef CONFIG_PAX_DLRESOLVE
4397+static void pax_emuplt_close(struct vm_area_struct *vma)
4398+{
4399+ vma->vm_mm->call_dl_resolve = 0UL;
4400+}
4401+
4402+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4403+{
4404+ unsigned int *kaddr;
4405+
4406+ vmf->page = alloc_page(GFP_HIGHUSER);
4407+ if (!vmf->page)
4408+ return VM_FAULT_OOM;
4409+
4410+ kaddr = kmap(vmf->page);
4411+ memset(kaddr, 0, PAGE_SIZE);
4412+ kaddr[0] = 0x9DE3BFA8U; /* save */
4413+ flush_dcache_page(vmf->page);
4414+ kunmap(vmf->page);
4415+ return VM_FAULT_MAJOR;
4416+}
4417+
4418+static const struct vm_operations_struct pax_vm_ops = {
4419+ .close = pax_emuplt_close,
4420+ .fault = pax_emuplt_fault
4421+};
4422+
4423+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4424+{
4425+ int ret;
4426+
4427+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4428+ vma->vm_mm = current->mm;
4429+ vma->vm_start = addr;
4430+ vma->vm_end = addr + PAGE_SIZE;
4431+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4432+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4433+ vma->vm_ops = &pax_vm_ops;
4434+
4435+ ret = insert_vm_struct(current->mm, vma);
4436+ if (ret)
4437+ return ret;
4438+
4439+ ++current->mm->total_vm;
4440+ return 0;
4441+}
4442+#endif
4443+
4444+/*
4445+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4446+ *
4447+ * returns 1 when task should be killed
4448+ * 2 when patched PLT trampoline was detected
4449+ * 3 when unpatched PLT trampoline was detected
4450+ */
4451+static int pax_handle_fetch_fault(struct pt_regs *regs)
4452+{
4453+
4454+#ifdef CONFIG_PAX_EMUPLT
4455+ int err;
4456+
4457+ do { /* PaX: patched PLT emulation #1 */
4458+ unsigned int sethi1, sethi2, jmpl;
4459+
4460+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4461+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4462+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4463+
4464+ if (err)
4465+ break;
4466+
4467+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4468+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4469+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4470+ {
4471+ unsigned long addr;
4472+
4473+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4474+ addr = regs->u_regs[UREG_G1];
4475+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4476+
4477+ if (test_thread_flag(TIF_32BIT))
4478+ addr &= 0xFFFFFFFFUL;
4479+
4480+ regs->tpc = addr;
4481+ regs->tnpc = addr+4;
4482+ return 2;
4483+ }
4484+ } while (0);
4485+
4486+ { /* PaX: patched PLT emulation #2 */
4487+ unsigned int ba;
4488+
4489+ err = get_user(ba, (unsigned int *)regs->tpc);
4490+
4491+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4492+ unsigned long addr;
4493+
4494+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4495+
4496+ if (test_thread_flag(TIF_32BIT))
4497+ addr &= 0xFFFFFFFFUL;
4498+
4499+ regs->tpc = addr;
4500+ regs->tnpc = addr+4;
4501+ return 2;
4502+ }
4503+ }
4504+
4505+ do { /* PaX: patched PLT emulation #3 */
4506+ unsigned int sethi, jmpl, nop;
4507+
4508+ err = get_user(sethi, (unsigned int *)regs->tpc);
4509+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4510+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4511+
4512+ if (err)
4513+ break;
4514+
4515+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4516+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4517+ nop == 0x01000000U)
4518+ {
4519+ unsigned long addr;
4520+
4521+ addr = (sethi & 0x003FFFFFU) << 10;
4522+ regs->u_regs[UREG_G1] = addr;
4523+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ } while (0);
4533+
4534+ do { /* PaX: patched PLT emulation #4 */
4535+ unsigned int sethi, mov1, call, mov2;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4540+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4541+
4542+ if (err)
4543+ break;
4544+
4545+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4546+ mov1 == 0x8210000FU &&
4547+ (call & 0xC0000000U) == 0x40000000U &&
4548+ mov2 == 0x9E100001U)
4549+ {
4550+ unsigned long addr;
4551+
4552+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4553+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4554+
4555+ if (test_thread_flag(TIF_32BIT))
4556+ addr &= 0xFFFFFFFFUL;
4557+
4558+ regs->tpc = addr;
4559+ regs->tnpc = addr+4;
4560+ return 2;
4561+ }
4562+ } while (0);
4563+
4564+ do { /* PaX: patched PLT emulation #5 */
4565+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4566+
4567+ err = get_user(sethi, (unsigned int *)regs->tpc);
4568+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4569+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4570+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4571+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4572+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4573+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4574+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4575+
4576+ if (err)
4577+ break;
4578+
4579+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4580+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4581+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4582+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4583+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4584+ sllx == 0x83287020U &&
4585+ jmpl == 0x81C04005U &&
4586+ nop == 0x01000000U)
4587+ {
4588+ unsigned long addr;
4589+
4590+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4591+ regs->u_regs[UREG_G1] <<= 32;
4592+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4593+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4594+ regs->tpc = addr;
4595+ regs->tnpc = addr+4;
4596+ return 2;
4597+ }
4598+ } while (0);
4599+
4600+ do { /* PaX: patched PLT emulation #6 */
4601+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4602+
4603+ err = get_user(sethi, (unsigned int *)regs->tpc);
4604+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4605+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4606+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4607+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4608+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4609+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4610+
4611+ if (err)
4612+ break;
4613+
4614+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4615+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4616+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4617+ sllx == 0x83287020U &&
4618+ (or & 0xFFFFE000U) == 0x8A116000U &&
4619+ jmpl == 0x81C04005U &&
4620+ nop == 0x01000000U)
4621+ {
4622+ unsigned long addr;
4623+
4624+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4625+ regs->u_regs[UREG_G1] <<= 32;
4626+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4627+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4628+ regs->tpc = addr;
4629+ regs->tnpc = addr+4;
4630+ return 2;
4631+ }
4632+ } while (0);
4633+
4634+ do { /* PaX: unpatched PLT emulation step 1 */
4635+ unsigned int sethi, ba, nop;
4636+
4637+ err = get_user(sethi, (unsigned int *)regs->tpc);
4638+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4639+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4640+
4641+ if (err)
4642+ break;
4643+
4644+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4645+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4646+ nop == 0x01000000U)
4647+ {
4648+ unsigned long addr;
4649+ unsigned int save, call;
4650+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4651+
4652+ if ((ba & 0xFFC00000U) == 0x30800000U)
4653+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4654+ else
4655+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4656+
4657+ if (test_thread_flag(TIF_32BIT))
4658+ addr &= 0xFFFFFFFFUL;
4659+
4660+ err = get_user(save, (unsigned int *)addr);
4661+ err |= get_user(call, (unsigned int *)(addr+4));
4662+ err |= get_user(nop, (unsigned int *)(addr+8));
4663+ if (err)
4664+ break;
4665+
4666+#ifdef CONFIG_PAX_DLRESOLVE
4667+ if (save == 0x9DE3BFA8U &&
4668+ (call & 0xC0000000U) == 0x40000000U &&
4669+ nop == 0x01000000U)
4670+ {
4671+ struct vm_area_struct *vma;
4672+ unsigned long call_dl_resolve;
4673+
4674+ down_read(&current->mm->mmap_sem);
4675+ call_dl_resolve = current->mm->call_dl_resolve;
4676+ up_read(&current->mm->mmap_sem);
4677+ if (likely(call_dl_resolve))
4678+ goto emulate;
4679+
4680+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4681+
4682+ down_write(&current->mm->mmap_sem);
4683+ if (current->mm->call_dl_resolve) {
4684+ call_dl_resolve = current->mm->call_dl_resolve;
4685+ up_write(&current->mm->mmap_sem);
4686+ if (vma)
4687+ kmem_cache_free(vm_area_cachep, vma);
4688+ goto emulate;
4689+ }
4690+
4691+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4692+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4693+ up_write(&current->mm->mmap_sem);
4694+ if (vma)
4695+ kmem_cache_free(vm_area_cachep, vma);
4696+ return 1;
4697+ }
4698+
4699+ if (pax_insert_vma(vma, call_dl_resolve)) {
4700+ up_write(&current->mm->mmap_sem);
4701+ kmem_cache_free(vm_area_cachep, vma);
4702+ return 1;
4703+ }
4704+
4705+ current->mm->call_dl_resolve = call_dl_resolve;
4706+ up_write(&current->mm->mmap_sem);
4707+
4708+emulate:
4709+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4710+ regs->tpc = call_dl_resolve;
4711+ regs->tnpc = addr+4;
4712+ return 3;
4713+ }
4714+#endif
4715+
4716+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4717+ if ((save & 0xFFC00000U) == 0x05000000U &&
4718+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4719+ nop == 0x01000000U)
4720+ {
4721+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4722+ regs->u_regs[UREG_G2] = addr + 4;
4723+ addr = (save & 0x003FFFFFU) << 10;
4724+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4725+
4726+ if (test_thread_flag(TIF_32BIT))
4727+ addr &= 0xFFFFFFFFUL;
4728+
4729+ regs->tpc = addr;
4730+ regs->tnpc = addr+4;
4731+ return 3;
4732+ }
4733+
4734+ /* PaX: 64-bit PLT stub */
4735+ err = get_user(sethi1, (unsigned int *)addr);
4736+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4737+ err |= get_user(or1, (unsigned int *)(addr+8));
4738+ err |= get_user(or2, (unsigned int *)(addr+12));
4739+ err |= get_user(sllx, (unsigned int *)(addr+16));
4740+ err |= get_user(add, (unsigned int *)(addr+20));
4741+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4742+ err |= get_user(nop, (unsigned int *)(addr+28));
4743+ if (err)
4744+ break;
4745+
4746+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4747+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4748+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4749+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4750+ sllx == 0x89293020U &&
4751+ add == 0x8A010005U &&
4752+ jmpl == 0x89C14000U &&
4753+ nop == 0x01000000U)
4754+ {
4755+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4756+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4757+ regs->u_regs[UREG_G4] <<= 32;
4758+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4759+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4760+ regs->u_regs[UREG_G4] = addr + 24;
4761+ addr = regs->u_regs[UREG_G5];
4762+ regs->tpc = addr;
4763+ regs->tnpc = addr+4;
4764+ return 3;
4765+ }
4766+ }
4767+ } while (0);
4768+
4769+#ifdef CONFIG_PAX_DLRESOLVE
4770+ do { /* PaX: unpatched PLT emulation step 2 */
4771+ unsigned int save, call, nop;
4772+
4773+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4774+ err |= get_user(call, (unsigned int *)regs->tpc);
4775+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4776+ if (err)
4777+ break;
4778+
4779+ if (save == 0x9DE3BFA8U &&
4780+ (call & 0xC0000000U) == 0x40000000U &&
4781+ nop == 0x01000000U)
4782+ {
4783+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4784+
4785+ if (test_thread_flag(TIF_32BIT))
4786+ dl_resolve &= 0xFFFFFFFFUL;
4787+
4788+ regs->u_regs[UREG_RETPC] = regs->tpc;
4789+ regs->tpc = dl_resolve;
4790+ regs->tnpc = dl_resolve+4;
4791+ return 3;
4792+ }
4793+ } while (0);
4794+#endif
4795+
4796+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4797+ unsigned int sethi, ba, nop;
4798+
4799+ err = get_user(sethi, (unsigned int *)regs->tpc);
4800+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4801+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4802+
4803+ if (err)
4804+ break;
4805+
4806+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4807+ (ba & 0xFFF00000U) == 0x30600000U &&
4808+ nop == 0x01000000U)
4809+ {
4810+ unsigned long addr;
4811+
4812+ addr = (sethi & 0x003FFFFFU) << 10;
4813+ regs->u_regs[UREG_G1] = addr;
4814+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4815+
4816+ if (test_thread_flag(TIF_32BIT))
4817+ addr &= 0xFFFFFFFFUL;
4818+
4819+ regs->tpc = addr;
4820+ regs->tnpc = addr+4;
4821+ return 2;
4822+ }
4823+ } while (0);
4824+
4825+#endif
4826+
4827+ return 1;
4828+}
4829+
4830+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4831+{
4832+ unsigned long i;
4833+
4834+ printk(KERN_ERR "PAX: bytes at PC: ");
4835+ for (i = 0; i < 8; i++) {
4836+ unsigned int c;
4837+ if (get_user(c, (unsigned int *)pc+i))
4838+ printk(KERN_CONT "???????? ");
4839+ else
4840+ printk(KERN_CONT "%08x ", c);
4841+ }
4842+ printk("\n");
4843+}
4844+#endif
4845+
4846 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4847 {
4848 struct mm_struct *mm = current->mm;
4849@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4850 if (!vma)
4851 goto bad_area;
4852
4853+#ifdef CONFIG_PAX_PAGEEXEC
4854+ /* PaX: detect ITLB misses on non-exec pages */
4855+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4856+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4857+ {
4858+ if (address != regs->tpc)
4859+ goto good_area;
4860+
4861+ up_read(&mm->mmap_sem);
4862+ switch (pax_handle_fetch_fault(regs)) {
4863+
4864+#ifdef CONFIG_PAX_EMUPLT
4865+ case 2:
4866+ case 3:
4867+ return;
4868+#endif
4869+
4870+ }
4871+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4872+ do_group_exit(SIGKILL);
4873+ }
4874+#endif
4875+
4876 /* Pure DTLB misses do not tell us whether the fault causing
4877 * load/store/atomic was a write or not, it only says that there
4878 * was no match. So in such a case we (carefully) read the
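The PLT emulation above keeps rebuilding register values from SPARC sethi/or instruction words: the low 22 bits of a sethi word are its immediate, which the CPU shifts left by 10 before writing the destination register, and an or-immediate supplies the low bits. A minimal userspace sketch of that decoding, not part of the patch; the instruction words below are invented purely for illustration.

/* Sketch only: mirrors the "(insn & 0x003FFFFFU) << 10" decoding used in
 * the fault handler above; the encodings below are assumed examples. */
#include <stdio.h>

static unsigned long sethi_value(unsigned int insn)
{
        return (unsigned long)(insn & 0x003FFFFFU) << 10;  /* imm22 << 10 */
}

static unsigned long or_low_bits(unsigned int insn)
{
        return insn & 0x3FFU;                               /* low 10-bit immediate */
}

int main(void)
{
        unsigned int sethi_insn = 0x03000000U | 0x12345U;   /* assumed: sethi imm22=0x12345 */
        unsigned int or_insn    = 0x8A116000U | 0x234U;     /* assumed: or with immediate 0x234 */

        printf("reconstructed address: %#lx\n",
               sethi_value(sethi_insn) | or_low_bits(or_insn));
        return 0;
}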
4879diff -urNp linux-3.1.1/arch/sparc/mm/hugetlbpage.c linux-3.1.1/arch/sparc/mm/hugetlbpage.c
4880--- linux-3.1.1/arch/sparc/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
4881+++ linux-3.1.1/arch/sparc/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
4882@@ -68,7 +68,7 @@ full_search:
4883 }
4884 return -ENOMEM;
4885 }
4886- if (likely(!vma || addr + len <= vma->vm_start)) {
4887+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4888 /*
4889 * Remember the place where we stopped the search:
4890 */
4891@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4892 /* make sure it can fit in the remaining address space */
4893 if (likely(addr > len)) {
4894 vma = find_vma(mm, addr-len);
4895- if (!vma || addr <= vma->vm_start) {
4896+ if (check_heap_stack_gap(vma, addr - len, len)) {
4897 /* remember the address as a hint for next time */
4898 return (mm->free_area_cache = addr-len);
4899 }
4900@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4901 if (unlikely(mm->mmap_base < len))
4902 goto bottomup;
4903
4904- addr = (mm->mmap_base-len) & HPAGE_MASK;
4905+ addr = mm->mmap_base - len;
4906
4907 do {
4908+ addr &= HPAGE_MASK;
4909 /*
4910 * Lookup failure means no vma is above this address,
4911 * else if new region fits below vma->vm_start,
4912 * return with success:
4913 */
4914 vma = find_vma(mm, addr);
4915- if (likely(!vma || addr+len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /* remember the address as a hint for next time */
4918 return (mm->free_area_cache = addr);
4919 }
4920@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4921 mm->cached_hole_size = vma->vm_start - addr;
4922
4923 /* try just below the current vma->vm_start */
4924- addr = (vma->vm_start-len) & HPAGE_MASK;
4925- } while (likely(len < vma->vm_start));
4926+ addr = skip_heap_stack_gap(vma, len);
4927+ } while (!IS_ERR_VALUE(addr));
4928
4929 bottomup:
4930 /*
4931@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4932 if (addr) {
4933 addr = ALIGN(addr, HPAGE_SIZE);
4934 vma = find_vma(mm, addr);
4935- if (task_size - len >= addr &&
4936- (!vma || addr + len <= vma->vm_start))
4937+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4938 return addr;
4939 }
4940 if (mm->get_unmapped_area == arch_get_unmapped_area)
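The "!vma || addr + len <= vma->vm_start" tests that this hunk replaces only require the candidate range to end at or before the next mapping. check_heap_stack_gap() itself is defined elsewhere in this patch; as a rough sketch of the idea only (the struct, flag value and guard size below are invented for illustration, not the patch's definition), a gap-aware check additionally keeps a hole below a stack that grows down.

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_STACK_GAP (64UL * 1024)      /* invented guard size */
#define SKETCH_VM_GROWSDOWN 0x0100UL        /* invented flag value */

struct sketch_vma {                         /* stand-in for struct vm_area_struct */
        unsigned long vm_start;
        unsigned long vm_flags;
};

static bool gap_ok(const struct sketch_vma *vma, unsigned long addr, unsigned long len)
{
        if (!vma)
                return true;                      /* nothing above the range: it fits */
        if (addr + len > vma->vm_start)
                return false;                     /* would overlap the next vma */
        if (vma->vm_flags & SKETCH_VM_GROWSDOWN)  /* keep a guard gap below a stack */
                return vma->vm_start - (addr + len) >= SKETCH_STACK_GAP;
        return true;
}

int main(void)
{
        struct sketch_vma stack = { 0xbf000000UL, SKETCH_VM_GROWSDOWN };

        printf("%d\n", gap_ok(&stack, 0xbeff0000UL, 0x8000UL));  /* 0: too close to the stack */
        printf("%d\n", gap_ok(&stack, 0xbe000000UL, 0x8000UL));  /* 1: leaves a gap */
        return 0;
}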
4941diff -urNp linux-3.1.1/arch/sparc/mm/init_32.c linux-3.1.1/arch/sparc/mm/init_32.c
4942--- linux-3.1.1/arch/sparc/mm/init_32.c 2011-11-11 15:19:27.000000000 -0500
4943+++ linux-3.1.1/arch/sparc/mm/init_32.c 2011-11-16 18:39:07.000000000 -0500
4944@@ -316,6 +316,9 @@ extern void device_scan(void);
4945 pgprot_t PAGE_SHARED __read_mostly;
4946 EXPORT_SYMBOL(PAGE_SHARED);
4947
4948+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4949+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4950+
4951 void __init paging_init(void)
4952 {
4953 switch(sparc_cpu_model) {
4954@@ -344,17 +347,17 @@ void __init paging_init(void)
4955
4956 /* Initialize the protection map with non-constant, MMU dependent values. */
4957 protection_map[0] = PAGE_NONE;
4958- protection_map[1] = PAGE_READONLY;
4959- protection_map[2] = PAGE_COPY;
4960- protection_map[3] = PAGE_COPY;
4961+ protection_map[1] = PAGE_READONLY_NOEXEC;
4962+ protection_map[2] = PAGE_COPY_NOEXEC;
4963+ protection_map[3] = PAGE_COPY_NOEXEC;
4964 protection_map[4] = PAGE_READONLY;
4965 protection_map[5] = PAGE_READONLY;
4966 protection_map[6] = PAGE_COPY;
4967 protection_map[7] = PAGE_COPY;
4968 protection_map[8] = PAGE_NONE;
4969- protection_map[9] = PAGE_READONLY;
4970- protection_map[10] = PAGE_SHARED;
4971- protection_map[11] = PAGE_SHARED;
4972+ protection_map[9] = PAGE_READONLY_NOEXEC;
4973+ protection_map[10] = PAGE_SHARED_NOEXEC;
4974+ protection_map[11] = PAGE_SHARED_NOEXEC;
4975 protection_map[12] = PAGE_READONLY;
4976 protection_map[13] = PAGE_READONLY;
4977 protection_map[14] = PAGE_SHARED;
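protection_map[] is indexed by the low vm_flags bits (VM_READ = 1, VM_WRITE = 2, VM_EXEC = 4, VM_SHARED = 8), which is why the hunk above swaps only entries 1-3 and 9-11, the readable and writable combinations without VM_EXEC, to their _NOEXEC variants. A small standalone sketch that prints the index-to-permission mapping:

/* Sketch only: decodes the protection_map[] index the same way mm code does. */
#include <stdio.h>

#define VM_READ   0x1u
#define VM_WRITE  0x2u
#define VM_EXEC   0x4u
#define VM_SHARED 0x8u

int main(void)
{
        for (unsigned int idx = 0; idx < 16; idx++)
                printf("protection_map[%2u]: %s%s%s %s\n", idx,
                       idx & VM_READ   ? "r" : "-",
                       idx & VM_WRITE  ? "w" : "-",
                       idx & VM_EXEC   ? "x" : "-",
                       idx & VM_SHARED ? "shared" : "private");
        return 0;
}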
4978diff -urNp linux-3.1.1/arch/sparc/mm/Makefile linux-3.1.1/arch/sparc/mm/Makefile
4979--- linux-3.1.1/arch/sparc/mm/Makefile 2011-11-11 15:19:27.000000000 -0500
4980+++ linux-3.1.1/arch/sparc/mm/Makefile 2011-11-16 18:39:07.000000000 -0500
4981@@ -2,7 +2,7 @@
4982 #
4983
4984 asflags-y := -ansi
4985-ccflags-y := -Werror
4986+#ccflags-y := -Werror
4987
4988 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4989 obj-y += fault_$(BITS).o
4990diff -urNp linux-3.1.1/arch/sparc/mm/srmmu.c linux-3.1.1/arch/sparc/mm/srmmu.c
4991--- linux-3.1.1/arch/sparc/mm/srmmu.c 2011-11-11 15:19:27.000000000 -0500
4992+++ linux-3.1.1/arch/sparc/mm/srmmu.c 2011-11-16 18:39:07.000000000 -0500
4993@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
4994 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
4995 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
4996 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
4997+
4998+#ifdef CONFIG_PAX_PAGEEXEC
4999+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5000+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5001+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5002+#endif
5003+
5004 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5005 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5006
5007diff -urNp linux-3.1.1/arch/um/include/asm/kmap_types.h linux-3.1.1/arch/um/include/asm/kmap_types.h
5008--- linux-3.1.1/arch/um/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
5009+++ linux-3.1.1/arch/um/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
5010@@ -23,6 +23,7 @@ enum km_type {
5011 KM_IRQ1,
5012 KM_SOFTIRQ0,
5013 KM_SOFTIRQ1,
5014+ KM_CLEARPAGE,
5015 KM_TYPE_NR
5016 };
5017
5018diff -urNp linux-3.1.1/arch/um/include/asm/page.h linux-3.1.1/arch/um/include/asm/page.h
5019--- linux-3.1.1/arch/um/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
5020+++ linux-3.1.1/arch/um/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
5021@@ -14,6 +14,9 @@
5022 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5023 #define PAGE_MASK (~(PAGE_SIZE-1))
5024
5025+#define ktla_ktva(addr) (addr)
5026+#define ktva_ktla(addr) (addr)
5027+
5028 #ifndef __ASSEMBLY__
5029
5030 struct page;
5031diff -urNp linux-3.1.1/arch/um/kernel/process.c linux-3.1.1/arch/um/kernel/process.c
5032--- linux-3.1.1/arch/um/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
5033+++ linux-3.1.1/arch/um/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
5034@@ -404,22 +404,6 @@ int singlestepping(void * t)
5035 return 2;
5036 }
5037
5038-/*
5039- * Only x86 and x86_64 have an arch_align_stack().
5040- * All other arches have "#define arch_align_stack(x) (x)"
5041- * in their asm/system.h
5042- * As this is included in UML from asm-um/system-generic.h,
5043- * we can use it to behave as the subarch does.
5044- */
5045-#ifndef arch_align_stack
5046-unsigned long arch_align_stack(unsigned long sp)
5047-{
5048- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5049- sp -= get_random_int() % 8192;
5050- return sp & ~0xf;
5051-}
5052-#endif
5053-
5054 unsigned long get_wchan(struct task_struct *p)
5055 {
5056 unsigned long stack_page, sp, ip;
5057diff -urNp linux-3.1.1/arch/um/Makefile linux-3.1.1/arch/um/Makefile
5058--- linux-3.1.1/arch/um/Makefile 2011-11-11 15:19:27.000000000 -0500
5059+++ linux-3.1.1/arch/um/Makefile 2011-11-16 18:39:07.000000000 -0500
5060@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINE
5061 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5062 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5063
5064+ifdef CONSTIFY_PLUGIN
5065+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5066+endif
5067+
5068 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5069
5070 #This will adjust *FLAGS accordingly to the platform.
5071diff -urNp linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h
5072--- linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h 2011-11-11 15:19:27.000000000 -0500
5073+++ linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h 2011-11-16 18:39:07.000000000 -0500
5074@@ -17,7 +17,7 @@
5075 # define AT_VECTOR_SIZE_ARCH 1
5076 #endif
5077
5078-extern unsigned long arch_align_stack(unsigned long sp);
5079+#define arch_align_stack(x) ((x) & ~0xfUL)
5080
5081 void default_idle(void);
5082
5083diff -urNp linux-3.1.1/arch/um/sys-i386/syscalls.c linux-3.1.1/arch/um/sys-i386/syscalls.c
5084--- linux-3.1.1/arch/um/sys-i386/syscalls.c 2011-11-11 15:19:27.000000000 -0500
5085+++ linux-3.1.1/arch/um/sys-i386/syscalls.c 2011-11-16 18:39:07.000000000 -0500
5086@@ -11,6 +11,21 @@
5087 #include "asm/uaccess.h"
5088 #include "asm/unistd.h"
5089
5090+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5091+{
5092+ unsigned long pax_task_size = TASK_SIZE;
5093+
5094+#ifdef CONFIG_PAX_SEGMEXEC
5095+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5096+ pax_task_size = SEGMEXEC_TASK_SIZE;
5097+#endif
5098+
5099+ if (len > pax_task_size || addr > pax_task_size - len)
5100+ return -EINVAL;
5101+
5102+ return 0;
5103+}
5104+
5105 /*
5106 * The prototype on i386 is:
5107 *
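i386_mmap_check() above rejects ranges with "len > pax_task_size || addr > pax_task_size - len" rather than the more obvious "addr + len > pax_task_size", so that a large addr/len pair cannot wrap around the address space and slip past the test. A standalone sketch of the two outcomes; the limit and addresses below are made-up 32-bit values.

#include <stdio.h>
#include <stdint.h>

/* Sketch only: same shape as the check above, on explicit 32-bit values. */
static int range_ok(uint32_t addr, uint32_t len, uint32_t limit)
{
        if (len > limit || addr > limit - len)
                return 0;                         /* the patch returns -EINVAL here */
        return 1;
}

int main(void)
{
        uint32_t limit = 0xC0000000u;             /* e.g. a 3 GB task size */

        /* a naive "addr + len > limit" wraps to a small value and would accept this */
        printf("%d\n", range_ok(0xB0000000u, 0x50000000u, limit));  /* 0 */
        printf("%d\n", range_ok(0x08048000u, 0x00001000u, limit));  /* 1 */
        return 0;
}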
5108diff -urNp linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h
5109--- linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h 2011-11-11 15:19:27.000000000 -0500
5110+++ linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h 2011-11-16 18:39:07.000000000 -0500
5111@@ -17,7 +17,7 @@
5112 # define AT_VECTOR_SIZE_ARCH 1
5113 #endif
5114
5115-extern unsigned long arch_align_stack(unsigned long sp);
5116+#define arch_align_stack(x) ((x) & ~0xfUL)
5117
5118 void default_idle(void);
5119
5120diff -urNp linux-3.1.1/arch/x86/boot/bitops.h linux-3.1.1/arch/x86/boot/bitops.h
5121--- linux-3.1.1/arch/x86/boot/bitops.h 2011-11-11 15:19:27.000000000 -0500
5122+++ linux-3.1.1/arch/x86/boot/bitops.h 2011-11-16 18:39:07.000000000 -0500
5123@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5124 u8 v;
5125 const u32 *p = (const u32 *)addr;
5126
5127- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5128+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5129 return v;
5130 }
5131
5132@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5133
5134 static inline void set_bit(int nr, void *addr)
5135 {
5136- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5137+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5138 }
5139
5140 #endif /* BOOT_BITOPS_H */
5141diff -urNp linux-3.1.1/arch/x86/boot/boot.h linux-3.1.1/arch/x86/boot/boot.h
5142--- linux-3.1.1/arch/x86/boot/boot.h 2011-11-11 15:19:27.000000000 -0500
5143+++ linux-3.1.1/arch/x86/boot/boot.h 2011-11-16 18:39:07.000000000 -0500
5144@@ -85,7 +85,7 @@ static inline void io_delay(void)
5145 static inline u16 ds(void)
5146 {
5147 u16 seg;
5148- asm("movw %%ds,%0" : "=rm" (seg));
5149+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5150 return seg;
5151 }
5152
5153@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5154 static inline int memcmp(const void *s1, const void *s2, size_t len)
5155 {
5156 u8 diff;
5157- asm("repe; cmpsb; setnz %0"
5158+ asm volatile("repe; cmpsb; setnz %0"
5159 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5160 return diff;
5161 }
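The boot-code hunks above (and the cpucheck.c ones further down) add "volatile" to asm statements whose outputs come from mutable hardware state. An asm with output operands is not implicitly volatile, so GCC may merge two identical statements as a common subexpression or drop one whose result looks unused. A minimal x86-only sketch of the difference, using rdtsc merely as a convenient stand-in:

/* Sketch only: with -O2 the non-volatile pair may be collapsed into a single
 * rdtsc, while the volatile pair is always executed twice. */
#include <stdio.h>

static inline unsigned int read_counter_nonvolatile(void)
{
        unsigned int v;
        asm("rdtsc" : "=a" (v) : : "edx");          /* may be CSE'd or hoisted */
        return v;
}

static inline unsigned int read_counter_volatile(void)
{
        unsigned int v;
        asm volatile("rdtsc" : "=a" (v) : : "edx"); /* always re-executed */
        return v;
}

int main(void)
{
        printf("%u %u\n", read_counter_nonvolatile(), read_counter_nonvolatile());
        printf("%u %u\n", read_counter_volatile(), read_counter_volatile());
        return 0;
}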
5162diff -urNp linux-3.1.1/arch/x86/boot/compressed/head_32.S linux-3.1.1/arch/x86/boot/compressed/head_32.S
5163--- linux-3.1.1/arch/x86/boot/compressed/head_32.S 2011-11-11 15:19:27.000000000 -0500
5164+++ linux-3.1.1/arch/x86/boot/compressed/head_32.S 2011-11-16 18:39:07.000000000 -0500
5165@@ -76,7 +76,7 @@ ENTRY(startup_32)
5166 notl %eax
5167 andl %eax, %ebx
5168 #else
5169- movl $LOAD_PHYSICAL_ADDR, %ebx
5170+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5171 #endif
5172
5173 /* Target address to relocate to for decompression */
5174@@ -162,7 +162,7 @@ relocated:
5175 * and where it was actually loaded.
5176 */
5177 movl %ebp, %ebx
5178- subl $LOAD_PHYSICAL_ADDR, %ebx
5179+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5180 jz 2f /* Nothing to be done if loaded at compiled addr. */
5181 /*
5182 * Process relocations.
5183@@ -170,8 +170,7 @@ relocated:
5184
5185 1: subl $4, %edi
5186 movl (%edi), %ecx
5187- testl %ecx, %ecx
5188- jz 2f
5189+ jecxz 2f
5190 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5191 jmp 1b
5192 2:
5193diff -urNp linux-3.1.1/arch/x86/boot/compressed/head_64.S linux-3.1.1/arch/x86/boot/compressed/head_64.S
5194--- linux-3.1.1/arch/x86/boot/compressed/head_64.S 2011-11-11 15:19:27.000000000 -0500
5195+++ linux-3.1.1/arch/x86/boot/compressed/head_64.S 2011-11-16 18:39:07.000000000 -0500
5196@@ -91,7 +91,7 @@ ENTRY(startup_32)
5197 notl %eax
5198 andl %eax, %ebx
5199 #else
5200- movl $LOAD_PHYSICAL_ADDR, %ebx
5201+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205@@ -233,7 +233,7 @@ ENTRY(startup_64)
5206 notq %rax
5207 andq %rax, %rbp
5208 #else
5209- movq $LOAD_PHYSICAL_ADDR, %rbp
5210+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5211 #endif
5212
5213 /* Target address to relocate to for decompression */
5214diff -urNp linux-3.1.1/arch/x86/boot/compressed/Makefile linux-3.1.1/arch/x86/boot/compressed/Makefile
5215--- linux-3.1.1/arch/x86/boot/compressed/Makefile 2011-11-11 15:19:27.000000000 -0500
5216+++ linux-3.1.1/arch/x86/boot/compressed/Makefile 2011-11-16 18:39:07.000000000 -0500
5217@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5218 KBUILD_CFLAGS += $(cflags-y)
5219 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5220 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5221+ifdef CONSTIFY_PLUGIN
5222+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5223+endif
5224
5225 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5226 GCOV_PROFILE := n
5227diff -urNp linux-3.1.1/arch/x86/boot/compressed/misc.c linux-3.1.1/arch/x86/boot/compressed/misc.c
5228--- linux-3.1.1/arch/x86/boot/compressed/misc.c 2011-11-11 15:19:27.000000000 -0500
5229+++ linux-3.1.1/arch/x86/boot/compressed/misc.c 2011-11-16 18:39:07.000000000 -0500
5230@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5231 case PT_LOAD:
5232 #ifdef CONFIG_RELOCATABLE
5233 dest = output;
5234- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5235+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5236 #else
5237 dest = (void *)(phdr->p_paddr);
5238 #endif
5239@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5240 error("Destination address too large");
5241 #endif
5242 #ifndef CONFIG_RELOCATABLE
5243- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5244+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5245 error("Wrong destination address");
5246 #endif
5247
5248diff -urNp linux-3.1.1/arch/x86/boot/compressed/relocs.c linux-3.1.1/arch/x86/boot/compressed/relocs.c
5249--- linux-3.1.1/arch/x86/boot/compressed/relocs.c 2011-11-11 15:19:27.000000000 -0500
5250+++ linux-3.1.1/arch/x86/boot/compressed/relocs.c 2011-11-16 18:39:07.000000000 -0500
5251@@ -13,8 +13,11 @@
5252
5253 static void die(char *fmt, ...);
5254
5255+#include "../../../../include/generated/autoconf.h"
5256+
5257 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5258 static Elf32_Ehdr ehdr;
5259+static Elf32_Phdr *phdr;
5260 static unsigned long reloc_count, reloc_idx;
5261 static unsigned long *relocs;
5262
5263@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5264 }
5265 }
5266
5267+static void read_phdrs(FILE *fp)
5268+{
5269+ unsigned int i;
5270+
5271+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5272+ if (!phdr) {
5273+ die("Unable to allocate %d program headers\n",
5274+ ehdr.e_phnum);
5275+ }
5276+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5277+ die("Seek to %d failed: %s\n",
5278+ ehdr.e_phoff, strerror(errno));
5279+ }
5280+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5281+ die("Cannot read ELF program headers: %s\n",
5282+ strerror(errno));
5283+ }
5284+ for(i = 0; i < ehdr.e_phnum; i++) {
5285+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5286+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5287+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5288+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5289+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5290+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5291+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5292+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5293+ }
5294+
5295+}
5296+
5297 static void read_shdrs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 Elf32_Shdr shdr;
5302
5303 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5304@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5305
5306 static void read_strtabs(FILE *fp)
5307 {
5308- int i;
5309+ unsigned int i;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_STRTAB) {
5313@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5314
5315 static void read_symtabs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319 for (i = 0; i < ehdr.e_shnum; i++) {
5320 struct section *sec = &secs[i];
5321 if (sec->shdr.sh_type != SHT_SYMTAB) {
5322@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5323
5324 static void read_relocs(FILE *fp)
5325 {
5326- int i,j;
5327+ unsigned int i,j;
5328+ uint32_t base;
5329+
5330 for (i = 0; i < ehdr.e_shnum; i++) {
5331 struct section *sec = &secs[i];
5332 if (sec->shdr.sh_type != SHT_REL) {
5333@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5334 die("Cannot read symbol table: %s\n",
5335 strerror(errno));
5336 }
5337+ base = 0;
5338+ for (j = 0; j < ehdr.e_phnum; j++) {
5339+ if (phdr[j].p_type != PT_LOAD )
5340+ continue;
5341+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5342+ continue;
5343+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5344+ break;
5345+ }
5346 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5347 Elf32_Rel *rel = &sec->reltab[j];
5348- rel->r_offset = elf32_to_cpu(rel->r_offset);
5349+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5350 rel->r_info = elf32_to_cpu(rel->r_info);
5351 }
5352 }
5353@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5354
5355 static void print_absolute_symbols(void)
5356 {
5357- int i;
5358+ unsigned int i;
5359 printf("Absolute symbols\n");
5360 printf(" Num: Value Size Type Bind Visibility Name\n");
5361 for (i = 0; i < ehdr.e_shnum; i++) {
5362 struct section *sec = &secs[i];
5363 char *sym_strtab;
5364 Elf32_Sym *sh_symtab;
5365- int j;
5366+ unsigned int j;
5367
5368 if (sec->shdr.sh_type != SHT_SYMTAB) {
5369 continue;
5370@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5371
5372 static void print_absolute_relocs(void)
5373 {
5374- int i, printed = 0;
5375+ unsigned int i, printed = 0;
5376
5377 for (i = 0; i < ehdr.e_shnum; i++) {
5378 struct section *sec = &secs[i];
5379 struct section *sec_applies, *sec_symtab;
5380 char *sym_strtab;
5381 Elf32_Sym *sh_symtab;
5382- int j;
5383+ unsigned int j;
5384 if (sec->shdr.sh_type != SHT_REL) {
5385 continue;
5386 }
5387@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5388
5389 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5390 {
5391- int i;
5392+ unsigned int i;
5393 /* Walk through the relocations */
5394 for (i = 0; i < ehdr.e_shnum; i++) {
5395 char *sym_strtab;
5396 Elf32_Sym *sh_symtab;
5397 struct section *sec_applies, *sec_symtab;
5398- int j;
5399+ unsigned int j;
5400 struct section *sec = &secs[i];
5401
5402 if (sec->shdr.sh_type != SHT_REL) {
5403@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5404 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5405 continue;
5406 }
5407+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5408+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5409+ continue;
5410+
5411+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5412+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5413+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5414+ continue;
5415+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5416+ continue;
5417+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5418+ continue;
5419+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5420+ continue;
5421+#endif
5422+
5423 switch (r_type) {
5424 case R_386_NONE:
5425 case R_386_PC32:
5426@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5427
5428 static void emit_relocs(int as_text)
5429 {
5430- int i;
5431+ unsigned int i;
5432 /* Count how many relocations I have and allocate space for them. */
5433 reloc_count = 0;
5434 walk_relocs(count_reloc);
5435@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5436 fname, strerror(errno));
5437 }
5438 read_ehdr(fp);
5439+ read_phdrs(fp);
5440 read_shdrs(fp);
5441 read_strtabs(fp);
5442 read_symtabs(fp);
5443diff -urNp linux-3.1.1/arch/x86/boot/cpucheck.c linux-3.1.1/arch/x86/boot/cpucheck.c
5444--- linux-3.1.1/arch/x86/boot/cpucheck.c 2011-11-11 15:19:27.000000000 -0500
5445+++ linux-3.1.1/arch/x86/boot/cpucheck.c 2011-11-16 18:39:07.000000000 -0500
5446@@ -74,7 +74,7 @@ static int has_fpu(void)
5447 u16 fcw = -1, fsw = -1;
5448 u32 cr0;
5449
5450- asm("movl %%cr0,%0" : "=r" (cr0));
5451+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5452 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5453 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5454 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5455@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5456 {
5457 u32 f0, f1;
5458
5459- asm("pushfl ; "
5460+ asm volatile("pushfl ; "
5461 "pushfl ; "
5462 "popl %0 ; "
5463 "movl %0,%1 ; "
5464@@ -115,7 +115,7 @@ static void get_flags(void)
5465 set_bit(X86_FEATURE_FPU, cpu.flags);
5466
5467 if (has_eflag(X86_EFLAGS_ID)) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (max_intel_level),
5471 "=b" (cpu_vendor[0]),
5472 "=d" (cpu_vendor[1]),
5473@@ -124,7 +124,7 @@ static void get_flags(void)
5474
5475 if (max_intel_level >= 0x00000001 &&
5476 max_intel_level <= 0x0000ffff) {
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (tfms),
5480 "=c" (cpu.flags[4]),
5481 "=d" (cpu.flags[0])
5482@@ -136,7 +136,7 @@ static void get_flags(void)
5483 cpu.model += ((tfms >> 16) & 0xf) << 4;
5484 }
5485
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "=a" (max_amd_level)
5489 : "a" (0x80000000)
5490 : "ebx", "ecx", "edx");
5491@@ -144,7 +144,7 @@ static void get_flags(void)
5492 if (max_amd_level >= 0x80000001 &&
5493 max_amd_level <= 0x8000ffff) {
5494 u32 eax = 0x80000001;
5495- asm("cpuid"
5496+ asm volatile("cpuid"
5497 : "+a" (eax),
5498 "=c" (cpu.flags[6]),
5499 "=d" (cpu.flags[1])
5500@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5501 u32 ecx = MSR_K7_HWCR;
5502 u32 eax, edx;
5503
5504- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5505+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5506 eax &= ~(1 << 15);
5507- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5508+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5509
5510 get_flags(); /* Make sure it really did something */
5511 err = check_flags();
5512@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5513 u32 ecx = MSR_VIA_FCR;
5514 u32 eax, edx;
5515
5516- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5517+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5518 eax |= (1<<1)|(1<<7);
5519- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5520+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5521
5522 set_bit(X86_FEATURE_CX8, cpu.flags);
5523 err = check_flags();
5524@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5525 u32 eax, edx;
5526 u32 level = 1;
5527
5528- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5529- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5530- asm("cpuid"
5531+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5532+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5533+ asm volatile("cpuid"
5534 : "+a" (level), "=d" (cpu.flags[0])
5535 : : "ecx", "ebx");
5536- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5537+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5538
5539 err = check_flags();
5540 }
5541diff -urNp linux-3.1.1/arch/x86/boot/header.S linux-3.1.1/arch/x86/boot/header.S
5542--- linux-3.1.1/arch/x86/boot/header.S 2011-11-11 15:19:27.000000000 -0500
5543+++ linux-3.1.1/arch/x86/boot/header.S 2011-11-16 18:39:07.000000000 -0500
5544@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5545 # single linked list of
5546 # struct setup_data
5547
5548-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5549+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5550
5551 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5552 #define VO_INIT_SIZE (VO__end - VO__text)
5553diff -urNp linux-3.1.1/arch/x86/boot/Makefile linux-3.1.1/arch/x86/boot/Makefile
5554--- linux-3.1.1/arch/x86/boot/Makefile 2011-11-11 15:19:27.000000000 -0500
5555+++ linux-3.1.1/arch/x86/boot/Makefile 2011-11-16 18:39:07.000000000 -0500
5556@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5557 $(call cc-option, -fno-stack-protector) \
5558 $(call cc-option, -mpreferred-stack-boundary=2)
5559 KBUILD_CFLAGS += $(call cc-option, -m32)
5560+ifdef CONSTIFY_PLUGIN
5561+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5562+endif
5563 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5564 GCOV_PROFILE := n
5565
5566diff -urNp linux-3.1.1/arch/x86/boot/memory.c linux-3.1.1/arch/x86/boot/memory.c
5567--- linux-3.1.1/arch/x86/boot/memory.c 2011-11-11 15:19:27.000000000 -0500
5568+++ linux-3.1.1/arch/x86/boot/memory.c 2011-11-16 18:39:07.000000000 -0500
5569@@ -19,7 +19,7 @@
5570
5571 static int detect_memory_e820(void)
5572 {
5573- int count = 0;
5574+ unsigned int count = 0;
5575 struct biosregs ireg, oreg;
5576 struct e820entry *desc = boot_params.e820_map;
5577 static struct e820entry buf; /* static so it is zeroed */
5578diff -urNp linux-3.1.1/arch/x86/boot/video.c linux-3.1.1/arch/x86/boot/video.c
5579--- linux-3.1.1/arch/x86/boot/video.c 2011-11-11 15:19:27.000000000 -0500
5580+++ linux-3.1.1/arch/x86/boot/video.c 2011-11-16 18:39:07.000000000 -0500
5581@@ -96,7 +96,7 @@ static void store_mode_params(void)
5582 static unsigned int get_entry(void)
5583 {
5584 char entry_buf[4];
5585- int i, len = 0;
5586+ unsigned int i, len = 0;
5587 int key;
5588 unsigned int v;
5589
5590diff -urNp linux-3.1.1/arch/x86/boot/video-vesa.c linux-3.1.1/arch/x86/boot/video-vesa.c
5591--- linux-3.1.1/arch/x86/boot/video-vesa.c 2011-11-11 15:19:27.000000000 -0500
5592+++ linux-3.1.1/arch/x86/boot/video-vesa.c 2011-11-16 18:39:07.000000000 -0500
5593@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5594
5595 boot_params.screen_info.vesapm_seg = oreg.es;
5596 boot_params.screen_info.vesapm_off = oreg.di;
5597+ boot_params.screen_info.vesapm_size = oreg.cx;
5598 }
5599
5600 /*
5601diff -urNp linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S
5602--- linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5603+++ linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5604@@ -8,6 +8,8 @@
5605 * including this sentence is retained in full.
5606 */
5607
5608+#include <asm/alternative-asm.h>
5609+
5610 .extern crypto_ft_tab
5611 .extern crypto_it_tab
5612 .extern crypto_fl_tab
5613@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5614 je B192; \
5615 leaq 32(r9),r9;
5616
5617+#define ret pax_force_retaddr; ret
5618+
5619 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5620 movq r1,r2; \
5621 movq r3,r4; \
5622diff -urNp linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S
5623--- linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5624+++ linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5625@@ -1,3 +1,5 @@
5626+#include <asm/alternative-asm.h>
5627+
5628 # enter ECRYPT_encrypt_bytes
5629 .text
5630 .p2align 5
5631@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5632 add %r11,%rsp
5633 mov %rdi,%rax
5634 mov %rsi,%rdx
5635+ pax_force_retaddr
5636 ret
5637 # bytesatleast65:
5638 ._bytesatleast65:
5639@@ -891,6 +894,7 @@ ECRYPT_keysetup:
5640 add %r11,%rsp
5641 mov %rdi,%rax
5642 mov %rsi,%rdx
5643+ pax_force_retaddr
5644 ret
5645 # enter ECRYPT_ivsetup
5646 .text
5647@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5648 add %r11,%rsp
5649 mov %rdi,%rax
5650 mov %rsi,%rdx
5651+ pax_force_retaddr
5652 ret
5653diff -urNp linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S
5654--- linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5655+++ linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5656@@ -21,6 +21,7 @@
5657 .text
5658
5659 #include <asm/asm-offsets.h>
5660+#include <asm/alternative-asm.h>
5661
5662 #define a_offset 0
5663 #define b_offset 4
5664@@ -269,6 +270,7 @@ twofish_enc_blk:
5665
5666 popq R1
5667 movq $1,%rax
5668+ pax_force_retaddr
5669 ret
5670
5671 twofish_dec_blk:
5672@@ -321,4 +323,5 @@ twofish_dec_blk:
5673
5674 popq R1
5675 movq $1,%rax
5676+ pax_force_retaddr
5677 ret
5678diff -urNp linux-3.1.1/arch/x86/ia32/ia32_aout.c linux-3.1.1/arch/x86/ia32/ia32_aout.c
5679--- linux-3.1.1/arch/x86/ia32/ia32_aout.c 2011-11-11 15:19:27.000000000 -0500
5680+++ linux-3.1.1/arch/x86/ia32/ia32_aout.c 2011-11-16 18:40:08.000000000 -0500
5681@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5682 unsigned long dump_start, dump_size;
5683 struct user32 dump;
5684
5685+ memset(&dump, 0, sizeof(dump));
5686+
5687 fs = get_fs();
5688 set_fs(KERNEL_DS);
5689 has_dumped = 1;
5690diff -urNp linux-3.1.1/arch/x86/ia32/ia32entry.S linux-3.1.1/arch/x86/ia32/ia32entry.S
5691--- linux-3.1.1/arch/x86/ia32/ia32entry.S 2011-11-11 15:19:27.000000000 -0500
5692+++ linux-3.1.1/arch/x86/ia32/ia32entry.S 2011-11-17 18:27:57.000000000 -0500
5693@@ -13,7 +13,9 @@
5694 #include <asm/thread_info.h>
5695 #include <asm/segment.h>
5696 #include <asm/irqflags.h>
5697+#include <asm/pgtable.h>
5698 #include <linux/linkage.h>
5699+#include <asm/alternative-asm.h>
5700
5701 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5702 #include <linux/elf-em.h>
5703@@ -95,6 +97,29 @@ ENTRY(native_irq_enable_sysexit)
5704 ENDPROC(native_irq_enable_sysexit)
5705 #endif
5706
5707+ .macro pax_enter_kernel_user
5708+#ifdef CONFIG_PAX_MEMORY_UDEREF
5709+ call pax_enter_kernel_user
5710+#endif
5711+ .endm
5712+
5713+ .macro pax_exit_kernel_user
5714+#ifdef CONFIG_PAX_MEMORY_UDEREF
5715+ call pax_exit_kernel_user
5716+#endif
5717+#ifdef CONFIG_PAX_RANDKSTACK
5718+ pushq %rax
5719+ call pax_randomize_kstack
5720+ popq %rax
5721+#endif
5722+ .endm
5723+
5724+.macro pax_erase_kstack
5725+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5726+ call pax_erase_kstack
5727+#endif
5728+.endm
5729+
5730 /*
5731 * 32bit SYSENTER instruction entry.
5732 *
5733@@ -121,7 +146,7 @@ ENTRY(ia32_sysenter_target)
5734 CFI_REGISTER rsp,rbp
5735 SWAPGS_UNSAFE_STACK
5736 movq PER_CPU_VAR(kernel_stack), %rsp
5737- addq $(KERNEL_STACK_OFFSET),%rsp
5738+ pax_enter_kernel_user
5739 /*
5740 * No need to follow this irqs on/off section: the syscall
5741 * disabled irqs, here we enable it straight after entry:
5742@@ -134,7 +159,8 @@ ENTRY(ia32_sysenter_target)
5743 CFI_REL_OFFSET rsp,0
5744 pushfq_cfi
5745 /*CFI_REL_OFFSET rflags,0*/
5746- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5747+ GET_THREAD_INFO(%r10)
5748+ movl TI_sysenter_return(%r10), %r10d
5749 CFI_REGISTER rip,r10
5750 pushq_cfi $__USER32_CS
5751 /*CFI_REL_OFFSET cs,0*/
5752@@ -146,6 +172,12 @@ ENTRY(ia32_sysenter_target)
5753 SAVE_ARGS 0,1,0
5754 /* no need to do an access_ok check here because rbp has been
5755 32bit zero extended */
5756+
5757+#ifdef CONFIG_PAX_MEMORY_UDEREF
5758+ mov $PAX_USER_SHADOW_BASE,%r10
5759+ add %r10,%rbp
5760+#endif
5761+
5762 1: movl (%rbp),%ebp
5763 .section __ex_table,"a"
5764 .quad 1b,ia32_badarg
5765@@ -168,6 +200,8 @@ sysenter_dispatch:
5766 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5767 jnz sysexit_audit
5768 sysexit_from_sys_call:
5769+ pax_exit_kernel_user
5770+ pax_erase_kstack
5771 andl $~TS_COMPAT,TI_status(%r10)
5772 /* clear IF, that popfq doesn't enable interrupts early */
5773 andl $~0x200,EFLAGS-R11(%rsp)
5774@@ -194,6 +228,9 @@ sysexit_from_sys_call:
5775 movl %eax,%esi /* 2nd arg: syscall number */
5776 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5777 call audit_syscall_entry
5778+
5779+ pax_erase_kstack
5780+
5781 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783 ja ia32_badsys
5784@@ -246,6 +283,9 @@ sysenter_tracesys:
5785 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5786 movq %rsp,%rdi /* &pt_regs -> arg1 */
5787 call syscall_trace_enter
5788+
5789+ pax_erase_kstack
5790+
5791 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5792 RESTORE_REST
5793 cmpq $(IA32_NR_syscalls-1),%rax
5794@@ -277,19 +317,24 @@ ENDPROC(ia32_sysenter_target)
5795 ENTRY(ia32_cstar_target)
5796 CFI_STARTPROC32 simple
5797 CFI_SIGNAL_FRAME
5798- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5799+ CFI_DEF_CFA rsp,0
5800 CFI_REGISTER rip,rcx
5801 /*CFI_REGISTER rflags,r11*/
5802 SWAPGS_UNSAFE_STACK
5803 movl %esp,%r8d
5804 CFI_REGISTER rsp,r8
5805 movq PER_CPU_VAR(kernel_stack),%rsp
5806+
5807+#ifdef CONFIG_PAX_MEMORY_UDEREF
5808+ pax_enter_kernel_user
5809+#endif
5810+
5811 /*
5812 * No need to follow this irqs on/off section: the syscall
5813 * disabled irqs and here we enable it straight after entry:
5814 */
5815 ENABLE_INTERRUPTS(CLBR_NONE)
5816- SAVE_ARGS 8,0,0
5817+ SAVE_ARGS 8*6,0,0
5818 movl %eax,%eax /* zero extension */
5819 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5820 movq %rcx,RIP-ARGOFFSET(%rsp)
5821@@ -305,6 +350,12 @@ ENTRY(ia32_cstar_target)
5822 /* no need to do an access_ok check here because r8 has been
5823 32bit zero extended */
5824 /* hardware stack frame is complete now */
5825+
5826+#ifdef CONFIG_PAX_MEMORY_UDEREF
5827+ mov $PAX_USER_SHADOW_BASE,%r10
5828+ add %r10,%r8
5829+#endif
5830+
5831 1: movl (%r8),%r9d
5832 .section __ex_table,"a"
5833 .quad 1b,ia32_badarg
5834@@ -327,6 +378,8 @@ cstar_dispatch:
5835 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5836 jnz sysretl_audit
5837 sysretl_from_sys_call:
5838+ pax_exit_kernel_user
5839+ pax_erase_kstack
5840 andl $~TS_COMPAT,TI_status(%r10)
5841 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
5842 movl RIP-ARGOFFSET(%rsp),%ecx
5843@@ -364,6 +417,9 @@ cstar_tracesys:
5844 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5845 movq %rsp,%rdi /* &pt_regs -> arg1 */
5846 call syscall_trace_enter
5847+
5848+ pax_erase_kstack
5849+
5850 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5851 RESTORE_REST
5852 xchgl %ebp,%r9d
5853@@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5854 CFI_REL_OFFSET rip,RIP-RIP
5855 PARAVIRT_ADJUST_EXCEPTION_FRAME
5856 SWAPGS
5857+ pax_enter_kernel_user
5858 /*
5859 * No need to follow this irqs on/off section: the syscall
5860 * disabled irqs and here we enable it straight after entry:
5861@@ -441,6 +498,9 @@ ia32_tracesys:
5862 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5863 movq %rsp,%rdi /* &pt_regs -> arg1 */
5864 call syscall_trace_enter
5865+
5866+ pax_erase_kstack
5867+
5868 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5869 RESTORE_REST
5870 cmpq $(IA32_NR_syscalls-1),%rax
5871@@ -455,6 +515,7 @@ ia32_badsys:
5872
5873 quiet_ni_syscall:
5874 movq $-ENOSYS,%rax
5875+ pax_force_retaddr
5876 ret
5877 CFI_ENDPROC
5878
5879diff -urNp linux-3.1.1/arch/x86/ia32/ia32_signal.c linux-3.1.1/arch/x86/ia32/ia32_signal.c
5880--- linux-3.1.1/arch/x86/ia32/ia32_signal.c 2011-11-11 15:19:27.000000000 -0500
5881+++ linux-3.1.1/arch/x86/ia32/ia32_signal.c 2011-11-16 18:39:07.000000000 -0500
5882@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const
5883 }
5884 seg = get_fs();
5885 set_fs(KERNEL_DS);
5886- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
5887+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
5888 set_fs(seg);
5889 if (ret >= 0 && uoss_ptr) {
5890 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
5891@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct
5892 */
5893 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
5894 size_t frame_size,
5895- void **fpstate)
5896+ void __user **fpstate)
5897 {
5898 unsigned long sp;
5899
5900@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct
5901
5902 if (used_math()) {
5903 sp = sp - sig_xstate_ia32_size;
5904- *fpstate = (struct _fpstate_ia32 *) sp;
5905+ *fpstate = (struct _fpstate_ia32 __user *) sp;
5906 if (save_i387_xstate_ia32(*fpstate) < 0)
5907 return (void __user *) -1L;
5908 }
5909@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct
5910 sp -= frame_size;
5911 /* Align the stack pointer according to the i386 ABI,
5912 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5913- sp = ((sp + 4) & -16ul) - 4;
5914+ sp = ((sp - 12) & -16ul) - 4;
5915 return (void __user *) sp;
5916 }
5917
5918@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_s
5919 * These are actually not used anymore, but left because some
5920 * gdb versions depend on them as a marker.
5921 */
5922- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5923+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5924 } put_user_catch(err);
5925
5926 if (err)
5927@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct
5928 0xb8,
5929 __NR_ia32_rt_sigreturn,
5930 0x80cd,
5931- 0,
5932+ 0
5933 };
5934
5935 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5936@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct
5937
5938 if (ka->sa.sa_flags & SA_RESTORER)
5939 restorer = ka->sa.sa_restorer;
5940+ else if (current->mm->context.vdso)
5941+ /* Return stub is in 32bit vsyscall page */
5942+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5943 else
5944- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5945- rt_sigreturn);
5946+ restorer = &frame->retcode;
5947 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5948
5949 /*
5950 * Not actually used anymore, but left because some gdb
5951 * versions need it.
5952 */
5953- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5954+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5955 } put_user_catch(err);
5956
5957 if (err)
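The get_sigframe() hunk above changes the stack rounding from "((sp + 4) & -16ul) - 4" to "((sp - 12) & -16ul) - 4". Both expressions keep the property the in-code comment asks for, namely ((sp + 4) & 15) == 0 on handler entry, and the patched one always lands exactly 16 bytes lower, so the frame never starts at the original sp itself. A quick standalone arithmetic check:

/* Sketch only: verifies the alignment property for both rounding variants. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        for (unsigned long sp = 0x1000; sp < 0x1040; sp++) {
                unsigned long oldsp = ((sp + 4)  & -16ul) - 4;
                unsigned long newsp = ((sp - 12) & -16ul) - 4;

                assert(((oldsp + 4) & 15) == 0);
                assert(((newsp + 4) & 15) == 0);
                assert(newsp == oldsp - 16);
        }
        puts("alignment holds for both variants");
        return 0;
}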
5958diff -urNp linux-3.1.1/arch/x86/ia32/sys_ia32.c linux-3.1.1/arch/x86/ia32/sys_ia32.c
5959--- linux-3.1.1/arch/x86/ia32/sys_ia32.c 2011-11-11 15:19:27.000000000 -0500
5960+++ linux-3.1.1/arch/x86/ia32/sys_ia32.c 2011-11-16 18:39:07.000000000 -0500
5961@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
5962 */
5963 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
5964 {
5965- typeof(ubuf->st_uid) uid = 0;
5966- typeof(ubuf->st_gid) gid = 0;
5967+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
5968+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
5969 SET_UID(uid, stat->uid);
5970 SET_GID(gid, stat->gid);
5971 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
5972@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
5973 }
5974 set_fs(KERNEL_DS);
5975 ret = sys_rt_sigprocmask(how,
5976- set ? (sigset_t __user *)&s : NULL,
5977- oset ? (sigset_t __user *)&s : NULL,
5978+ set ? (sigset_t __force_user *)&s : NULL,
5979+ oset ? (sigset_t __force_user *)&s : NULL,
5980 sigsetsize);
5981 set_fs(old_fs);
5982 if (ret)
5983@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
5984 return alarm_setitimer(seconds);
5985 }
5986
5987-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
5988+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
5989 int options)
5990 {
5991 return compat_sys_wait4(pid, stat_addr, options, NULL);
5992@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
5993 mm_segment_t old_fs = get_fs();
5994
5995 set_fs(KERNEL_DS);
5996- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
5997+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
5998 set_fs(old_fs);
5999 if (put_compat_timespec(&t, interval))
6000 return -EFAULT;
6001@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
6002 mm_segment_t old_fs = get_fs();
6003
6004 set_fs(KERNEL_DS);
6005- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6006+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6007 set_fs(old_fs);
6008 if (!ret) {
6009 switch (_NSIG_WORDS) {
6010@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
6011 if (copy_siginfo_from_user32(&info, uinfo))
6012 return -EFAULT;
6013 set_fs(KERNEL_DS);
6014- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6015+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6016 set_fs(old_fs);
6017 return ret;
6018 }
6019@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6020 return -EFAULT;
6021
6022 set_fs(KERNEL_DS);
6023- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6024+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6025 count);
6026 set_fs(old_fs);
6027
6028diff -urNp linux-3.1.1/arch/x86/include/asm/alternative-asm.h linux-3.1.1/arch/x86/include/asm/alternative-asm.h
6029--- linux-3.1.1/arch/x86/include/asm/alternative-asm.h 2011-11-11 15:19:27.000000000 -0500
6030+++ linux-3.1.1/arch/x86/include/asm/alternative-asm.h 2011-11-16 18:39:07.000000000 -0500
6031@@ -15,6 +15,20 @@
6032 .endm
6033 #endif
6034
6035+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6036+ .macro pax_force_retaddr rip=0
6037+ btsq $63,\rip(%rsp)
6038+ .endm
6039+ .macro pax_force_fptr ptr
6040+ btsq $63,\ptr
6041+ .endm
6042+#else
6043+ .macro pax_force_retaddr rip=0
6044+ .endm
6045+ .macro pax_force_fptr ptr
6046+ .endm
6047+#endif
6048+
6049 .macro altinstruction_entry orig alt feature orig_len alt_len
6050 .long \orig - .
6051 .long \alt - .
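Under CONFIG_PAX_KERNEXEC_PLUGIN the pax_force_retaddr macro above sets bit 63 of the saved return address ("btsq $63,\rip(%rsp)"). Kernel text addresses already have that bit set, so they pass through unchanged; a userland address smuggled into a saved return slot becomes non-canonical and faults on the following ret. A standalone sketch of the bit manipulation, not part of the patch; the two addresses are hypothetical.

/* Sketch only: shows the effect of OR-ing in bit 63 on the two address classes. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t kernel_ret = 0xffffffff81234567ull;   /* hypothetical kernel text address */
        uint64_t user_ret   = 0x00007fffdeadbeefull;   /* hypothetical userland address */

        printf("%#llx -> %#llx (unchanged)\n",
               (unsigned long long)kernel_ret,
               (unsigned long long)(kernel_ret | (1ull << 63)));
        printf("%#llx -> %#llx (non-canonical, ret would fault)\n",
               (unsigned long long)user_ret,
               (unsigned long long)(user_ret | (1ull << 63)));
        return 0;
}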
6052diff -urNp linux-3.1.1/arch/x86/include/asm/alternative.h linux-3.1.1/arch/x86/include/asm/alternative.h
6053--- linux-3.1.1/arch/x86/include/asm/alternative.h 2011-11-11 15:19:27.000000000 -0500
6054+++ linux-3.1.1/arch/x86/include/asm/alternative.h 2011-11-16 18:39:07.000000000 -0500
6055@@ -89,7 +89,7 @@ static inline int alternatives_text_rese
6056 ".section .discard,\"aw\",@progbits\n" \
6057 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6058 ".previous\n" \
6059- ".section .altinstr_replacement, \"ax\"\n" \
6060+ ".section .altinstr_replacement, \"a\"\n" \
6061 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6062 ".previous"
6063
6064diff -urNp linux-3.1.1/arch/x86/include/asm/apic.h linux-3.1.1/arch/x86/include/asm/apic.h
6065--- linux-3.1.1/arch/x86/include/asm/apic.h 2011-11-11 15:19:27.000000000 -0500
6066+++ linux-3.1.1/arch/x86/include/asm/apic.h 2011-11-16 18:39:07.000000000 -0500
6067@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6068
6069 #ifdef CONFIG_X86_LOCAL_APIC
6070
6071-extern unsigned int apic_verbosity;
6072+extern int apic_verbosity;
6073 extern int local_apic_timer_c2_ok;
6074
6075 extern int disable_apic;
6076diff -urNp linux-3.1.1/arch/x86/include/asm/apm.h linux-3.1.1/arch/x86/include/asm/apm.h
6077--- linux-3.1.1/arch/x86/include/asm/apm.h 2011-11-11 15:19:27.000000000 -0500
6078+++ linux-3.1.1/arch/x86/include/asm/apm.h 2011-11-16 18:39:07.000000000 -0500
6079@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6080 __asm__ __volatile__(APM_DO_ZERO_SEGS
6081 "pushl %%edi\n\t"
6082 "pushl %%ebp\n\t"
6083- "lcall *%%cs:apm_bios_entry\n\t"
6084+ "lcall *%%ss:apm_bios_entry\n\t"
6085 "setc %%al\n\t"
6086 "popl %%ebp\n\t"
6087 "popl %%edi\n\t"
6088@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6089 __asm__ __volatile__(APM_DO_ZERO_SEGS
6090 "pushl %%edi\n\t"
6091 "pushl %%ebp\n\t"
6092- "lcall *%%cs:apm_bios_entry\n\t"
6093+ "lcall *%%ss:apm_bios_entry\n\t"
6094 "setc %%bl\n\t"
6095 "popl %%ebp\n\t"
6096 "popl %%edi\n\t"
6097diff -urNp linux-3.1.1/arch/x86/include/asm/atomic64_32.h linux-3.1.1/arch/x86/include/asm/atomic64_32.h
6098--- linux-3.1.1/arch/x86/include/asm/atomic64_32.h 2011-11-11 15:19:27.000000000 -0500
6099+++ linux-3.1.1/arch/x86/include/asm/atomic64_32.h 2011-11-16 18:39:07.000000000 -0500
6100@@ -12,6 +12,14 @@ typedef struct {
6101 u64 __aligned(8) counter;
6102 } atomic64_t;
6103
6104+#ifdef CONFIG_PAX_REFCOUNT
6105+typedef struct {
6106+ u64 __aligned(8) counter;
6107+} atomic64_unchecked_t;
6108+#else
6109+typedef atomic64_t atomic64_unchecked_t;
6110+#endif
6111+
6112 #define ATOMIC64_INIT(val) { (val) }
6113
6114 #ifdef CONFIG_X86_CMPXCHG64
6115@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6116 }
6117
6118 /**
6119+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6120+ * @p: pointer to type atomic64_unchecked_t
6121+ * @o: expected value
6122+ * @n: new value
6123+ *
6124+ * Atomically sets @v to @n if it was equal to @o and returns
6125+ * the old value.
6126+ */
6127+
6128+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6129+{
6130+ return cmpxchg64(&v->counter, o, n);
6131+}
6132+
6133+/**
6134 * atomic64_xchg - xchg atomic64 variable
6135 * @v: pointer to type atomic64_t
6136 * @n: value to assign
6137@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6138 }
6139
6140 /**
6141+ * atomic64_set_unchecked - set atomic64 variable
6142+ * @v: pointer to type atomic64_unchecked_t
6143+ * @n: value to assign
6144+ *
6145+ * Atomically sets the value of @v to @n.
6146+ */
6147+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6148+{
6149+ unsigned high = (unsigned)(i >> 32);
6150+ unsigned low = (unsigned)i;
6151+ asm volatile(ATOMIC64_ALTERNATIVE(set)
6152+ : "+b" (low), "+c" (high)
6153+ : "S" (v)
6154+ : "eax", "edx", "memory"
6155+ );
6156+}
6157+
6158+/**
6159 * atomic64_read - read atomic64 variable
6160 * @v: pointer to type atomic64_t
6161 *
6162@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6163 }
6164
6165 /**
6166+ * atomic64_read_unchecked - read atomic64 variable
6167+ * @v: pointer to type atomic64_unchecked_t
6168+ *
6169+ * Atomically reads the value of @v and returns it.
6170+ */
6171+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6172+{
6173+ long long r;
6174+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6175+ : "=A" (r), "+c" (v)
6176+ : : "memory"
6177+ );
6178+ return r;
6179+ }
6180+
6181+/**
6182 * atomic64_add_return - add and return
6183 * @i: integer value to add
6184 * @v: pointer to type atomic64_t
6185@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6186 return i;
6187 }
6188
6189+/**
6190+ * atomic64_add_return_unchecked - add and return
6191+ * @i: integer value to add
6192+ * @v: pointer to type atomic64_unchecked_t
6193+ *
6194+ * Atomically adds @i to @v and returns @i + *@v
6195+ */
6196+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6197+{
6198+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6199+ : "+A" (i), "+c" (v)
6200+ : : "memory"
6201+ );
6202+ return i;
6203+}
6204+
6205 /*
6206 * Other variants with different arithmetic operators:
6207 */
6208@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6209 return a;
6210 }
6211
6212+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6213+{
6214+ long long a;
6215+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6216+ : "=A" (a)
6217+ : "S" (v)
6218+ : "memory", "ecx"
6219+ );
6220+ return a;
6221+}
6222+
6223 static inline long long atomic64_dec_return(atomic64_t *v)
6224 {
6225 long long a;
6226@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6227 }
6228
6229 /**
6230+ * atomic64_add_unchecked - add integer to atomic64 variable
6231+ * @i: integer value to add
6232+ * @v: pointer to type atomic64_unchecked_t
6233+ *
6234+ * Atomically adds @i to @v.
6235+ */
6236+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6237+{
6238+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6239+ : "+A" (i), "+c" (v)
6240+ : : "memory"
6241+ );
6242+ return i;
6243+}
6244+
6245+/**
6246 * atomic64_sub - subtract the atomic64 variable
6247 * @i: integer value to subtract
6248 * @v: pointer to type atomic64_t
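The CONFIG_PAX_REFCOUNT hunks in the 64-bit header below wrap each arithmetic instruction with "jno 0f; <inverse op>; int $4": on signed overflow the operation is rolled back and an overflow exception is raised, while the *_unchecked variants keep the plain, non-trapping behaviour for counters that may legitimately wrap. A portable sketch of the same idea; the function name is invented, and the patch implements this in inline assembly rather than with a compiler builtin.

/* Sketch only: conceptual analogue of the trap-on-overflow pattern, using a
 * GCC/Clang builtin instead of the patch's "jno ...; int $4" sequence. */
#include <stdlib.h>

static long refcount_add_sketch(long i, long *counter)
{
        long newval;

        if (__builtin_add_overflow(*counter, i, &newval))
                abort();                  /* the patch raises an overflow trap instead */

        *counter = newval;
        return newval;
}

int main(void)
{
        long c = 0;
        refcount_add_sketch(1, &c);       /* fine */
        /* with *counter == LONG_MAX the same call would abort() */
        return (int)c;
}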
6249diff -urNp linux-3.1.1/arch/x86/include/asm/atomic64_64.h linux-3.1.1/arch/x86/include/asm/atomic64_64.h
6250--- linux-3.1.1/arch/x86/include/asm/atomic64_64.h 2011-11-11 15:19:27.000000000 -0500
6251+++ linux-3.1.1/arch/x86/include/asm/atomic64_64.h 2011-11-16 18:39:07.000000000 -0500
6252@@ -18,7 +18,19 @@
6253 */
6254 static inline long atomic64_read(const atomic64_t *v)
6255 {
6256- return (*(volatile long *)&(v)->counter);
6257+ return (*(volatile const long *)&(v)->counter);
6258+}
6259+
6260+/**
6261+ * atomic64_read_unchecked - read atomic64 variable
6262+ * @v: pointer of type atomic64_unchecked_t
6263+ *
6264+ * Atomically reads the value of @v.
6265+ * Doesn't imply a read memory barrier.
6266+ */
6267+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6268+{
6269+ return (*(volatile const long *)&(v)->counter);
6270 }
6271
6272 /**
6273@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6274 }
6275
6276 /**
6277+ * atomic64_set_unchecked - set atomic64 variable
6278+ * @v: pointer to type atomic64_unchecked_t
6279+ * @i: required value
6280+ *
6281+ * Atomically sets the value of @v to @i.
6282+ */
6283+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6284+{
6285+ v->counter = i;
6286+}
6287+
6288+/**
6289 * atomic64_add - add integer to atomic64 variable
6290 * @i: integer value to add
6291 * @v: pointer to type atomic64_t
6292@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6293 */
6294 static inline void atomic64_add(long i, atomic64_t *v)
6295 {
6296+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6297+
6298+#ifdef CONFIG_PAX_REFCOUNT
6299+ "jno 0f\n"
6300+ LOCK_PREFIX "subq %1,%0\n"
6301+ "int $4\n0:\n"
6302+ _ASM_EXTABLE(0b, 0b)
6303+#endif
6304+
6305+ : "=m" (v->counter)
6306+ : "er" (i), "m" (v->counter));
6307+}
6308+
6309+/**
6310+ * atomic64_add_unchecked - add integer to atomic64 variable
6311+ * @i: integer value to add
6312+ * @v: pointer to type atomic64_unchecked_t
6313+ *
6314+ * Atomically adds @i to @v.
6315+ */
6316+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6317+{
6318 asm volatile(LOCK_PREFIX "addq %1,%0"
6319 : "=m" (v->counter)
6320 : "er" (i), "m" (v->counter));
6321@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6322 */
6323 static inline void atomic64_sub(long i, atomic64_t *v)
6324 {
6325- asm volatile(LOCK_PREFIX "subq %1,%0"
6326+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6327+
6328+#ifdef CONFIG_PAX_REFCOUNT
6329+ "jno 0f\n"
6330+ LOCK_PREFIX "addq %1,%0\n"
6331+ "int $4\n0:\n"
6332+ _ASM_EXTABLE(0b, 0b)
6333+#endif
6334+
6335+ : "=m" (v->counter)
6336+ : "er" (i), "m" (v->counter));
6337+}
6338+
6339+/**
6340+ * atomic64_sub_unchecked - subtract the atomic64 variable
6341+ * @i: integer value to subtract
6342+ * @v: pointer to type atomic64_unchecked_t
6343+ *
6344+ * Atomically subtracts @i from @v.
6345+ */
6346+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6347+{
6348+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6349 : "=m" (v->counter)
6350 : "er" (i), "m" (v->counter));
6351 }
6352@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6353 {
6354 unsigned char c;
6355
6356- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6357+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6358+
6359+#ifdef CONFIG_PAX_REFCOUNT
6360+ "jno 0f\n"
6361+ LOCK_PREFIX "addq %2,%0\n"
6362+ "int $4\n0:\n"
6363+ _ASM_EXTABLE(0b, 0b)
6364+#endif
6365+
6366+ "sete %1\n"
6367 : "=m" (v->counter), "=qm" (c)
6368 : "er" (i), "m" (v->counter) : "memory");
6369 return c;
6370@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6371 */
6372 static inline void atomic64_inc(atomic64_t *v)
6373 {
6374+ asm volatile(LOCK_PREFIX "incq %0\n"
6375+
6376+#ifdef CONFIG_PAX_REFCOUNT
6377+ "jno 0f\n"
6378+ LOCK_PREFIX "decq %0\n"
6379+ "int $4\n0:\n"
6380+ _ASM_EXTABLE(0b, 0b)
6381+#endif
6382+
6383+ : "=m" (v->counter)
6384+ : "m" (v->counter));
6385+}
6386+
6387+/**
6388+ * atomic64_inc_unchecked - increment atomic64 variable
6389+ * @v: pointer to type atomic64_unchecked_t
6390+ *
6391+ * Atomically increments @v by 1.
6392+ */
6393+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6394+{
6395 asm volatile(LOCK_PREFIX "incq %0"
6396 : "=m" (v->counter)
6397 : "m" (v->counter));
6398@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6399 */
6400 static inline void atomic64_dec(atomic64_t *v)
6401 {
6402- asm volatile(LOCK_PREFIX "decq %0"
6403+ asm volatile(LOCK_PREFIX "decq %0\n"
6404+
6405+#ifdef CONFIG_PAX_REFCOUNT
6406+ "jno 0f\n"
6407+ LOCK_PREFIX "incq %0\n"
6408+ "int $4\n0:\n"
6409+ _ASM_EXTABLE(0b, 0b)
6410+#endif
6411+
6412+ : "=m" (v->counter)
6413+ : "m" (v->counter));
6414+}
6415+
6416+/**
6417+ * atomic64_dec_unchecked - decrement atomic64 variable
6418+ * @v: pointer to type atomic64_unchecked_t
6419+ *
6420+ * Atomically decrements @v by 1.
6421+ */
6422+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6423+{
6424+ asm volatile(LOCK_PREFIX "decq %0\n"
6425 : "=m" (v->counter)
6426 : "m" (v->counter));
6427 }
6428@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6429 {
6430 unsigned char c;
6431
6432- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6433+ asm volatile(LOCK_PREFIX "decq %0\n"
6434+
6435+#ifdef CONFIG_PAX_REFCOUNT
6436+ "jno 0f\n"
6437+ LOCK_PREFIX "incq %0\n"
6438+ "int $4\n0:\n"
6439+ _ASM_EXTABLE(0b, 0b)
6440+#endif
6441+
6442+ "sete %1\n"
6443 : "=m" (v->counter), "=qm" (c)
6444 : "m" (v->counter) : "memory");
6445 return c != 0;
6446@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6447 {
6448 unsigned char c;
6449
6450- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6451+ asm volatile(LOCK_PREFIX "incq %0\n"
6452+
6453+#ifdef CONFIG_PAX_REFCOUNT
6454+ "jno 0f\n"
6455+ LOCK_PREFIX "decq %0\n"
6456+ "int $4\n0:\n"
6457+ _ASM_EXTABLE(0b, 0b)
6458+#endif
6459+
6460+ "sete %1\n"
6461 : "=m" (v->counter), "=qm" (c)
6462 : "m" (v->counter) : "memory");
6463 return c != 0;
6464@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6465 {
6466 unsigned char c;
6467
6468- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6469+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6470+
6471+#ifdef CONFIG_PAX_REFCOUNT
6472+ "jno 0f\n"
6473+ LOCK_PREFIX "subq %2,%0\n"
6474+ "int $4\n0:\n"
6475+ _ASM_EXTABLE(0b, 0b)
6476+#endif
6477+
6478+ "sets %1\n"
6479 : "=m" (v->counter), "=qm" (c)
6480 : "er" (i), "m" (v->counter) : "memory");
6481 return c;
6482@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6483 static inline long atomic64_add_return(long i, atomic64_t *v)
6484 {
6485 long __i = i;
6486- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6487+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6488+
6489+#ifdef CONFIG_PAX_REFCOUNT
6490+ "jno 0f\n"
6491+ "movq %0, %1\n"
6492+ "int $4\n0:\n"
6493+ _ASM_EXTABLE(0b, 0b)
6494+#endif
6495+
6496+ : "+r" (i), "+m" (v->counter)
6497+ : : "memory");
6498+ return i + __i;
6499+}
6500+
6501+/**
6502+ * atomic64_add_return_unchecked - add and return
6503+ * @i: integer value to add
6504+ * @v: pointer to type atomic64_unchecked_t
6505+ *
6506+ * Atomically adds @i to @v and returns @i + @v
6507+ */
6508+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6509+{
6510+ long __i = i;
6511+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6512 : "+r" (i), "+m" (v->counter)
6513 : : "memory");
6514 return i + __i;
6515@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6516 }
6517
6518 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6519+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6520+{
6521+ return atomic64_add_return_unchecked(1, v);
6522+}
6523 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6524
6525 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6526@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6527 return cmpxchg(&v->counter, old, new);
6528 }
6529
6530+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6531+{
6532+ return cmpxchg(&v->counter, old, new);
6533+}
6534+
6535 static inline long atomic64_xchg(atomic64_t *v, long new)
6536 {
6537 return xchg(&v->counter, new);
6538@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6539 */
6540 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6541 {
6542- long c, old;
6543+ long c, old, new;
6544 c = atomic64_read(v);
6545 for (;;) {
6546- if (unlikely(c == (u)))
6547+ if (unlikely(c == u))
6548 break;
6549- old = atomic64_cmpxchg((v), c, c + (a));
6550+
6551+ asm volatile("add %2,%0\n"
6552+
6553+#ifdef CONFIG_PAX_REFCOUNT
6554+ "jno 0f\n"
6555+ "sub %2,%0\n"
6556+ "int $4\n0:\n"
6557+ _ASM_EXTABLE(0b, 0b)
6558+#endif
6559+
6560+ : "=r" (new)
6561+ : "0" (c), "ir" (a));
6562+
6563+ old = atomic64_cmpxchg(v, c, new);
6564 if (likely(old == c))
6565 break;
6566 c = old;
6567 }
6568- return c != (u);
6569+ return c != u;
6570 }
6571
6572 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
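The hunks around this point split each atomic operation into a checked form, which traps on signed overflow when CONFIG_PAX_REFCOUNT is enabled, and an _unchecked form that keeps plain wraparound for counters where overflow is harmless. A minimal user-space sketch of that semantic split follows; it is not part of the patch, atomicity is omitted, and the function names are hypothetical.

/*
 * Illustrative sketch only: checked vs. _unchecked add semantics.
 */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* checked add: trap on signed overflow, mirroring the "jno 0f; undo; int $4" pattern */
static long checked_add(long *counter, long i)
{
        long result;

        if (__builtin_add_overflow(*counter, i, &result))
                abort();        /* the patch raises #OF and lets the kernel log and kill */
        *counter = result;
        return result;
}

/* unchecked add: plain wraparound, as atomic64_add_unchecked() permits */
static long unchecked_add(long *counter, long i)
{
        *counter += i;
        return *counter;
}

int main(void)
{
        long refcount = LONG_MAX - 1;

        printf("%ld\n", unchecked_add(&refcount, 1));   /* reaches LONG_MAX */
        checked_add(&refcount, 1);                      /* would overflow: aborts here */
        return 0;
}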
6573diff -urNp linux-3.1.1/arch/x86/include/asm/atomic.h linux-3.1.1/arch/x86/include/asm/atomic.h
6574--- linux-3.1.1/arch/x86/include/asm/atomic.h 2011-11-11 15:19:27.000000000 -0500
6575+++ linux-3.1.1/arch/x86/include/asm/atomic.h 2011-11-16 18:39:07.000000000 -0500
6576@@ -22,7 +22,18 @@
6577 */
6578 static inline int atomic_read(const atomic_t *v)
6579 {
6580- return (*(volatile int *)&(v)->counter);
6581+ return (*(volatile const int *)&(v)->counter);
6582+}
6583+
6584+/**
6585+ * atomic_read_unchecked - read atomic variable
6586+ * @v: pointer of type atomic_unchecked_t
6587+ *
6588+ * Atomically reads the value of @v.
6589+ */
6590+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6591+{
6592+ return (*(volatile const int *)&(v)->counter);
6593 }
6594
6595 /**
6596@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6597 }
6598
6599 /**
6600+ * atomic_set_unchecked - set atomic variable
6601+ * @v: pointer of type atomic_unchecked_t
6602+ * @i: required value
6603+ *
6604+ * Atomically sets the value of @v to @i.
6605+ */
6606+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6607+{
6608+ v->counter = i;
6609+}
6610+
6611+/**
6612 * atomic_add - add integer to atomic variable
6613 * @i: integer value to add
6614 * @v: pointer of type atomic_t
6615@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6616 */
6617 static inline void atomic_add(int i, atomic_t *v)
6618 {
6619- asm volatile(LOCK_PREFIX "addl %1,%0"
6620+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6621+
6622+#ifdef CONFIG_PAX_REFCOUNT
6623+ "jno 0f\n"
6624+ LOCK_PREFIX "subl %1,%0\n"
6625+ "int $4\n0:\n"
6626+ _ASM_EXTABLE(0b, 0b)
6627+#endif
6628+
6629+ : "+m" (v->counter)
6630+ : "ir" (i));
6631+}
6632+
6633+/**
6634+ * atomic_add_unchecked - add integer to atomic variable
6635+ * @i: integer value to add
6636+ * @v: pointer of type atomic_unchecked_t
6637+ *
6638+ * Atomically adds @i to @v.
6639+ */
6640+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6641+{
6642+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6643 : "+m" (v->counter)
6644 : "ir" (i));
6645 }
6646@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6647 */
6648 static inline void atomic_sub(int i, atomic_t *v)
6649 {
6650- asm volatile(LOCK_PREFIX "subl %1,%0"
6651+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6652+
6653+#ifdef CONFIG_PAX_REFCOUNT
6654+ "jno 0f\n"
6655+ LOCK_PREFIX "addl %1,%0\n"
6656+ "int $4\n0:\n"
6657+ _ASM_EXTABLE(0b, 0b)
6658+#endif
6659+
6660+ : "+m" (v->counter)
6661+ : "ir" (i));
6662+}
6663+
6664+/**
6665+ * atomic_sub_unchecked - subtract integer from atomic variable
6666+ * @i: integer value to subtract
6667+ * @v: pointer of type atomic_unchecked_t
6668+ *
6669+ * Atomically subtracts @i from @v.
6670+ */
6671+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6672+{
6673+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6674 : "+m" (v->counter)
6675 : "ir" (i));
6676 }
6677@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6678 {
6679 unsigned char c;
6680
6681- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6682+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6683+
6684+#ifdef CONFIG_PAX_REFCOUNT
6685+ "jno 0f\n"
6686+ LOCK_PREFIX "addl %2,%0\n"
6687+ "int $4\n0:\n"
6688+ _ASM_EXTABLE(0b, 0b)
6689+#endif
6690+
6691+ "sete %1\n"
6692 : "+m" (v->counter), "=qm" (c)
6693 : "ir" (i) : "memory");
6694 return c;
6695@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6696 */
6697 static inline void atomic_inc(atomic_t *v)
6698 {
6699- asm volatile(LOCK_PREFIX "incl %0"
6700+ asm volatile(LOCK_PREFIX "incl %0\n"
6701+
6702+#ifdef CONFIG_PAX_REFCOUNT
6703+ "jno 0f\n"
6704+ LOCK_PREFIX "decl %0\n"
6705+ "int $4\n0:\n"
6706+ _ASM_EXTABLE(0b, 0b)
6707+#endif
6708+
6709+ : "+m" (v->counter));
6710+}
6711+
6712+/**
6713+ * atomic_inc_unchecked - increment atomic variable
6714+ * @v: pointer of type atomic_unchecked_t
6715+ *
6716+ * Atomically increments @v by 1.
6717+ */
6718+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6719+{
6720+ asm volatile(LOCK_PREFIX "incl %0\n"
6721 : "+m" (v->counter));
6722 }
6723
6724@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6725 */
6726 static inline void atomic_dec(atomic_t *v)
6727 {
6728- asm volatile(LOCK_PREFIX "decl %0"
6729+ asm volatile(LOCK_PREFIX "decl %0\n"
6730+
6731+#ifdef CONFIG_PAX_REFCOUNT
6732+ "jno 0f\n"
6733+ LOCK_PREFIX "incl %0\n"
6734+ "int $4\n0:\n"
6735+ _ASM_EXTABLE(0b, 0b)
6736+#endif
6737+
6738+ : "+m" (v->counter));
6739+}
6740+
6741+/**
6742+ * atomic_dec_unchecked - decrement atomic variable
6743+ * @v: pointer of type atomic_unchecked_t
6744+ *
6745+ * Atomically decrements @v by 1.
6746+ */
6747+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6748+{
6749+ asm volatile(LOCK_PREFIX "decl %0\n"
6750 : "+m" (v->counter));
6751 }
6752
6753@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6754 {
6755 unsigned char c;
6756
6757- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6758+ asm volatile(LOCK_PREFIX "decl %0\n"
6759+
6760+#ifdef CONFIG_PAX_REFCOUNT
6761+ "jno 0f\n"
6762+ LOCK_PREFIX "incl %0\n"
6763+ "int $4\n0:\n"
6764+ _ASM_EXTABLE(0b, 0b)
6765+#endif
6766+
6767+ "sete %1\n"
6768 : "+m" (v->counter), "=qm" (c)
6769 : : "memory");
6770 return c != 0;
6771@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6772 {
6773 unsigned char c;
6774
6775- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6776+ asm volatile(LOCK_PREFIX "incl %0\n"
6777+
6778+#ifdef CONFIG_PAX_REFCOUNT
6779+ "jno 0f\n"
6780+ LOCK_PREFIX "decl %0\n"
6781+ "int $4\n0:\n"
6782+ _ASM_EXTABLE(0b, 0b)
6783+#endif
6784+
6785+ "sete %1\n"
6786+ : "+m" (v->counter), "=qm" (c)
6787+ : : "memory");
6788+ return c != 0;
6789+}
6790+
6791+/**
6792+ * atomic_inc_and_test_unchecked - increment and test
6793+ * @v: pointer of type atomic_unchecked_t
6794+ *
6795+ * Atomically increments @v by 1
6796+ * and returns true if the result is zero, or false for all
6797+ * other cases.
6798+ */
6799+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6800+{
6801+ unsigned char c;
6802+
6803+ asm volatile(LOCK_PREFIX "incl %0\n"
6804+ "sete %1\n"
6805 : "+m" (v->counter), "=qm" (c)
6806 : : "memory");
6807 return c != 0;
6808@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6809 {
6810 unsigned char c;
6811
6812- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6813+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6814+
6815+#ifdef CONFIG_PAX_REFCOUNT
6816+ "jno 0f\n"
6817+ LOCK_PREFIX "subl %2,%0\n"
6818+ "int $4\n0:\n"
6819+ _ASM_EXTABLE(0b, 0b)
6820+#endif
6821+
6822+ "sets %1\n"
6823 : "+m" (v->counter), "=qm" (c)
6824 : "ir" (i) : "memory");
6825 return c;
6826@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6827 #endif
6828 /* Modern 486+ processor */
6829 __i = i;
6830+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6831+
6832+#ifdef CONFIG_PAX_REFCOUNT
6833+ "jno 0f\n"
6834+ "movl %0, %1\n"
6835+ "int $4\n0:\n"
6836+ _ASM_EXTABLE(0b, 0b)
6837+#endif
6838+
6839+ : "+r" (i), "+m" (v->counter)
6840+ : : "memory");
6841+ return i + __i;
6842+
6843+#ifdef CONFIG_M386
6844+no_xadd: /* Legacy 386 processor */
6845+ local_irq_save(flags);
6846+ __i = atomic_read(v);
6847+ atomic_set(v, i + __i);
6848+ local_irq_restore(flags);
6849+ return i + __i;
6850+#endif
6851+}
6852+
6853+/**
6854+ * atomic_add_return_unchecked - add integer and return
6855+ * @v: pointer of type atomic_unchecked_t
6856+ * @i: integer value to add
6857+ *
6858+ * Atomically adds @i to @v and returns @i + @v
6859+ */
6860+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6861+{
6862+ int __i;
6863+#ifdef CONFIG_M386
6864+ unsigned long flags;
6865+ if (unlikely(boot_cpu_data.x86 <= 3))
6866+ goto no_xadd;
6867+#endif
6868+ /* Modern 486+ processor */
6869+ __i = i;
6870 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6871 : "+r" (i), "+m" (v->counter)
6872 : : "memory");
6873@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6874 }
6875
6876 #define atomic_inc_return(v) (atomic_add_return(1, v))
6877+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6878+{
6879+ return atomic_add_return_unchecked(1, v);
6880+}
6881 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6882
6883 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6884@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6885 return cmpxchg(&v->counter, old, new);
6886 }
6887
6888+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6889+{
6890+ return cmpxchg(&v->counter, old, new);
6891+}
6892+
6893 static inline int atomic_xchg(atomic_t *v, int new)
6894 {
6895 return xchg(&v->counter, new);
6896 }
6897
6898+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6899+{
6900+ return xchg(&v->counter, new);
6901+}
6902+
6903 /**
6904 * __atomic_add_unless - add unless the number is already a given value
6905 * @v: pointer of type atomic_t
6906@@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *
6907 */
6908 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
6909 {
6910- int c, old;
6911+ int c, old, new;
6912 c = atomic_read(v);
6913 for (;;) {
6914- if (unlikely(c == (u)))
6915+ if (unlikely(c == u))
6916 break;
6917- old = atomic_cmpxchg((v), c, c + (a));
6918+
6919+ asm volatile("addl %2,%0\n"
6920+
6921+#ifdef CONFIG_PAX_REFCOUNT
6922+ "jno 0f\n"
6923+ "subl %2,%0\n"
6924+ "int $4\n0:\n"
6925+ _ASM_EXTABLE(0b, 0b)
6926+#endif
6927+
6928+ : "=r" (new)
6929+ : "0" (c), "ir" (a));
6930+
6931+ old = atomic_cmpxchg(v, c, new);
6932 if (likely(old == c))
6933 break;
6934 c = old;
6935@@ -244,6 +473,48 @@ static inline int __atomic_add_unless(at
6936 return c;
6937 }
6938
6939+/**
6940+ * atomic_inc_not_zero_hint - increment if not null
6941+ * @v: pointer of type atomic_t
6942+ * @hint: probable value of the atomic before the increment
6943+ *
6944+ * This version of atomic_inc_not_zero() gives a hint of probable
6945+ * value of the atomic. This helps processor to not read the memory
6946+ * before doing the atomic read/modify/write cycle, lowering
6947+ * number of bus transactions on some arches.
6948+ *
6949+ * Returns: 0 if increment was not done, 1 otherwise.
6950+ */
6951+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6952+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6953+{
6954+ int val, c = hint, new;
6955+
6956+ /* sanity test, should be removed by compiler if hint is a constant */
6957+ if (!hint)
6958+ return __atomic_add_unless(v, 1, 0);
6959+
6960+ do {
6961+ asm volatile("incl %0\n"
6962+
6963+#ifdef CONFIG_PAX_REFCOUNT
6964+ "jno 0f\n"
6965+ "decl %0\n"
6966+ "int $4\n0:\n"
6967+ _ASM_EXTABLE(0b, 0b)
6968+#endif
6969+
6970+ : "=r" (new)
6971+ : "0" (c));
6972+
6973+ val = atomic_cmpxchg(v, c, new);
6974+ if (val == c)
6975+ return 1;
6976+ c = val;
6977+ } while (c);
6978+
6979+ return 0;
6980+}
6981
6982 /*
6983 * atomic_dec_if_positive - decrement by 1 if old value positive
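The __atomic_add_unless() and atomic_inc_not_zero_hint() hunks above both rely on the same compare-exchange retry loop. A self-contained user-space sketch of that loop, using GCC's __atomic builtins rather than the kernel's cmpxchg(), is shown below; the helper name is hypothetical.

/*
 * Sketch of the add-unless retry loop (not the kernel implementation).
 */
#include <stdio.h>

/* add @a to *v unless *v == u; returns the value observed before the add */
static int add_unless(int *v, int a, int u)
{
        int c = __atomic_load_n(v, __ATOMIC_RELAXED);

        for (;;) {
                if (c == u)
                        break;
                /* on failure, c is refreshed with the current value of *v */
                if (__atomic_compare_exchange_n(v, &c, c + a, 0,
                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                        break;
        }
        return c;
}

int main(void)
{
        int refs = 1;

        printf("%d\n", add_unless(&refs, 1, 0));        /* 1: increment performed */
        refs = 0;
        printf("%d\n", add_unless(&refs, 1, 0));        /* 0: already zero, left untouched */
        return 0;
}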
6984diff -urNp linux-3.1.1/arch/x86/include/asm/bitops.h linux-3.1.1/arch/x86/include/asm/bitops.h
6985--- linux-3.1.1/arch/x86/include/asm/bitops.h 2011-11-11 15:19:27.000000000 -0500
6986+++ linux-3.1.1/arch/x86/include/asm/bitops.h 2011-11-16 18:39:07.000000000 -0500
6987@@ -38,7 +38,7 @@
6988 * a mask operation on a byte.
6989 */
6990 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6991-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6992+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6993 #define CONST_MASK(nr) (1 << ((nr) & 7))
6994
6995 /**
6996diff -urNp linux-3.1.1/arch/x86/include/asm/boot.h linux-3.1.1/arch/x86/include/asm/boot.h
6997--- linux-3.1.1/arch/x86/include/asm/boot.h 2011-11-11 15:19:27.000000000 -0500
6998+++ linux-3.1.1/arch/x86/include/asm/boot.h 2011-11-16 18:39:07.000000000 -0500
6999@@ -11,10 +11,15 @@
7000 #include <asm/pgtable_types.h>
7001
7002 /* Physical address where kernel should be loaded. */
7003-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7004+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7005 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7006 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7007
7008+#ifndef __ASSEMBLY__
7009+extern unsigned char __LOAD_PHYSICAL_ADDR[];
7010+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7011+#endif
7012+
7013 /* Minimum kernel alignment, as a power of two */
7014 #ifdef CONFIG_X86_64
7015 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
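The ____LOAD_PHYSICAL_ADDR definition above is the usual round-up-to-a-power-of-two-alignment expression. A quick stand-alone illustration, with example CONFIG values that are assumptions rather than taken from any particular config:

#include <stdio.h>

static unsigned long align_up(unsigned long start, unsigned long align)
{
        /* align must be a power of two */
        return (start + (align - 1)) & ~(align - 1);
}

int main(void)
{
        /* e.g. CONFIG_PHYSICAL_START = 0x100000, CONFIG_PHYSICAL_ALIGN = 0x1000000 */
        printf("%#lx\n", align_up(0x100000UL, 0x1000000UL));    /* rounds up to 0x1000000 */
        printf("%#lx\n", align_up(0x1000000UL, 0x1000000UL));   /* already aligned: unchanged */
        return 0;
}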
7016diff -urNp linux-3.1.1/arch/x86/include/asm/cacheflush.h linux-3.1.1/arch/x86/include/asm/cacheflush.h
7017--- linux-3.1.1/arch/x86/include/asm/cacheflush.h 2011-11-11 15:19:27.000000000 -0500
7018+++ linux-3.1.1/arch/x86/include/asm/cacheflush.h 2011-11-16 18:39:07.000000000 -0500
7019@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7020 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7021
7022 if (pg_flags == _PGMT_DEFAULT)
7023- return -1;
7024+ return ~0UL;
7025 else if (pg_flags == _PGMT_WC)
7026 return _PAGE_CACHE_WC;
7027 else if (pg_flags == _PGMT_UC_MINUS)
7028diff -urNp linux-3.1.1/arch/x86/include/asm/cache.h linux-3.1.1/arch/x86/include/asm/cache.h
7029--- linux-3.1.1/arch/x86/include/asm/cache.h 2011-11-11 15:19:27.000000000 -0500
7030+++ linux-3.1.1/arch/x86/include/asm/cache.h 2011-11-16 18:39:07.000000000 -0500
7031@@ -5,12 +5,13 @@
7032
7033 /* L1 cache line size */
7034 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7035-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7036+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7037
7038 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7039+#define __read_only __attribute__((__section__(".data..read_only")))
7040
7041 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7042-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7043+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7044
7045 #ifdef CONFIG_X86_VSMP
7046 #ifdef CONFIG_SMP
7047diff -urNp linux-3.1.1/arch/x86/include/asm/checksum_32.h linux-3.1.1/arch/x86/include/asm/checksum_32.h
7048--- linux-3.1.1/arch/x86/include/asm/checksum_32.h 2011-11-11 15:19:27.000000000 -0500
7049+++ linux-3.1.1/arch/x86/include/asm/checksum_32.h 2011-11-16 18:39:07.000000000 -0500
7050@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7051 int len, __wsum sum,
7052 int *src_err_ptr, int *dst_err_ptr);
7053
7054+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7055+ int len, __wsum sum,
7056+ int *src_err_ptr, int *dst_err_ptr);
7057+
7058+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7059+ int len, __wsum sum,
7060+ int *src_err_ptr, int *dst_err_ptr);
7061+
7062 /*
7063 * Note: when you get a NULL pointer exception here this means someone
7064 * passed in an incorrect kernel address to one of these functions.
7065@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7066 int *err_ptr)
7067 {
7068 might_sleep();
7069- return csum_partial_copy_generic((__force void *)src, dst,
7070+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7071 len, sum, err_ptr, NULL);
7072 }
7073
7074@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7075 {
7076 might_sleep();
7077 if (access_ok(VERIFY_WRITE, dst, len))
7078- return csum_partial_copy_generic(src, (__force void *)dst,
7079+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7080 len, sum, NULL, err_ptr);
7081
7082 if (len)
7083diff -urNp linux-3.1.1/arch/x86/include/asm/cpufeature.h linux-3.1.1/arch/x86/include/asm/cpufeature.h
7084--- linux-3.1.1/arch/x86/include/asm/cpufeature.h 2011-11-11 15:19:27.000000000 -0500
7085+++ linux-3.1.1/arch/x86/include/asm/cpufeature.h 2011-11-16 18:39:07.000000000 -0500
7086@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7087 ".section .discard,\"aw\",@progbits\n"
7088 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7089 ".previous\n"
7090- ".section .altinstr_replacement,\"ax\"\n"
7091+ ".section .altinstr_replacement,\"a\"\n"
7092 "3: movb $1,%0\n"
7093 "4:\n"
7094 ".previous\n"
7095diff -urNp linux-3.1.1/arch/x86/include/asm/desc_defs.h linux-3.1.1/arch/x86/include/asm/desc_defs.h
7096--- linux-3.1.1/arch/x86/include/asm/desc_defs.h 2011-11-11 15:19:27.000000000 -0500
7097+++ linux-3.1.1/arch/x86/include/asm/desc_defs.h 2011-11-16 18:39:07.000000000 -0500
7098@@ -31,6 +31,12 @@ struct desc_struct {
7099 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7100 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7101 };
7102+ struct {
7103+ u16 offset_low;
7104+ u16 seg;
7105+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7106+ unsigned offset_high: 16;
7107+ } gate;
7108 };
7109 } __attribute__((packed));
7110
7111diff -urNp linux-3.1.1/arch/x86/include/asm/desc.h linux-3.1.1/arch/x86/include/asm/desc.h
7112--- linux-3.1.1/arch/x86/include/asm/desc.h 2011-11-11 15:19:27.000000000 -0500
7113+++ linux-3.1.1/arch/x86/include/asm/desc.h 2011-11-16 18:39:07.000000000 -0500
7114@@ -4,6 +4,7 @@
7115 #include <asm/desc_defs.h>
7116 #include <asm/ldt.h>
7117 #include <asm/mmu.h>
7118+#include <asm/pgtable.h>
7119
7120 #include <linux/smp.h>
7121
7122@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7123
7124 desc->type = (info->read_exec_only ^ 1) << 1;
7125 desc->type |= info->contents << 2;
7126+ desc->type |= info->seg_not_present ^ 1;
7127
7128 desc->s = 1;
7129 desc->dpl = 0x3;
7130@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7131 }
7132
7133 extern struct desc_ptr idt_descr;
7134-extern gate_desc idt_table[];
7135-
7136-struct gdt_page {
7137- struct desc_struct gdt[GDT_ENTRIES];
7138-} __attribute__((aligned(PAGE_SIZE)));
7139-
7140-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7141+extern gate_desc idt_table[256];
7142
7143+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7144 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7145 {
7146- return per_cpu(gdt_page, cpu).gdt;
7147+ return cpu_gdt_table[cpu];
7148 }
7149
7150 #ifdef CONFIG_X86_64
7151@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7152 unsigned long base, unsigned dpl, unsigned flags,
7153 unsigned short seg)
7154 {
7155- gate->a = (seg << 16) | (base & 0xffff);
7156- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7157+ gate->gate.offset_low = base;
7158+ gate->gate.seg = seg;
7159+ gate->gate.reserved = 0;
7160+ gate->gate.type = type;
7161+ gate->gate.s = 0;
7162+ gate->gate.dpl = dpl;
7163+ gate->gate.p = 1;
7164+ gate->gate.offset_high = base >> 16;
7165 }
7166
7167 #endif
7168@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7169
7170 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7171 {
7172+ pax_open_kernel();
7173 memcpy(&idt[entry], gate, sizeof(*gate));
7174+ pax_close_kernel();
7175 }
7176
7177 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7178 {
7179+ pax_open_kernel();
7180 memcpy(&ldt[entry], desc, 8);
7181+ pax_close_kernel();
7182 }
7183
7184 static inline void
7185@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7186 default: size = sizeof(*gdt); break;
7187 }
7188
7189+ pax_open_kernel();
7190 memcpy(&gdt[entry], desc, size);
7191+ pax_close_kernel();
7192 }
7193
7194 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7195@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7196
7197 static inline void native_load_tr_desc(void)
7198 {
7199+ pax_open_kernel();
7200 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7201+ pax_close_kernel();
7202 }
7203
7204 static inline void native_load_gdt(const struct desc_ptr *dtr)
7205@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7206 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7207 unsigned int i;
7208
7209+ pax_open_kernel();
7210 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7211 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7212+ pax_close_kernel();
7213 }
7214
7215 #define _LDT_empty(info) \
7216@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7217 desc->limit = (limit >> 16) & 0xf;
7218 }
7219
7220-static inline void _set_gate(int gate, unsigned type, void *addr,
7221+static inline void _set_gate(int gate, unsigned type, const void *addr,
7222 unsigned dpl, unsigned ist, unsigned seg)
7223 {
7224 gate_desc s;
7225@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7226 * Pentium F0 0F bugfix can have resulted in the mapped
7227 * IDT being write-protected.
7228 */
7229-static inline void set_intr_gate(unsigned int n, void *addr)
7230+static inline void set_intr_gate(unsigned int n, const void *addr)
7231 {
7232 BUG_ON((unsigned)n > 0xFF);
7233 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7234@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7235 /*
7236 * This routine sets up an interrupt gate at directory privilege level 3.
7237 */
7238-static inline void set_system_intr_gate(unsigned int n, void *addr)
7239+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7240 {
7241 BUG_ON((unsigned)n > 0xFF);
7242 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7243 }
7244
7245-static inline void set_system_trap_gate(unsigned int n, void *addr)
7246+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7247 {
7248 BUG_ON((unsigned)n > 0xFF);
7249 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7250 }
7251
7252-static inline void set_trap_gate(unsigned int n, void *addr)
7253+static inline void set_trap_gate(unsigned int n, const void *addr)
7254 {
7255 BUG_ON((unsigned)n > 0xFF);
7256 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7257@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7258 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7259 {
7260 BUG_ON((unsigned)n > 0xFF);
7261- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7262+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7263 }
7264
7265-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7266+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7267 {
7268 BUG_ON((unsigned)n > 0xFF);
7269 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7270 }
7271
7272-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7273+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7274 {
7275 BUG_ON((unsigned)n > 0xFF);
7276 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7277 }
7278
7279+#ifdef CONFIG_X86_32
7280+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7281+{
7282+ struct desc_struct d;
7283+
7284+ if (likely(limit))
7285+ limit = (limit - 1UL) >> PAGE_SHIFT;
7286+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7287+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7288+}
7289+#endif
7290+
7291 #endif /* _ASM_X86_DESC_H */
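The set_user_cs() helper added above converts a byte limit into the page-granular segment limit that pack_descriptor() expects: (limit - 1) >> PAGE_SHIFT. A tiny illustration of that conversion (not from the patch):

#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long seg_limit_pages(unsigned long byte_limit)
{
        if (byte_limit)
                byte_limit = (byte_limit - 1UL) >> PAGE_SHIFT;
        return byte_limit;
}

int main(void)
{
        printf("%#lx\n", seg_limit_pages(0x1000UL));        /* one page  -> limit 0x0     */
        printf("%#lx\n", seg_limit_pages(0xc0000000UL));    /* 3 GiB     -> limit 0xbffff */
        return 0;
}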
7292diff -urNp linux-3.1.1/arch/x86/include/asm/e820.h linux-3.1.1/arch/x86/include/asm/e820.h
7293--- linux-3.1.1/arch/x86/include/asm/e820.h 2011-11-11 15:19:27.000000000 -0500
7294+++ linux-3.1.1/arch/x86/include/asm/e820.h 2011-11-16 18:39:07.000000000 -0500
7295@@ -69,7 +69,7 @@ struct e820map {
7296 #define ISA_START_ADDRESS 0xa0000
7297 #define ISA_END_ADDRESS 0x100000
7298
7299-#define BIOS_BEGIN 0x000a0000
7300+#define BIOS_BEGIN 0x000c0000
7301 #define BIOS_END 0x00100000
7302
7303 #define BIOS_ROM_BASE 0xffe00000
7304diff -urNp linux-3.1.1/arch/x86/include/asm/elf.h linux-3.1.1/arch/x86/include/asm/elf.h
7305--- linux-3.1.1/arch/x86/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
7306+++ linux-3.1.1/arch/x86/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
7307@@ -237,7 +237,25 @@ extern int force_personality32;
7308 the loader. We need to make sure that it is out of the way of the program
7309 that it will "exec", and that there is sufficient room for the brk. */
7310
7311+#ifdef CONFIG_PAX_SEGMEXEC
7312+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7313+#else
7314 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7315+#endif
7316+
7317+#ifdef CONFIG_PAX_ASLR
7318+#ifdef CONFIG_X86_32
7319+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7320+
7321+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7322+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7323+#else
7324+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7325+
7326+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7327+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7328+#endif
7329+#endif
7330
7331 /* This yields a mask that user programs can use to figure out what
7332 instruction set this CPU supports. This could be done in user space,
7333@@ -290,9 +308,7 @@ do { \
7334
7335 #define ARCH_DLINFO \
7336 do { \
7337- if (vdso_enabled) \
7338- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7339- (unsigned long)current->mm->context.vdso); \
7340+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7341 } while (0)
7342
7343 #define AT_SYSINFO 32
7344@@ -303,7 +319,7 @@ do { \
7345
7346 #endif /* !CONFIG_X86_32 */
7347
7348-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7349+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7350
7351 #define VDSO_ENTRY \
7352 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7353@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7354 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7355 #define compat_arch_setup_additional_pages syscall32_setup_pages
7356
7357-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7358-#define arch_randomize_brk arch_randomize_brk
7359-
7360 #endif /* _ASM_X86_ELF_H */
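For scale, the PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN values above are counts of randomized, page-aligned bits, so the resulting spread is (1 << bits) pages. A back-of-the-envelope sketch follows; it is not from the patch, and the 32-bit figure for the 64-bit case assumes TASK_SIZE_MAX_SHIFT = 47 (47 - 12 - 3).

#include <stdio.h>

#define PAGE_SHIFT 12

/* the random offset spans `bits` page-aligned bits, i.e. (1 << bits) pages */
static unsigned long long spread_bytes(unsigned bits)
{
        return 1ULL << (bits + PAGE_SHIFT);
}

int main(void)
{
        printf("16 bits -> %llu MiB\n", spread_bytes(16) >> 20);    /* 32-bit mmap: 256 MiB */
        printf("32 bits -> %llu TiB\n", spread_bytes(32) >> 40);    /* 64-bit mmap: 16 TiB  */
        return 0;
}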
7361diff -urNp linux-3.1.1/arch/x86/include/asm/emergency-restart.h linux-3.1.1/arch/x86/include/asm/emergency-restart.h
7362--- linux-3.1.1/arch/x86/include/asm/emergency-restart.h 2011-11-11 15:19:27.000000000 -0500
7363+++ linux-3.1.1/arch/x86/include/asm/emergency-restart.h 2011-11-16 18:39:07.000000000 -0500
7364@@ -15,6 +15,6 @@ enum reboot_type {
7365
7366 extern enum reboot_type reboot_type;
7367
7368-extern void machine_emergency_restart(void);
7369+extern void machine_emergency_restart(void) __noreturn;
7370
7371 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7372diff -urNp linux-3.1.1/arch/x86/include/asm/futex.h linux-3.1.1/arch/x86/include/asm/futex.h
7373--- linux-3.1.1/arch/x86/include/asm/futex.h 2011-11-11 15:19:27.000000000 -0500
7374+++ linux-3.1.1/arch/x86/include/asm/futex.h 2011-11-16 18:39:07.000000000 -0500
7375@@ -12,16 +12,18 @@
7376 #include <asm/system.h>
7377
7378 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7379+ typecheck(u32 __user *, uaddr); \
7380 asm volatile("1:\t" insn "\n" \
7381 "2:\t.section .fixup,\"ax\"\n" \
7382 "3:\tmov\t%3, %1\n" \
7383 "\tjmp\t2b\n" \
7384 "\t.previous\n" \
7385 _ASM_EXTABLE(1b, 3b) \
7386- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7387+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7388 : "i" (-EFAULT), "0" (oparg), "1" (0))
7389
7390 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7391+ typecheck(u32 __user *, uaddr); \
7392 asm volatile("1:\tmovl %2, %0\n" \
7393 "\tmovl\t%0, %3\n" \
7394 "\t" insn "\n" \
7395@@ -34,7 +36,7 @@
7396 _ASM_EXTABLE(1b, 4b) \
7397 _ASM_EXTABLE(2b, 4b) \
7398 : "=&a" (oldval), "=&r" (ret), \
7399- "+m" (*uaddr), "=&r" (tem) \
7400+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7401 : "r" (oparg), "i" (-EFAULT), "1" (0))
7402
7403 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7404@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7405
7406 switch (op) {
7407 case FUTEX_OP_SET:
7408- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7409+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7410 break;
7411 case FUTEX_OP_ADD:
7412- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7413+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7414 uaddr, oparg);
7415 break;
7416 case FUTEX_OP_OR:
7417@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7418 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7419 return -EFAULT;
7420
7421- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7422+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7423 "2:\t.section .fixup, \"ax\"\n"
7424 "3:\tmov %3, %0\n"
7425 "\tjmp 2b\n"
7426 "\t.previous\n"
7427 _ASM_EXTABLE(1b, 3b)
7428- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7429+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7430 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7431 : "memory"
7432 );
7433diff -urNp linux-3.1.1/arch/x86/include/asm/hw_irq.h linux-3.1.1/arch/x86/include/asm/hw_irq.h
7434--- linux-3.1.1/arch/x86/include/asm/hw_irq.h 2011-11-11 15:19:27.000000000 -0500
7435+++ linux-3.1.1/arch/x86/include/asm/hw_irq.h 2011-11-16 18:39:07.000000000 -0500
7436@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
7437 extern void enable_IO_APIC(void);
7438
7439 /* Statistics */
7440-extern atomic_t irq_err_count;
7441-extern atomic_t irq_mis_count;
7442+extern atomic_unchecked_t irq_err_count;
7443+extern atomic_unchecked_t irq_mis_count;
7444
7445 /* EISA */
7446 extern void eisa_set_level_irq(unsigned int irq);
7447diff -urNp linux-3.1.1/arch/x86/include/asm/i387.h linux-3.1.1/arch/x86/include/asm/i387.h
7448--- linux-3.1.1/arch/x86/include/asm/i387.h 2011-11-11 15:19:27.000000000 -0500
7449+++ linux-3.1.1/arch/x86/include/asm/i387.h 2011-11-16 18:39:07.000000000 -0500
7450@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7451 {
7452 int err;
7453
7454+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7455+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7456+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7457+#endif
7458+
7459 /* See comment in fxsave() below. */
7460 #ifdef CONFIG_AS_FXSAVEQ
7461 asm volatile("1: fxrstorq %[fx]\n\t"
7462@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7463 {
7464 int err;
7465
7466+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7467+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7468+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7469+#endif
7470+
7471 /*
7472 * Clear the bytes not touched by the fxsave and reserved
7473 * for the SW usage.
7474@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7475 #endif /* CONFIG_X86_64 */
7476
7477 /* We need a safe address that is cheap to find and that is already
7478- in L1 during context switch. The best choices are unfortunately
7479- different for UP and SMP */
7480-#ifdef CONFIG_SMP
7481-#define safe_address (__per_cpu_offset[0])
7482-#else
7483-#define safe_address (kstat_cpu(0).cpustat.user)
7484-#endif
7485+ in L1 during context switch. */
7486+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7487
7488 /*
7489 * These must be called with preempt disabled
7490@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7491 struct thread_info *me = current_thread_info();
7492 preempt_disable();
7493 if (me->status & TS_USEDFPU)
7494- __save_init_fpu(me->task);
7495+ __save_init_fpu(current);
7496 else
7497 clts();
7498 }
7499diff -urNp linux-3.1.1/arch/x86/include/asm/io.h linux-3.1.1/arch/x86/include/asm/io.h
7500--- linux-3.1.1/arch/x86/include/asm/io.h 2011-11-11 15:19:27.000000000 -0500
7501+++ linux-3.1.1/arch/x86/include/asm/io.h 2011-11-16 18:39:07.000000000 -0500
7502@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
7503
7504 #include <linux/vmalloc.h>
7505
7506+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7507+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7508+{
7509+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7510+}
7511+
7512+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7513+{
7514+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7515+}
7516+
7517 /*
7518 * Convert a virtual cached pointer to an uncached pointer
7519 */
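The valid_phys_addr_range() check added above rejects any range whose page-rounded end would exceed 1 << x86_phys_bits. A user-space sketch of the same arithmetic (not the kernel code; the 36-bit physical address width is an assumption):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long addr, size_t count, unsigned phys_bits)
{
        return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
               (1ULL << (phys_bits - PAGE_SHIFT)) ? 1 : 0;
}

int main(void)
{
        printf("%d\n", valid_phys_addr_range(0xfee00000UL, 0x1000, 36));      /* 1: in range      */
        printf("%d\n", valid_phys_addr_range(0xfffffffff000UL, 0x1000, 36));  /* 0: beyond 2^36   */
        return 0;
}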
7520diff -urNp linux-3.1.1/arch/x86/include/asm/irqflags.h linux-3.1.1/arch/x86/include/asm/irqflags.h
7521--- linux-3.1.1/arch/x86/include/asm/irqflags.h 2011-11-11 15:19:27.000000000 -0500
7522+++ linux-3.1.1/arch/x86/include/asm/irqflags.h 2011-11-16 18:39:07.000000000 -0500
7523@@ -141,6 +141,11 @@ static inline notrace unsigned long arch
7524 sti; \
7525 sysexit
7526
7527+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7528+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7529+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7530+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7531+
7532 #else
7533 #define INTERRUPT_RETURN iret
7534 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7535diff -urNp linux-3.1.1/arch/x86/include/asm/kprobes.h linux-3.1.1/arch/x86/include/asm/kprobes.h
7536--- linux-3.1.1/arch/x86/include/asm/kprobes.h 2011-11-11 15:19:27.000000000 -0500
7537+++ linux-3.1.1/arch/x86/include/asm/kprobes.h 2011-11-16 18:39:07.000000000 -0500
7538@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7539 #define RELATIVEJUMP_SIZE 5
7540 #define RELATIVECALL_OPCODE 0xe8
7541 #define RELATIVE_ADDR_SIZE 4
7542-#define MAX_STACK_SIZE 64
7543-#define MIN_STACK_SIZE(ADDR) \
7544- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7545- THREAD_SIZE - (unsigned long)(ADDR))) \
7546- ? (MAX_STACK_SIZE) \
7547- : (((unsigned long)current_thread_info()) + \
7548- THREAD_SIZE - (unsigned long)(ADDR)))
7549+#define MAX_STACK_SIZE 64UL
7550+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7551
7552 #define flush_insn_slot(p) do { } while (0)
7553
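The simplified MIN_STACK_SIZE() above copies at most MAX_STACK_SIZE bytes but never past the top of the kernel stack recorded in thread.sp0. A small sketch of that clamp (not kernel code; the sp0 value is made up):

#include <stdio.h>

#define MAX_STACK_SIZE 64UL

static unsigned long min_stack_size(unsigned long sp0, unsigned long addr)
{
        unsigned long remaining = sp0 - addr;   /* bytes between addr and the stack top */

        return remaining < MAX_STACK_SIZE ? remaining : MAX_STACK_SIZE;
}

int main(void)
{
        unsigned long sp0 = 0xffff880000010000UL;

        printf("%lu\n", min_stack_size(sp0, sp0 - 200));    /* 64: clamped to MAX_STACK_SIZE */
        printf("%lu\n", min_stack_size(sp0, sp0 - 24));     /* 24: only 24 bytes remain      */
        return 0;
}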
7554diff -urNp linux-3.1.1/arch/x86/include/asm/kvm_host.h linux-3.1.1/arch/x86/include/asm/kvm_host.h
7555--- linux-3.1.1/arch/x86/include/asm/kvm_host.h 2011-11-11 15:19:27.000000000 -0500
7556+++ linux-3.1.1/arch/x86/include/asm/kvm_host.h 2011-11-16 18:39:07.000000000 -0500
7557@@ -456,7 +456,7 @@ struct kvm_arch {
7558 unsigned int n_requested_mmu_pages;
7559 unsigned int n_max_mmu_pages;
7560 unsigned int indirect_shadow_pages;
7561- atomic_t invlpg_counter;
7562+ atomic_unchecked_t invlpg_counter;
7563 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7564 /*
7565 * Hash table of struct kvm_mmu_page.
7566@@ -636,7 +636,7 @@ struct kvm_x86_ops {
7567 enum x86_intercept_stage stage);
7568
7569 const struct trace_print_flags *exit_reasons_str;
7570-};
7571+} __do_const;
7572
7573 struct kvm_arch_async_pf {
7574 u32 token;
7575diff -urNp linux-3.1.1/arch/x86/include/asm/local.h linux-3.1.1/arch/x86/include/asm/local.h
7576--- linux-3.1.1/arch/x86/include/asm/local.h 2011-11-11 15:19:27.000000000 -0500
7577+++ linux-3.1.1/arch/x86/include/asm/local.h 2011-11-16 18:39:07.000000000 -0500
7578@@ -18,26 +18,58 @@ typedef struct {
7579
7580 static inline void local_inc(local_t *l)
7581 {
7582- asm volatile(_ASM_INC "%0"
7583+ asm volatile(_ASM_INC "%0\n"
7584+
7585+#ifdef CONFIG_PAX_REFCOUNT
7586+ "jno 0f\n"
7587+ _ASM_DEC "%0\n"
7588+ "int $4\n0:\n"
7589+ _ASM_EXTABLE(0b, 0b)
7590+#endif
7591+
7592 : "+m" (l->a.counter));
7593 }
7594
7595 static inline void local_dec(local_t *l)
7596 {
7597- asm volatile(_ASM_DEC "%0"
7598+ asm volatile(_ASM_DEC "%0\n"
7599+
7600+#ifdef CONFIG_PAX_REFCOUNT
7601+ "jno 0f\n"
7602+ _ASM_INC "%0\n"
7603+ "int $4\n0:\n"
7604+ _ASM_EXTABLE(0b, 0b)
7605+#endif
7606+
7607 : "+m" (l->a.counter));
7608 }
7609
7610 static inline void local_add(long i, local_t *l)
7611 {
7612- asm volatile(_ASM_ADD "%1,%0"
7613+ asm volatile(_ASM_ADD "%1,%0\n"
7614+
7615+#ifdef CONFIG_PAX_REFCOUNT
7616+ "jno 0f\n"
7617+ _ASM_SUB "%1,%0\n"
7618+ "int $4\n0:\n"
7619+ _ASM_EXTABLE(0b, 0b)
7620+#endif
7621+
7622 : "+m" (l->a.counter)
7623 : "ir" (i));
7624 }
7625
7626 static inline void local_sub(long i, local_t *l)
7627 {
7628- asm volatile(_ASM_SUB "%1,%0"
7629+ asm volatile(_ASM_SUB "%1,%0\n"
7630+
7631+#ifdef CONFIG_PAX_REFCOUNT
7632+ "jno 0f\n"
7633+ _ASM_ADD "%1,%0\n"
7634+ "int $4\n0:\n"
7635+ _ASM_EXTABLE(0b, 0b)
7636+#endif
7637+
7638 : "+m" (l->a.counter)
7639 : "ir" (i));
7640 }
7641@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7642 {
7643 unsigned char c;
7644
7645- asm volatile(_ASM_SUB "%2,%0; sete %1"
7646+ asm volatile(_ASM_SUB "%2,%0\n"
7647+
7648+#ifdef CONFIG_PAX_REFCOUNT
7649+ "jno 0f\n"
7650+ _ASM_ADD "%2,%0\n"
7651+ "int $4\n0:\n"
7652+ _ASM_EXTABLE(0b, 0b)
7653+#endif
7654+
7655+ "sete %1\n"
7656 : "+m" (l->a.counter), "=qm" (c)
7657 : "ir" (i) : "memory");
7658 return c;
7659@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7660 {
7661 unsigned char c;
7662
7663- asm volatile(_ASM_DEC "%0; sete %1"
7664+ asm volatile(_ASM_DEC "%0\n"
7665+
7666+#ifdef CONFIG_PAX_REFCOUNT
7667+ "jno 0f\n"
7668+ _ASM_INC "%0\n"
7669+ "int $4\n0:\n"
7670+ _ASM_EXTABLE(0b, 0b)
7671+#endif
7672+
7673+ "sete %1\n"
7674 : "+m" (l->a.counter), "=qm" (c)
7675 : : "memory");
7676 return c != 0;
7677@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7678 {
7679 unsigned char c;
7680
7681- asm volatile(_ASM_INC "%0; sete %1"
7682+ asm volatile(_ASM_INC "%0\n"
7683+
7684+#ifdef CONFIG_PAX_REFCOUNT
7685+ "jno 0f\n"
7686+ _ASM_DEC "%0\n"
7687+ "int $4\n0:\n"
7688+ _ASM_EXTABLE(0b, 0b)
7689+#endif
7690+
7691+ "sete %1\n"
7692 : "+m" (l->a.counter), "=qm" (c)
7693 : : "memory");
7694 return c != 0;
7695@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7696 {
7697 unsigned char c;
7698
7699- asm volatile(_ASM_ADD "%2,%0; sets %1"
7700+ asm volatile(_ASM_ADD "%2,%0\n"
7701+
7702+#ifdef CONFIG_PAX_REFCOUNT
7703+ "jno 0f\n"
7704+ _ASM_SUB "%2,%0\n"
7705+ "int $4\n0:\n"
7706+ _ASM_EXTABLE(0b, 0b)
7707+#endif
7708+
7709+ "sets %1\n"
7710 : "+m" (l->a.counter), "=qm" (c)
7711 : "ir" (i) : "memory");
7712 return c;
7713@@ -133,7 +201,15 @@ static inline long local_add_return(long
7714 #endif
7715 /* Modern 486+ processor */
7716 __i = i;
7717- asm volatile(_ASM_XADD "%0, %1;"
7718+ asm volatile(_ASM_XADD "%0, %1\n"
7719+
7720+#ifdef CONFIG_PAX_REFCOUNT
7721+ "jno 0f\n"
7722+ _ASM_MOV "%0,%1\n"
7723+ "int $4\n0:\n"
7724+ _ASM_EXTABLE(0b, 0b)
7725+#endif
7726+
7727 : "+r" (i), "+m" (l->a.counter)
7728 : : "memory");
7729 return i + __i;
7730diff -urNp linux-3.1.1/arch/x86/include/asm/mman.h linux-3.1.1/arch/x86/include/asm/mman.h
7731--- linux-3.1.1/arch/x86/include/asm/mman.h 2011-11-11 15:19:27.000000000 -0500
7732+++ linux-3.1.1/arch/x86/include/asm/mman.h 2011-11-16 18:39:07.000000000 -0500
7733@@ -5,4 +5,14 @@
7734
7735 #include <asm-generic/mman.h>
7736
7737+#ifdef __KERNEL__
7738+#ifndef __ASSEMBLY__
7739+#ifdef CONFIG_X86_32
7740+#define arch_mmap_check i386_mmap_check
7741+int i386_mmap_check(unsigned long addr, unsigned long len,
7742+ unsigned long flags);
7743+#endif
7744+#endif
7745+#endif
7746+
7747 #endif /* _ASM_X86_MMAN_H */
7748diff -urNp linux-3.1.1/arch/x86/include/asm/mmu_context.h linux-3.1.1/arch/x86/include/asm/mmu_context.h
7749--- linux-3.1.1/arch/x86/include/asm/mmu_context.h 2011-11-11 15:19:27.000000000 -0500
7750+++ linux-3.1.1/arch/x86/include/asm/mmu_context.h 2011-11-16 18:39:07.000000000 -0500
7751@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7752
7753 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7754 {
7755+
7756+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7757+ unsigned int i;
7758+ pgd_t *pgd;
7759+
7760+ pax_open_kernel();
7761+ pgd = get_cpu_pgd(smp_processor_id());
7762+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7763+ set_pgd_batched(pgd+i, native_make_pgd(0));
7764+ pax_close_kernel();
7765+#endif
7766+
7767 #ifdef CONFIG_SMP
7768 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7769 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7770@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7771 struct task_struct *tsk)
7772 {
7773 unsigned cpu = smp_processor_id();
7774+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7775+ int tlbstate = TLBSTATE_OK;
7776+#endif
7777
7778 if (likely(prev != next)) {
7779 #ifdef CONFIG_SMP
7780+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7781+ tlbstate = percpu_read(cpu_tlbstate.state);
7782+#endif
7783 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7784 percpu_write(cpu_tlbstate.active_mm, next);
7785 #endif
7786 cpumask_set_cpu(cpu, mm_cpumask(next));
7787
7788 /* Re-load page tables */
7789+#ifdef CONFIG_PAX_PER_CPU_PGD
7790+ pax_open_kernel();
7791+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7792+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7793+ pax_close_kernel();
7794+ load_cr3(get_cpu_pgd(cpu));
7795+#else
7796 load_cr3(next->pgd);
7797+#endif
7798
7799 /* stop flush ipis for the previous mm */
7800 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7801@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7802 */
7803 if (unlikely(prev->context.ldt != next->context.ldt))
7804 load_LDT_nolock(&next->context);
7805- }
7806+
7807+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7808+ if (!(__supported_pte_mask & _PAGE_NX)) {
7809+ smp_mb__before_clear_bit();
7810+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7811+ smp_mb__after_clear_bit();
7812+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7813+ }
7814+#endif
7815+
7816+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7817+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7818+ prev->context.user_cs_limit != next->context.user_cs_limit))
7819+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7820 #ifdef CONFIG_SMP
7821+ else if (unlikely(tlbstate != TLBSTATE_OK))
7822+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7823+#endif
7824+#endif
7825+
7826+ }
7827 else {
7828+
7829+#ifdef CONFIG_PAX_PER_CPU_PGD
7830+ pax_open_kernel();
7831+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7832+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7833+ pax_close_kernel();
7834+ load_cr3(get_cpu_pgd(cpu));
7835+#endif
7836+
7837+#ifdef CONFIG_SMP
7838 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7839 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7840
7841@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7842 * tlb flush IPI delivery. We must reload CR3
7843 * to make sure to use no freed page tables.
7844 */
7845+
7846+#ifndef CONFIG_PAX_PER_CPU_PGD
7847 load_cr3(next->pgd);
7848+#endif
7849+
7850 load_LDT_nolock(&next->context);
7851+
7852+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7853+ if (!(__supported_pte_mask & _PAGE_NX))
7854+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7855+#endif
7856+
7857+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7858+#ifdef CONFIG_PAX_PAGEEXEC
7859+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7860+#endif
7861+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7862+#endif
7863+
7864 }
7865- }
7866 #endif
7867+ }
7868 }
7869
7870 #define activate_mm(prev, next) \
7871diff -urNp linux-3.1.1/arch/x86/include/asm/mmu.h linux-3.1.1/arch/x86/include/asm/mmu.h
7872--- linux-3.1.1/arch/x86/include/asm/mmu.h 2011-11-11 15:19:27.000000000 -0500
7873+++ linux-3.1.1/arch/x86/include/asm/mmu.h 2011-11-16 18:39:07.000000000 -0500
7874@@ -9,7 +9,7 @@
7875 * we put the segment information here.
7876 */
7877 typedef struct {
7878- void *ldt;
7879+ struct desc_struct *ldt;
7880 int size;
7881
7882 #ifdef CONFIG_X86_64
7883@@ -18,7 +18,19 @@ typedef struct {
7884 #endif
7885
7886 struct mutex lock;
7887- void *vdso;
7888+ unsigned long vdso;
7889+
7890+#ifdef CONFIG_X86_32
7891+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7892+ unsigned long user_cs_base;
7893+ unsigned long user_cs_limit;
7894+
7895+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7896+ cpumask_t cpu_user_cs_mask;
7897+#endif
7898+
7899+#endif
7900+#endif
7901 } mm_context_t;
7902
7903 #ifdef CONFIG_SMP
7904diff -urNp linux-3.1.1/arch/x86/include/asm/module.h linux-3.1.1/arch/x86/include/asm/module.h
7905--- linux-3.1.1/arch/x86/include/asm/module.h 2011-11-11 15:19:27.000000000 -0500
7906+++ linux-3.1.1/arch/x86/include/asm/module.h 2011-11-16 18:39:07.000000000 -0500
7907@@ -5,6 +5,7 @@
7908
7909 #ifdef CONFIG_X86_64
7910 /* X86_64 does not define MODULE_PROC_FAMILY */
7911+#define MODULE_PROC_FAMILY ""
7912 #elif defined CONFIG_M386
7913 #define MODULE_PROC_FAMILY "386 "
7914 #elif defined CONFIG_M486
7915@@ -59,8 +60,18 @@
7916 #error unknown processor family
7917 #endif
7918
7919-#ifdef CONFIG_X86_32
7920-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7921+#ifdef CONFIG_PAX_KERNEXEC
7922+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7923+#else
7924+#define MODULE_PAX_KERNEXEC ""
7925 #endif
7926
7927+#ifdef CONFIG_PAX_MEMORY_UDEREF
7928+#define MODULE_PAX_UDEREF "UDEREF "
7929+#else
7930+#define MODULE_PAX_UDEREF ""
7931+#endif
7932+
7933+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
7934+
7935 #endif /* _ASM_X86_MODULE_H */
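The MODULE_ARCH_VERMAGIC change above works by plain preprocessor string pasting, so modules built with different KERNEXEC/UDEREF settings carry a different vermagic string and are refused at load time. A minimal illustration of the string composition (not the kernel macro set; the processor-family value is an example):

#include <stdio.h>

#define MODULE_PROC_FAMILY  "586 "          /* example value, depends on CONFIG_M586 etc. */
#define MODULE_PAX_KERNEXEC "KERNEXEC "     /* expands to "" when the option is off */
#define MODULE_PAX_UDEREF   "UDEREF "       /* expands to "" when the option is off */

#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
        /* adjacent string literals concatenate: "586 KERNEXEC UDEREF " */
        printf("%s\n", MODULE_ARCH_VERMAGIC);
        return 0;
}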
7936diff -urNp linux-3.1.1/arch/x86/include/asm/page_64_types.h linux-3.1.1/arch/x86/include/asm/page_64_types.h
7937--- linux-3.1.1/arch/x86/include/asm/page_64_types.h 2011-11-11 15:19:27.000000000 -0500
7938+++ linux-3.1.1/arch/x86/include/asm/page_64_types.h 2011-11-16 18:39:07.000000000 -0500
7939@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7940
7941 /* duplicated to the one in bootmem.h */
7942 extern unsigned long max_pfn;
7943-extern unsigned long phys_base;
7944+extern const unsigned long phys_base;
7945
7946 extern unsigned long __phys_addr(unsigned long);
7947 #define __phys_reloc_hide(x) (x)
7948diff -urNp linux-3.1.1/arch/x86/include/asm/paravirt.h linux-3.1.1/arch/x86/include/asm/paravirt.h
7949--- linux-3.1.1/arch/x86/include/asm/paravirt.h 2011-11-11 15:19:27.000000000 -0500
7950+++ linux-3.1.1/arch/x86/include/asm/paravirt.h 2011-11-16 18:39:07.000000000 -0500
7951@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp,
7952 val);
7953 }
7954
7955+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7956+{
7957+ pgdval_t val = native_pgd_val(pgd);
7958+
7959+ if (sizeof(pgdval_t) > sizeof(long))
7960+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7961+ val, (u64)val >> 32);
7962+ else
7963+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7964+ val);
7965+}
7966+
7967 static inline void pgd_clear(pgd_t *pgdp)
7968 {
7969 set_pgd(pgdp, __pgd(0));
7970@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned
7971 pv_mmu_ops.set_fixmap(idx, phys, flags);
7972 }
7973
7974+#ifdef CONFIG_PAX_KERNEXEC
7975+static inline unsigned long pax_open_kernel(void)
7976+{
7977+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7978+}
7979+
7980+static inline unsigned long pax_close_kernel(void)
7981+{
7982+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7983+}
7984+#else
7985+static inline unsigned long pax_open_kernel(void) { return 0; }
7986+static inline unsigned long pax_close_kernel(void) { return 0; }
7987+#endif
7988+
7989 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7990
7991 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7992@@ -964,7 +991,7 @@ extern void default_banner(void);
7993
7994 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7995 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7996-#define PARA_INDIRECT(addr) *%cs:addr
7997+#define PARA_INDIRECT(addr) *%ss:addr
7998 #endif
7999
8000 #define INTERRUPT_RETURN \
8001@@ -1041,6 +1068,21 @@ extern void default_banner(void);
8002 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8003 CLBR_NONE, \
8004 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8005+
8006+#define GET_CR0_INTO_RDI \
8007+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8008+ mov %rax,%rdi
8009+
8010+#define SET_RDI_INTO_CR0 \
8011+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8012+
8013+#define GET_CR3_INTO_RDI \
8014+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8015+ mov %rax,%rdi
8016+
8017+#define SET_RDI_INTO_CR3 \
8018+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8019+
8020 #endif /* CONFIG_X86_32 */
8021
8022 #endif /* __ASSEMBLY__ */
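The pax_open_kernel()/pax_close_kernel() hooks added above bracket writes to otherwise read-only kernel data such as the IDT and GDT. A user-space analogy (not the kernel mechanism) using mprotect() to toggle a page between read-only and writable around an update:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        /* a page that is normally kept read-only, standing in for the IDT/GDT */
        char *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (table == MAP_FAILED)
                return 1;
        strcpy(table, "initial entry");
        mprotect(table, pagesz, PROT_READ);                  /* steady state: read-only        */

        mprotect(table, pagesz, PROT_READ | PROT_WRITE);     /* analogue of pax_open_kernel()  */
        strcpy(table, "updated entry");
        mprotect(table, pagesz, PROT_READ);                  /* analogue of pax_close_kernel() */

        puts(table);
        munmap(table, pagesz);
        return 0;
}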
8023diff -urNp linux-3.1.1/arch/x86/include/asm/paravirt_types.h linux-3.1.1/arch/x86/include/asm/paravirt_types.h
8024--- linux-3.1.1/arch/x86/include/asm/paravirt_types.h 2011-11-11 15:19:27.000000000 -0500
8025+++ linux-3.1.1/arch/x86/include/asm/paravirt_types.h 2011-11-16 18:39:07.000000000 -0500
8026@@ -84,20 +84,20 @@ struct pv_init_ops {
8027 */
8028 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8029 unsigned long addr, unsigned len);
8030-};
8031+} __no_const;
8032
8033
8034 struct pv_lazy_ops {
8035 /* Set deferred update mode, used for batching operations. */
8036 void (*enter)(void);
8037 void (*leave)(void);
8038-};
8039+} __no_const;
8040
8041 struct pv_time_ops {
8042 unsigned long long (*sched_clock)(void);
8043 unsigned long long (*steal_clock)(int cpu);
8044 unsigned long (*get_tsc_khz)(void);
8045-};
8046+} __no_const;
8047
8048 struct pv_cpu_ops {
8049 /* hooks for various privileged instructions */
8050@@ -193,7 +193,7 @@ struct pv_cpu_ops {
8051
8052 void (*start_context_switch)(struct task_struct *prev);
8053 void (*end_context_switch)(struct task_struct *next);
8054-};
8055+} __no_const;
8056
8057 struct pv_irq_ops {
8058 /*
8059@@ -224,7 +224,7 @@ struct pv_apic_ops {
8060 unsigned long start_eip,
8061 unsigned long start_esp);
8062 #endif
8063-};
8064+} __no_const;
8065
8066 struct pv_mmu_ops {
8067 unsigned long (*read_cr2)(void);
8068@@ -313,6 +313,7 @@ struct pv_mmu_ops {
8069 struct paravirt_callee_save make_pud;
8070
8071 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8072+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8073 #endif /* PAGETABLE_LEVELS == 4 */
8074 #endif /* PAGETABLE_LEVELS >= 3 */
8075
8076@@ -324,6 +325,12 @@ struct pv_mmu_ops {
8077 an mfn. We can tell which is which from the index. */
8078 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8079 phys_addr_t phys, pgprot_t flags);
8080+
8081+#ifdef CONFIG_PAX_KERNEXEC
8082+ unsigned long (*pax_open_kernel)(void);
8083+ unsigned long (*pax_close_kernel)(void);
8084+#endif
8085+
8086 };
8087
8088 struct arch_spinlock;
8089@@ -334,7 +341,7 @@ struct pv_lock_ops {
8090 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8091 int (*spin_trylock)(struct arch_spinlock *lock);
8092 void (*spin_unlock)(struct arch_spinlock *lock);
8093-};
8094+} __no_const;
8095
8096 /* This contains all the paravirt structures: we get a convenient
8097 * number for each function using the offset which we use to indicate
8098diff -urNp linux-3.1.1/arch/x86/include/asm/pgalloc.h linux-3.1.1/arch/x86/include/asm/pgalloc.h
8099--- linux-3.1.1/arch/x86/include/asm/pgalloc.h 2011-11-11 15:19:27.000000000 -0500
8100+++ linux-3.1.1/arch/x86/include/asm/pgalloc.h 2011-11-16 18:39:07.000000000 -0500
8101@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8102 pmd_t *pmd, pte_t *pte)
8103 {
8104 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8105+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8106+}
8107+
8108+static inline void pmd_populate_user(struct mm_struct *mm,
8109+ pmd_t *pmd, pte_t *pte)
8110+{
8111+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8112 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8113 }
8114
8115diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable-2level.h linux-3.1.1/arch/x86/include/asm/pgtable-2level.h
8116--- linux-3.1.1/arch/x86/include/asm/pgtable-2level.h 2011-11-11 15:19:27.000000000 -0500
8117+++ linux-3.1.1/arch/x86/include/asm/pgtable-2level.h 2011-11-16 18:39:07.000000000 -0500
8118@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8119
8120 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8121 {
8122+ pax_open_kernel();
8123 *pmdp = pmd;
8124+ pax_close_kernel();
8125 }
8126
8127 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8128diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_32.h linux-3.1.1/arch/x86/include/asm/pgtable_32.h
8129--- linux-3.1.1/arch/x86/include/asm/pgtable_32.h 2011-11-11 15:19:27.000000000 -0500
8130+++ linux-3.1.1/arch/x86/include/asm/pgtable_32.h 2011-11-16 18:39:07.000000000 -0500
8131@@ -25,9 +25,6 @@
8132 struct mm_struct;
8133 struct vm_area_struct;
8134
8135-extern pgd_t swapper_pg_dir[1024];
8136-extern pgd_t initial_page_table[1024];
8137-
8138 static inline void pgtable_cache_init(void) { }
8139 static inline void check_pgt_cache(void) { }
8140 void paging_init(void);
8141@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8142 # include <asm/pgtable-2level.h>
8143 #endif
8144
8145+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8146+extern pgd_t initial_page_table[PTRS_PER_PGD];
8147+#ifdef CONFIG_X86_PAE
8148+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8149+#endif
8150+
8151 #if defined(CONFIG_HIGHPTE)
8152 #define pte_offset_map(dir, address) \
8153 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8154@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8155 /* Clear a kernel PTE and flush it from the TLB */
8156 #define kpte_clear_flush(ptep, vaddr) \
8157 do { \
8158+ pax_open_kernel(); \
8159 pte_clear(&init_mm, (vaddr), (ptep)); \
8160+ pax_close_kernel(); \
8161 __flush_tlb_one((vaddr)); \
8162 } while (0)
8163
8164@@ -74,6 +79,9 @@ do { \
8165
8166 #endif /* !__ASSEMBLY__ */
8167
8168+#define HAVE_ARCH_UNMAPPED_AREA
8169+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8170+
8171 /*
8172 * kern_addr_valid() is (1) for FLATMEM and (0) for
8173 * SPARSEMEM and DISCONTIGMEM
8174diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h
8175--- linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h 2011-11-11 15:19:27.000000000 -0500
8176+++ linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h 2011-11-16 18:39:07.000000000 -0500
8177@@ -8,7 +8,7 @@
8178 */
8179 #ifdef CONFIG_X86_PAE
8180 # include <asm/pgtable-3level_types.h>
8181-# define PMD_SIZE (1UL << PMD_SHIFT)
8182+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8183 # define PMD_MASK (~(PMD_SIZE - 1))
8184 #else
8185 # include <asm/pgtable-2level_types.h>
8186@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8187 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8188 #endif
8189
8190+#ifdef CONFIG_PAX_KERNEXEC
8191+#ifndef __ASSEMBLY__
8192+extern unsigned char MODULES_EXEC_VADDR[];
8193+extern unsigned char MODULES_EXEC_END[];
8194+#endif
8195+#include <asm/boot.h>
8196+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8197+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8198+#else
8199+#define ktla_ktva(addr) (addr)
8200+#define ktva_ktla(addr) (addr)
8201+#endif
8202+
8203 #define MODULES_VADDR VMALLOC_START
8204 #define MODULES_END VMALLOC_END
8205 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8206diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable-3level.h linux-3.1.1/arch/x86/include/asm/pgtable-3level.h
8207--- linux-3.1.1/arch/x86/include/asm/pgtable-3level.h 2011-11-11 15:19:27.000000000 -0500
8208+++ linux-3.1.1/arch/x86/include/asm/pgtable-3level.h 2011-11-16 18:39:07.000000000 -0500
8209@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8210
8211 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8212 {
8213+ pax_open_kernel();
8214 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8215+ pax_close_kernel();
8216 }
8217
8218 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8219 {
8220+ pax_open_kernel();
8221 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8222+ pax_close_kernel();
8223 }
8224
8225 /*
8226diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_64.h linux-3.1.1/arch/x86/include/asm/pgtable_64.h
8227--- linux-3.1.1/arch/x86/include/asm/pgtable_64.h 2011-11-11 15:19:27.000000000 -0500
8228+++ linux-3.1.1/arch/x86/include/asm/pgtable_64.h 2011-11-16 18:39:07.000000000 -0500
8229@@ -16,10 +16,13 @@
8230
8231 extern pud_t level3_kernel_pgt[512];
8232 extern pud_t level3_ident_pgt[512];
8233+extern pud_t level3_vmalloc_pgt[512];
8234+extern pud_t level3_vmemmap_pgt[512];
8235+extern pud_t level2_vmemmap_pgt[512];
8236 extern pmd_t level2_kernel_pgt[512];
8237 extern pmd_t level2_fixmap_pgt[512];
8238-extern pmd_t level2_ident_pgt[512];
8239-extern pgd_t init_level4_pgt[];
8240+extern pmd_t level2_ident_pgt[512*2];
8241+extern pgd_t init_level4_pgt[512];
8242
8243 #define swapper_pg_dir init_level4_pgt
8244
8245@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8246
8247 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8248 {
8249+ pax_open_kernel();
8250 *pmdp = pmd;
8251+ pax_close_kernel();
8252 }
8253
8254 static inline void native_pmd_clear(pmd_t *pmd)
8255@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8256
8257 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8258 {
8259+ pax_open_kernel();
8260+ *pgdp = pgd;
8261+ pax_close_kernel();
8262+}
8263+
8264+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8265+{
8266 *pgdp = pgd;
8267 }
8268
8269diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h
8270--- linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h 2011-11-11 15:19:27.000000000 -0500
8271+++ linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h 2011-11-16 18:39:07.000000000 -0500
8272@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8273 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8274 #define MODULES_END _AC(0xffffffffff000000, UL)
8275 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8276+#define MODULES_EXEC_VADDR MODULES_VADDR
8277+#define MODULES_EXEC_END MODULES_END
8278+
8279+#define ktla_ktva(addr) (addr)
8280+#define ktva_ktla(addr) (addr)
8281
8282 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8283diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable.h linux-3.1.1/arch/x86/include/asm/pgtable.h
8284--- linux-3.1.1/arch/x86/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
8285+++ linux-3.1.1/arch/x86/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
8286@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8287
8288 #ifndef __PAGETABLE_PUD_FOLDED
8289 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8290+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8291 #define pgd_clear(pgd) native_pgd_clear(pgd)
8292 #endif
8293
8294@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8295
8296 #define arch_end_context_switch(prev) do {} while(0)
8297
8298+#define pax_open_kernel() native_pax_open_kernel()
8299+#define pax_close_kernel() native_pax_close_kernel()
8300 #endif /* CONFIG_PARAVIRT */
8301
8302+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8303+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8304+
8305+#ifdef CONFIG_PAX_KERNEXEC
8306+static inline unsigned long native_pax_open_kernel(void)
8307+{
8308+ unsigned long cr0;
8309+
8310+ preempt_disable();
8311+ barrier();
8312+ cr0 = read_cr0() ^ X86_CR0_WP;
8313+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8314+ write_cr0(cr0);
8315+ return cr0 ^ X86_CR0_WP;
8316+}
8317+
8318+static inline unsigned long native_pax_close_kernel(void)
8319+{
8320+ unsigned long cr0;
8321+
8322+ cr0 = read_cr0() ^ X86_CR0_WP;
8323+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8324+ write_cr0(cr0);
8325+ barrier();
8326+ preempt_enable_no_resched();
8327+ return cr0 ^ X86_CR0_WP;
8328+}
8329+#else
8330+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8331+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8332+#endif
8333+
8334 /*
8335 * The following only work if pte_present() is true.
8336 * Undefined behaviour if not..
8337 */
8338+static inline int pte_user(pte_t pte)
8339+{
8340+ return pte_val(pte) & _PAGE_USER;
8341+}
8342+
8343 static inline int pte_dirty(pte_t pte)
8344 {
8345 return pte_flags(pte) & _PAGE_DIRTY;
8346@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8347 return pte_clear_flags(pte, _PAGE_RW);
8348 }
8349
8350+static inline pte_t pte_mkread(pte_t pte)
8351+{
8352+ return __pte(pte_val(pte) | _PAGE_USER);
8353+}
8354+
8355 static inline pte_t pte_mkexec(pte_t pte)
8356 {
8357- return pte_clear_flags(pte, _PAGE_NX);
8358+#ifdef CONFIG_X86_PAE
8359+ if (__supported_pte_mask & _PAGE_NX)
8360+ return pte_clear_flags(pte, _PAGE_NX);
8361+ else
8362+#endif
8363+ return pte_set_flags(pte, _PAGE_USER);
8364+}
8365+
8366+static inline pte_t pte_exprotect(pte_t pte)
8367+{
8368+#ifdef CONFIG_X86_PAE
8369+ if (__supported_pte_mask & _PAGE_NX)
8370+ return pte_set_flags(pte, _PAGE_NX);
8371+ else
8372+#endif
8373+ return pte_clear_flags(pte, _PAGE_USER);
8374 }
8375
8376 static inline pte_t pte_mkdirty(pte_t pte)
8377@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8378 #endif
8379
8380 #ifndef __ASSEMBLY__
8381+
8382+#ifdef CONFIG_PAX_PER_CPU_PGD
8383+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8384+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8385+{
8386+ return cpu_pgd[cpu];
8387+}
8388+#endif
8389+
8390 #include <linux/mm_types.h>
8391
8392 static inline int pte_none(pte_t pte)
8393@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8394
8395 static inline int pgd_bad(pgd_t pgd)
8396 {
8397- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8398+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8399 }
8400
8401 static inline int pgd_none(pgd_t pgd)
8402@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8403 * pgd_offset() returns a (pgd_t *)
8404 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8405 */
8406-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8407+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8408+
8409+#ifdef CONFIG_PAX_PER_CPU_PGD
8410+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8411+#endif
8412+
8413 /*
8414 * a shortcut which implies the use of the kernel's pgd, instead
8415 * of a process's
8416@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8417 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8418 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8419
8420+#ifdef CONFIG_X86_32
8421+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8422+#else
8423+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8424+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8425+
8426+#ifdef CONFIG_PAX_MEMORY_UDEREF
8427+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8428+#else
8429+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8430+#endif
8431+
8432+#endif
8433+
8434 #ifndef __ASSEMBLY__
8435
8436 extern int direct_gbpages;
8437@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8438 * dst and src can be on the same page, but the range must not overlap,
8439 * and must not cross a page boundary.
8440 */
8441-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8442+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8443 {
8444- memcpy(dst, src, count * sizeof(pgd_t));
8445+ pax_open_kernel();
8446+ while (count--)
8447+ *dst++ = *src++;
8448+ pax_close_kernel();
8449 }
8450
8451+#ifdef CONFIG_PAX_PER_CPU_PGD
8452+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8453+#endif
8454+
8455+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8456+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8457+#else
8458+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8459+#endif
8460
8461 #include <asm-generic/pgtable.h>
8462 #endif /* __ASSEMBLY__ */
8463diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_types.h linux-3.1.1/arch/x86/include/asm/pgtable_types.h
8464--- linux-3.1.1/arch/x86/include/asm/pgtable_types.h 2011-11-11 15:19:27.000000000 -0500
8465+++ linux-3.1.1/arch/x86/include/asm/pgtable_types.h 2011-11-16 18:39:07.000000000 -0500
8466@@ -16,13 +16,12 @@
8467 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8468 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8469 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8470-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8471+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8472 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8473 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8474 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8475-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8476-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8477-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8478+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8479+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8480 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8481
8482 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8483@@ -40,7 +39,6 @@
8484 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8485 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8486 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8487-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8488 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8489 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8490 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8491@@ -57,8 +55,10 @@
8492
8493 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8494 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8495-#else
8496+#elif defined(CONFIG_KMEMCHECK)
8497 #define _PAGE_NX (_AT(pteval_t, 0))
8498+#else
8499+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8500 #endif
8501
8502 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8503@@ -96,6 +96,9 @@
8504 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8505 _PAGE_ACCESSED)
8506
8507+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8508+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8509+
8510 #define __PAGE_KERNEL_EXEC \
8511 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8512 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8513@@ -106,7 +109,7 @@
8514 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8515 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8516 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8517-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8518+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8519 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
8520 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
8521 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8522@@ -168,8 +171,8 @@
8523 * bits are combined, this will alow user to access the high address mapped
8524 * VDSO in the presence of CONFIG_COMPAT_VDSO
8525 */
8526-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8527-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8528+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8529+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8530 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8531 #endif
8532
8533@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8534 {
8535 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8536 }
8537+#endif
8538
8539+#if PAGETABLE_LEVELS == 3
8540+#include <asm-generic/pgtable-nopud.h>
8541+#endif
8542+
8543+#if PAGETABLE_LEVELS == 2
8544+#include <asm-generic/pgtable-nopmd.h>
8545+#endif
8546+
8547+#ifndef __ASSEMBLY__
8548 #if PAGETABLE_LEVELS > 3
8549 typedef struct { pudval_t pud; } pud_t;
8550
8551@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pu
8552 return pud.pud;
8553 }
8554 #else
8555-#include <asm-generic/pgtable-nopud.h>
8556-
8557 static inline pudval_t native_pud_val(pud_t pud)
8558 {
8559 return native_pgd_val(pud.pgd);
8560@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pm
8561 return pmd.pmd;
8562 }
8563 #else
8564-#include <asm-generic/pgtable-nopmd.h>
8565-
8566 static inline pmdval_t native_pmd_val(pmd_t pmd)
8567 {
8568 return native_pgd_val(pmd.pud.pgd);
8569@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
8570
8571 extern pteval_t __supported_pte_mask;
8572 extern void set_nx(void);
8573-extern int nx_enabled;
8574
8575 #define pgprot_writecombine pgprot_writecombine
8576 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8577diff -urNp linux-3.1.1/arch/x86/include/asm/processor.h linux-3.1.1/arch/x86/include/asm/processor.h
8578--- linux-3.1.1/arch/x86/include/asm/processor.h 2011-11-11 15:19:27.000000000 -0500
8579+++ linux-3.1.1/arch/x86/include/asm/processor.h 2011-11-16 18:39:07.000000000 -0500
8580@@ -266,7 +266,7 @@ struct tss_struct {
8581
8582 } ____cacheline_aligned;
8583
8584-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8585+extern struct tss_struct init_tss[NR_CPUS];
8586
8587 /*
8588 * Save the original ist values for checking stack pointers during debugging
8589@@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(co
8590 */
8591 #define TASK_SIZE PAGE_OFFSET
8592 #define TASK_SIZE_MAX TASK_SIZE
8593+
8594+#ifdef CONFIG_PAX_SEGMEXEC
8595+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8596+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8597+#else
8598 #define STACK_TOP TASK_SIZE
8599-#define STACK_TOP_MAX STACK_TOP
8600+#endif
8601+
8602+#define STACK_TOP_MAX TASK_SIZE
8603
8604 #define INIT_THREAD { \
8605- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8606+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8607 .vm86_info = NULL, \
8608 .sysenter_cs = __KERNEL_CS, \
8609 .io_bitmap_ptr = NULL, \
8610@@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(co
8611 */
8612 #define INIT_TSS { \
8613 .x86_tss = { \
8614- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8615+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8616 .ss0 = __KERNEL_DS, \
8617 .ss1 = __KERNEL_CS, \
8618 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8619@@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(co
8620 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8621
8622 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8623-#define KSTK_TOP(info) \
8624-({ \
8625- unsigned long *__ptr = (unsigned long *)(info); \
8626- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8627-})
8628+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8629
8630 /*
8631 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8632@@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(str
8633 #define task_pt_regs(task) \
8634 ({ \
8635 struct pt_regs *__regs__; \
8636- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8637+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8638 __regs__ - 1; \
8639 })
8640
8641@@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(str
8642 /*
8643 * User space process size. 47bits minus one guard page.
8644 */
8645-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8646+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8647
8648 /* This decides where the kernel will search for a free chunk of vm
8649 * space during mmap's.
8650 */
8651 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8652- 0xc0000000 : 0xFFFFe000)
8653+ 0xc0000000 : 0xFFFFf000)
8654
8655 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8656 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8657@@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(str
8658 #define STACK_TOP_MAX TASK_SIZE_MAX
8659
8660 #define INIT_THREAD { \
8661- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8662+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8663 }
8664
8665 #define INIT_TSS { \
8666- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8667+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8668 }
8669
8670 /*
8671@@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs
8672 */
8673 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8674
8675+#ifdef CONFIG_PAX_SEGMEXEC
8676+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8677+#endif
8678+
8679 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8680
8681 /* Get/set a process' ability to use the timestamp counter instruction */
8682diff -urNp linux-3.1.1/arch/x86/include/asm/ptrace.h linux-3.1.1/arch/x86/include/asm/ptrace.h
8683--- linux-3.1.1/arch/x86/include/asm/ptrace.h 2011-11-11 15:19:27.000000000 -0500
8684+++ linux-3.1.1/arch/x86/include/asm/ptrace.h 2011-11-16 18:39:07.000000000 -0500
8685@@ -156,28 +156,29 @@ static inline unsigned long regs_return_
8686 }
8687
8688 /*
8689- * user_mode_vm(regs) determines whether a register set came from user mode.
8690+ * user_mode(regs) determines whether a register set came from user mode.
8691 * This is true if V8086 mode was enabled OR if the register set was from
8692 * protected mode with RPL-3 CS value. This tricky test checks that with
8693 * one comparison. Many places in the kernel can bypass this full check
8694- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8695+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8696+ * be used.
8697 */
8698-static inline int user_mode(struct pt_regs *regs)
8699+static inline int user_mode_novm(struct pt_regs *regs)
8700 {
8701 #ifdef CONFIG_X86_32
8702 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8703 #else
8704- return !!(regs->cs & 3);
8705+ return !!(regs->cs & SEGMENT_RPL_MASK);
8706 #endif
8707 }
8708
8709-static inline int user_mode_vm(struct pt_regs *regs)
8710+static inline int user_mode(struct pt_regs *regs)
8711 {
8712 #ifdef CONFIG_X86_32
8713 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8714 USER_RPL;
8715 #else
8716- return user_mode(regs);
8717+ return user_mode_novm(regs);
8718 #endif
8719 }
8720
8721@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_r
8722 #ifdef CONFIG_X86_64
8723 static inline bool user_64bit_mode(struct pt_regs *regs)
8724 {
8725+ unsigned long cs = regs->cs & 0xffff;
8726 #ifndef CONFIG_PARAVIRT
8727 /*
8728 * On non-paravirt systems, this is the only long mode CPL 3
8729 * selector. We do not allow long mode selectors in the LDT.
8730 */
8731- return regs->cs == __USER_CS;
8732+ return cs == __USER_CS;
8733 #else
8734 /* Headers are too twisted for this to go in paravirt.h. */
8735- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
8736+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
8737 #endif
8738 }
8739 #endif
8740diff -urNp linux-3.1.1/arch/x86/include/asm/reboot.h linux-3.1.1/arch/x86/include/asm/reboot.h
8741--- linux-3.1.1/arch/x86/include/asm/reboot.h 2011-11-11 15:19:27.000000000 -0500
8742+++ linux-3.1.1/arch/x86/include/asm/reboot.h 2011-11-16 18:39:07.000000000 -0500
8743@@ -6,19 +6,19 @@
8744 struct pt_regs;
8745
8746 struct machine_ops {
8747- void (*restart)(char *cmd);
8748- void (*halt)(void);
8749- void (*power_off)(void);
8750+ void (* __noreturn restart)(char *cmd);
8751+ void (* __noreturn halt)(void);
8752+ void (* __noreturn power_off)(void);
8753 void (*shutdown)(void);
8754 void (*crash_shutdown)(struct pt_regs *);
8755- void (*emergency_restart)(void);
8756-};
8757+ void (* __noreturn emergency_restart)(void);
8758+} __no_const;
8759
8760 extern struct machine_ops machine_ops;
8761
8762 void native_machine_crash_shutdown(struct pt_regs *regs);
8763 void native_machine_shutdown(void);
8764-void machine_real_restart(unsigned int type);
8765+void machine_real_restart(unsigned int type) __noreturn;
8766 /* These must match dispatch_table in reboot_32.S */
8767 #define MRR_BIOS 0
8768 #define MRR_APM 1
8769diff -urNp linux-3.1.1/arch/x86/include/asm/rwsem.h linux-3.1.1/arch/x86/include/asm/rwsem.h
8770--- linux-3.1.1/arch/x86/include/asm/rwsem.h 2011-11-11 15:19:27.000000000 -0500
8771+++ linux-3.1.1/arch/x86/include/asm/rwsem.h 2011-11-16 18:39:07.000000000 -0500
8772@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8773 {
8774 asm volatile("# beginning down_read\n\t"
8775 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8776+
8777+#ifdef CONFIG_PAX_REFCOUNT
8778+ "jno 0f\n"
8779+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8780+ "int $4\n0:\n"
8781+ _ASM_EXTABLE(0b, 0b)
8782+#endif
8783+
8784 /* adds 0x00000001 */
8785 " jns 1f\n"
8786 " call call_rwsem_down_read_failed\n"
8787@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8788 "1:\n\t"
8789 " mov %1,%2\n\t"
8790 " add %3,%2\n\t"
8791+
8792+#ifdef CONFIG_PAX_REFCOUNT
8793+ "jno 0f\n"
8794+ "sub %3,%2\n"
8795+ "int $4\n0:\n"
8796+ _ASM_EXTABLE(0b, 0b)
8797+#endif
8798+
8799 " jle 2f\n\t"
8800 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8801 " jnz 1b\n\t"
8802@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8803 long tmp;
8804 asm volatile("# beginning down_write\n\t"
8805 LOCK_PREFIX " xadd %1,(%2)\n\t"
8806+
8807+#ifdef CONFIG_PAX_REFCOUNT
8808+ "jno 0f\n"
8809+ "mov %1,(%2)\n"
8810+ "int $4\n0:\n"
8811+ _ASM_EXTABLE(0b, 0b)
8812+#endif
8813+
8814 /* adds 0xffff0001, returns the old value */
8815 " test %1,%1\n\t"
8816 /* was the count 0 before? */
8817@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8818 long tmp;
8819 asm volatile("# beginning __up_read\n\t"
8820 LOCK_PREFIX " xadd %1,(%2)\n\t"
8821+
8822+#ifdef CONFIG_PAX_REFCOUNT
8823+ "jno 0f\n"
8824+ "mov %1,(%2)\n"
8825+ "int $4\n0:\n"
8826+ _ASM_EXTABLE(0b, 0b)
8827+#endif
8828+
8829 /* subtracts 1, returns the old value */
8830 " jns 1f\n\t"
8831 " call call_rwsem_wake\n" /* expects old value in %edx */
8832@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8833 long tmp;
8834 asm volatile("# beginning __up_write\n\t"
8835 LOCK_PREFIX " xadd %1,(%2)\n\t"
8836+
8837+#ifdef CONFIG_PAX_REFCOUNT
8838+ "jno 0f\n"
8839+ "mov %1,(%2)\n"
8840+ "int $4\n0:\n"
8841+ _ASM_EXTABLE(0b, 0b)
8842+#endif
8843+
8844 /* subtracts 0xffff0001, returns the old value */
8845 " jns 1f\n\t"
8846 " call call_rwsem_wake\n" /* expects old value in %edx */
8847@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8848 {
8849 asm volatile("# beginning __downgrade_write\n\t"
8850 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8851+
8852+#ifdef CONFIG_PAX_REFCOUNT
8853+ "jno 0f\n"
8854+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8855+ "int $4\n0:\n"
8856+ _ASM_EXTABLE(0b, 0b)
8857+#endif
8858+
8859 /*
8860 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8861 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8862@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8863 */
8864 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8865 {
8866- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8867+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8868+
8869+#ifdef CONFIG_PAX_REFCOUNT
8870+ "jno 0f\n"
8871+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8872+ "int $4\n0:\n"
8873+ _ASM_EXTABLE(0b, 0b)
8874+#endif
8875+
8876 : "+m" (sem->count)
8877 : "er" (delta));
8878 }
8879@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8880 {
8881 long tmp = delta;
8882
8883- asm volatile(LOCK_PREFIX "xadd %0,%1"
8884+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8885+
8886+#ifdef CONFIG_PAX_REFCOUNT
8887+ "jno 0f\n"
8888+ "mov %0,%1\n"
8889+ "int $4\n0:\n"
8890+ _ASM_EXTABLE(0b, 0b)
8891+#endif
8892+
8893 : "+r" (tmp), "+m" (sem->count)
8894 : : "memory");
8895
8896diff -urNp linux-3.1.1/arch/x86/include/asm/segment.h linux-3.1.1/arch/x86/include/asm/segment.h
8897--- linux-3.1.1/arch/x86/include/asm/segment.h 2011-11-11 15:19:27.000000000 -0500
8898+++ linux-3.1.1/arch/x86/include/asm/segment.h 2011-11-16 18:39:07.000000000 -0500
8899@@ -64,10 +64,15 @@
8900 * 26 - ESPFIX small SS
8901 * 27 - per-cpu [ offset to per-cpu data area ]
8902 * 28 - stack_canary-20 [ for stack protector ]
8903- * 29 - unused
8904- * 30 - unused
8905+ * 29 - PCI BIOS CS
8906+ * 30 - PCI BIOS DS
8907 * 31 - TSS for double fault handler
8908 */
8909+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8910+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8911+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8912+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8913+
8914 #define GDT_ENTRY_TLS_MIN 6
8915 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8916
8917@@ -79,6 +84,8 @@
8918
8919 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8920
8921+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8922+
8923 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8924
8925 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8926@@ -104,6 +111,12 @@
8927 #define __KERNEL_STACK_CANARY 0
8928 #endif
8929
8930+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8931+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8932+
8933+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8934+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8935+
8936 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8937
8938 /*
8939@@ -141,7 +154,7 @@
8940 */
8941
8942 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8943-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8944+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8945
8946
8947 #else
8948@@ -165,6 +178,8 @@
8949 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
8950 #define __USER32_DS __USER_DS
8951
8952+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8953+
8954 #define GDT_ENTRY_TSS 8 /* needs two entries */
8955 #define GDT_ENTRY_LDT 10 /* needs two entries */
8956 #define GDT_ENTRY_TLS_MIN 12
8957@@ -185,6 +200,7 @@
8958 #endif
8959
8960 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8961+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8962 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8963 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8964 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8965diff -urNp linux-3.1.1/arch/x86/include/asm/smp.h linux-3.1.1/arch/x86/include/asm/smp.h
8966--- linux-3.1.1/arch/x86/include/asm/smp.h 2011-11-11 15:19:27.000000000 -0500
8967+++ linux-3.1.1/arch/x86/include/asm/smp.h 2011-11-16 18:39:07.000000000 -0500
8968@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8969 /* cpus sharing the last level cache: */
8970 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8971 DECLARE_PER_CPU(u16, cpu_llc_id);
8972-DECLARE_PER_CPU(int, cpu_number);
8973+DECLARE_PER_CPU(unsigned int, cpu_number);
8974
8975 static inline struct cpumask *cpu_sibling_mask(int cpu)
8976 {
8977@@ -77,7 +77,7 @@ struct smp_ops {
8978
8979 void (*send_call_func_ipi)(const struct cpumask *mask);
8980 void (*send_call_func_single_ipi)(int cpu);
8981-};
8982+} __no_const;
8983
8984 /* Globals due to paravirt */
8985 extern void set_cpu_sibling_map(int cpu);
8986@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8987 extern int safe_smp_processor_id(void);
8988
8989 #elif defined(CONFIG_X86_64_SMP)
8990-#define raw_smp_processor_id() (percpu_read(cpu_number))
8991-
8992-#define stack_smp_processor_id() \
8993-({ \
8994- struct thread_info *ti; \
8995- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8996- ti->cpu; \
8997-})
8998+#define raw_smp_processor_id() (percpu_read(cpu_number))
8999+#define stack_smp_processor_id() raw_smp_processor_id()
9000 #define safe_smp_processor_id() smp_processor_id()
9001
9002 #endif
9003diff -urNp linux-3.1.1/arch/x86/include/asm/spinlock.h linux-3.1.1/arch/x86/include/asm/spinlock.h
9004--- linux-3.1.1/arch/x86/include/asm/spinlock.h 2011-11-11 15:19:27.000000000 -0500
9005+++ linux-3.1.1/arch/x86/include/asm/spinlock.h 2011-11-16 18:39:07.000000000 -0500
9006@@ -248,6 +248,14 @@ static inline int arch_write_can_lock(ar
9007 static inline void arch_read_lock(arch_rwlock_t *rw)
9008 {
9009 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
9010+
9011+#ifdef CONFIG_PAX_REFCOUNT
9012+ "jno 0f\n"
9013+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
9014+ "int $4\n0:\n"
9015+ _ASM_EXTABLE(0b, 0b)
9016+#endif
9017+
9018 "jns 1f\n"
9019 "call __read_lock_failed\n\t"
9020 "1:\n"
9021@@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_r
9022 static inline void arch_write_lock(arch_rwlock_t *rw)
9023 {
9024 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
9025+
9026+#ifdef CONFIG_PAX_REFCOUNT
9027+ "jno 0f\n"
9028+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
9029+ "int $4\n0:\n"
9030+ _ASM_EXTABLE(0b, 0b)
9031+#endif
9032+
9033 "jz 1f\n"
9034 "call __write_lock_failed\n\t"
9035 "1:\n"
9036@@ -286,13 +302,29 @@ static inline int arch_write_trylock(arc
9037
9038 static inline void arch_read_unlock(arch_rwlock_t *rw)
9039 {
9040- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
9041+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
9042+
9043+#ifdef CONFIG_PAX_REFCOUNT
9044+ "jno 0f\n"
9045+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
9046+ "int $4\n0:\n"
9047+ _ASM_EXTABLE(0b, 0b)
9048+#endif
9049+
9050 :"+m" (rw->lock) : : "memory");
9051 }
9052
9053 static inline void arch_write_unlock(arch_rwlock_t *rw)
9054 {
9055- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
9056+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
9057+
9058+#ifdef CONFIG_PAX_REFCOUNT
9059+ "jno 0f\n"
9060+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
9061+ "int $4\n0:\n"
9062+ _ASM_EXTABLE(0b, 0b)
9063+#endif
9064+
9065 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
9066 }
9067
9068diff -urNp linux-3.1.1/arch/x86/include/asm/stackprotector.h linux-3.1.1/arch/x86/include/asm/stackprotector.h
9069--- linux-3.1.1/arch/x86/include/asm/stackprotector.h 2011-11-11 15:19:27.000000000 -0500
9070+++ linux-3.1.1/arch/x86/include/asm/stackprotector.h 2011-11-16 18:39:07.000000000 -0500
9071@@ -48,7 +48,7 @@
9072 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9073 */
9074 #define GDT_STACK_CANARY_INIT \
9075- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9076+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9077
9078 /*
9079 * Initialize the stackprotector canary value.
9080@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9081
9082 static inline void load_stack_canary_segment(void)
9083 {
9084-#ifdef CONFIG_X86_32
9085+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9086 asm volatile ("mov %0, %%gs" : : "r" (0));
9087 #endif
9088 }
9089diff -urNp linux-3.1.1/arch/x86/include/asm/stacktrace.h linux-3.1.1/arch/x86/include/asm/stacktrace.h
9090--- linux-3.1.1/arch/x86/include/asm/stacktrace.h 2011-11-11 15:19:27.000000000 -0500
9091+++ linux-3.1.1/arch/x86/include/asm/stacktrace.h 2011-11-16 18:39:07.000000000 -0500
9092@@ -11,28 +11,20 @@
9093
9094 extern int kstack_depth_to_print;
9095
9096-struct thread_info;
9097+struct task_struct;
9098 struct stacktrace_ops;
9099
9100-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9101- unsigned long *stack,
9102- unsigned long bp,
9103- const struct stacktrace_ops *ops,
9104- void *data,
9105- unsigned long *end,
9106- int *graph);
9107-
9108-extern unsigned long
9109-print_context_stack(struct thread_info *tinfo,
9110- unsigned long *stack, unsigned long bp,
9111- const struct stacktrace_ops *ops, void *data,
9112- unsigned long *end, int *graph);
9113-
9114-extern unsigned long
9115-print_context_stack_bp(struct thread_info *tinfo,
9116- unsigned long *stack, unsigned long bp,
9117- const struct stacktrace_ops *ops, void *data,
9118- unsigned long *end, int *graph);
9119+typedef unsigned long walk_stack_t(struct task_struct *task,
9120+ void *stack_start,
9121+ unsigned long *stack,
9122+ unsigned long bp,
9123+ const struct stacktrace_ops *ops,
9124+ void *data,
9125+ unsigned long *end,
9126+ int *graph);
9127+
9128+extern walk_stack_t print_context_stack;
9129+extern walk_stack_t print_context_stack_bp;
9130
9131 /* Generic stack tracer with callbacks */
9132
9133@@ -40,7 +32,7 @@ struct stacktrace_ops {
9134 void (*address)(void *data, unsigned long address, int reliable);
9135 /* On negative return stop dumping */
9136 int (*stack)(void *data, char *name);
9137- walk_stack_t walk_stack;
9138+ walk_stack_t *walk_stack;
9139 };
9140
9141 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9142diff -urNp linux-3.1.1/arch/x86/include/asm/sys_ia32.h linux-3.1.1/arch/x86/include/asm/sys_ia32.h
9143--- linux-3.1.1/arch/x86/include/asm/sys_ia32.h 2011-11-11 15:19:27.000000000 -0500
9144+++ linux-3.1.1/arch/x86/include/asm/sys_ia32.h 2011-11-16 18:39:07.000000000 -0500
9145@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9146 compat_sigset_t __user *, unsigned int);
9147 asmlinkage long sys32_alarm(unsigned int);
9148
9149-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9150+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9151 asmlinkage long sys32_sysfs(int, u32, u32);
9152
9153 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9154diff -urNp linux-3.1.1/arch/x86/include/asm/system.h linux-3.1.1/arch/x86/include/asm/system.h
9155--- linux-3.1.1/arch/x86/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
9156+++ linux-3.1.1/arch/x86/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
9157@@ -129,7 +129,7 @@ do { \
9158 "call __switch_to\n\t" \
9159 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9160 __switch_canary \
9161- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9162+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9163 "movq %%rax,%%rdi\n\t" \
9164 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9165 "jnz ret_from_fork\n\t" \
9166@@ -140,7 +140,7 @@ do { \
9167 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9168 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9169 [_tif_fork] "i" (_TIF_FORK), \
9170- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9171+ [thread_info] "m" (current_tinfo), \
9172 [current_task] "m" (current_task) \
9173 __switch_canary_iparam \
9174 : "memory", "cc" __EXTRA_CLOBBER)
9175@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9176 {
9177 unsigned long __limit;
9178 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9179- return __limit + 1;
9180+ return __limit;
9181 }
9182
9183 static inline void native_clts(void)
9184@@ -397,12 +397,12 @@ void enable_hlt(void);
9185
9186 void cpu_idle_wait(void);
9187
9188-extern unsigned long arch_align_stack(unsigned long sp);
9189+#define arch_align_stack(x) ((x) & ~0xfUL)
9190 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9191
9192 void default_idle(void);
9193
9194-void stop_this_cpu(void *dummy);
9195+void stop_this_cpu(void *dummy) __noreturn;
9196
9197 /*
9198 * Force strict CPU ordering.
9199diff -urNp linux-3.1.1/arch/x86/include/asm/thread_info.h linux-3.1.1/arch/x86/include/asm/thread_info.h
9200--- linux-3.1.1/arch/x86/include/asm/thread_info.h 2011-11-11 15:19:27.000000000 -0500
9201+++ linux-3.1.1/arch/x86/include/asm/thread_info.h 2011-11-16 18:39:07.000000000 -0500
9202@@ -10,6 +10,7 @@
9203 #include <linux/compiler.h>
9204 #include <asm/page.h>
9205 #include <asm/types.h>
9206+#include <asm/percpu.h>
9207
9208 /*
9209 * low level task data that entry.S needs immediate access to
9210@@ -24,7 +25,6 @@ struct exec_domain;
9211 #include <linux/atomic.h>
9212
9213 struct thread_info {
9214- struct task_struct *task; /* main task structure */
9215 struct exec_domain *exec_domain; /* execution domain */
9216 __u32 flags; /* low level flags */
9217 __u32 status; /* thread synchronous flags */
9218@@ -34,18 +34,12 @@ struct thread_info {
9219 mm_segment_t addr_limit;
9220 struct restart_block restart_block;
9221 void __user *sysenter_return;
9222-#ifdef CONFIG_X86_32
9223- unsigned long previous_esp; /* ESP of the previous stack in
9224- case of nested (IRQ) stacks
9225- */
9226- __u8 supervisor_stack[0];
9227-#endif
9228+ unsigned long lowest_stack;
9229 int uaccess_err;
9230 };
9231
9232-#define INIT_THREAD_INFO(tsk) \
9233+#define INIT_THREAD_INFO \
9234 { \
9235- .task = &tsk, \
9236 .exec_domain = &default_exec_domain, \
9237 .flags = 0, \
9238 .cpu = 0, \
9239@@ -56,7 +50,7 @@ struct thread_info {
9240 }, \
9241 }
9242
9243-#define init_thread_info (init_thread_union.thread_info)
9244+#define init_thread_info (init_thread_union.stack)
9245 #define init_stack (init_thread_union.stack)
9246
9247 #else /* !__ASSEMBLY__ */
9248@@ -170,6 +164,23 @@ struct thread_info {
9249 ret; \
9250 })
9251
9252+#ifdef __ASSEMBLY__
9253+/* how to get the thread information struct from ASM */
9254+#define GET_THREAD_INFO(reg) \
9255+ mov PER_CPU_VAR(current_tinfo), reg
9256+
9257+/* use this one if reg already contains %esp */
9258+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9259+#else
9260+/* how to get the thread information struct from C */
9261+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9262+
9263+static __always_inline struct thread_info *current_thread_info(void)
9264+{
9265+ return percpu_read_stable(current_tinfo);
9266+}
9267+#endif
9268+
9269 #ifdef CONFIG_X86_32
9270
9271 #define STACK_WARN (THREAD_SIZE/8)
9272@@ -180,35 +191,13 @@ struct thread_info {
9273 */
9274 #ifndef __ASSEMBLY__
9275
9276-
9277 /* how to get the current stack pointer from C */
9278 register unsigned long current_stack_pointer asm("esp") __used;
9279
9280-/* how to get the thread information struct from C */
9281-static inline struct thread_info *current_thread_info(void)
9282-{
9283- return (struct thread_info *)
9284- (current_stack_pointer & ~(THREAD_SIZE - 1));
9285-}
9286-
9287-#else /* !__ASSEMBLY__ */
9288-
9289-/* how to get the thread information struct from ASM */
9290-#define GET_THREAD_INFO(reg) \
9291- movl $-THREAD_SIZE, reg; \
9292- andl %esp, reg
9293-
9294-/* use this one if reg already contains %esp */
9295-#define GET_THREAD_INFO_WITH_ESP(reg) \
9296- andl $-THREAD_SIZE, reg
9297-
9298 #endif
9299
9300 #else /* X86_32 */
9301
9302-#include <asm/percpu.h>
9303-#define KERNEL_STACK_OFFSET (5*8)
9304-
9305 /*
9306 * macros/functions for gaining access to the thread information structure
9307 * preempt_count needs to be 1 initially, until the scheduler is functional.
9308@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9309 #ifndef __ASSEMBLY__
9310 DECLARE_PER_CPU(unsigned long, kernel_stack);
9311
9312-static inline struct thread_info *current_thread_info(void)
9313-{
9314- struct thread_info *ti;
9315- ti = (void *)(percpu_read_stable(kernel_stack) +
9316- KERNEL_STACK_OFFSET - THREAD_SIZE);
9317- return ti;
9318-}
9319-
9320-#else /* !__ASSEMBLY__ */
9321-
9322-/* how to get the thread information struct from ASM */
9323-#define GET_THREAD_INFO(reg) \
9324- movq PER_CPU_VAR(kernel_stack),reg ; \
9325- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9326-
9327+/* how to get the current stack pointer from C */
9328+register unsigned long current_stack_pointer asm("rsp") __used;
9329 #endif
9330
9331 #endif /* !X86_32 */
9332@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9333 extern void free_thread_info(struct thread_info *ti);
9334 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9335 #define arch_task_cache_init arch_task_cache_init
9336+
9337+#define __HAVE_THREAD_FUNCTIONS
9338+#define task_thread_info(task) (&(task)->tinfo)
9339+#define task_stack_page(task) ((task)->stack)
9340+#define setup_thread_stack(p, org) do {} while (0)
9341+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9342+
9343+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9344+extern struct task_struct *alloc_task_struct_node(int node);
9345+extern void free_task_struct(struct task_struct *);
9346+
9347 #endif
9348 #endif /* _ASM_X86_THREAD_INFO_H */
9349diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess_32.h linux-3.1.1/arch/x86/include/asm/uaccess_32.h
9350--- linux-3.1.1/arch/x86/include/asm/uaccess_32.h 2011-11-11 15:19:27.000000000 -0500
9351+++ linux-3.1.1/arch/x86/include/asm/uaccess_32.h 2011-11-16 18:40:08.000000000 -0500
9352@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9353 static __always_inline unsigned long __must_check
9354 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9355 {
9356+ pax_track_stack();
9357+
9358+ if ((long)n < 0)
9359+ return n;
9360+
9361 if (__builtin_constant_p(n)) {
9362 unsigned long ret;
9363
9364@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9365 return ret;
9366 }
9367 }
9368+ if (!__builtin_constant_p(n))
9369+ check_object_size(from, n, true);
9370 return __copy_to_user_ll(to, from, n);
9371 }
9372
9373@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9374 __copy_to_user(void __user *to, const void *from, unsigned long n)
9375 {
9376 might_fault();
9377+
9378 return __copy_to_user_inatomic(to, from, n);
9379 }
9380
9381 static __always_inline unsigned long
9382 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9383 {
9384+ if ((long)n < 0)
9385+ return n;
9386+
9387 /* Avoid zeroing the tail if the copy fails..
9388 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9389 * but as the zeroing behaviour is only significant when n is not
9390@@ -137,6 +148,12 @@ static __always_inline unsigned long
9391 __copy_from_user(void *to, const void __user *from, unsigned long n)
9392 {
9393 might_fault();
9394+
9395+ pax_track_stack();
9396+
9397+ if ((long)n < 0)
9398+ return n;
9399+
9400 if (__builtin_constant_p(n)) {
9401 unsigned long ret;
9402
9403@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9404 return ret;
9405 }
9406 }
9407+ if (!__builtin_constant_p(n))
9408+ check_object_size(to, n, false);
9409 return __copy_from_user_ll(to, from, n);
9410 }
9411
9412@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9413 const void __user *from, unsigned long n)
9414 {
9415 might_fault();
9416+
9417+ if ((long)n < 0)
9418+ return n;
9419+
9420 if (__builtin_constant_p(n)) {
9421 unsigned long ret;
9422
9423@@ -181,15 +204,19 @@ static __always_inline unsigned long
9424 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9425 unsigned long n)
9426 {
9427- return __copy_from_user_ll_nocache_nozero(to, from, n);
9428-}
9429+ if ((long)n < 0)
9430+ return n;
9431
9432-unsigned long __must_check copy_to_user(void __user *to,
9433- const void *from, unsigned long n);
9434-unsigned long __must_check _copy_from_user(void *to,
9435- const void __user *from,
9436- unsigned long n);
9437+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9438+}
9439
9440+extern void copy_to_user_overflow(void)
9441+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9442+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9443+#else
9444+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9445+#endif
9446+;
9447
9448 extern void copy_from_user_overflow(void)
9449 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9450@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9451 #endif
9452 ;
9453
9454-static inline unsigned long __must_check copy_from_user(void *to,
9455- const void __user *from,
9456- unsigned long n)
9457+/**
9458+ * copy_to_user: - Copy a block of data into user space.
9459+ * @to: Destination address, in user space.
9460+ * @from: Source address, in kernel space.
9461+ * @n: Number of bytes to copy.
9462+ *
9463+ * Context: User context only. This function may sleep.
9464+ *
9465+ * Copy data from kernel space to user space.
9466+ *
9467+ * Returns number of bytes that could not be copied.
9468+ * On success, this will be zero.
9469+ */
9470+static inline unsigned long __must_check
9471+copy_to_user(void __user *to, const void *from, unsigned long n)
9472+{
9473+ int sz = __compiletime_object_size(from);
9474+
9475+ if (unlikely(sz != -1 && sz < n))
9476+ copy_to_user_overflow();
9477+ else if (access_ok(VERIFY_WRITE, to, n))
9478+ n = __copy_to_user(to, from, n);
9479+ return n;
9480+}
9481+
9482+/**
9483+ * copy_from_user: - Copy a block of data from user space.
9484+ * @to: Destination address, in kernel space.
9485+ * @from: Source address, in user space.
9486+ * @n: Number of bytes to copy.
9487+ *
9488+ * Context: User context only. This function may sleep.
9489+ *
9490+ * Copy data from user space to kernel space.
9491+ *
9492+ * Returns number of bytes that could not be copied.
9493+ * On success, this will be zero.
9494+ *
9495+ * If some data could not be copied, this function will pad the copied
9496+ * data to the requested size using zero bytes.
9497+ */
9498+static inline unsigned long __must_check
9499+copy_from_user(void *to, const void __user *from, unsigned long n)
9500 {
9501 int sz = __compiletime_object_size(to);
9502
9503- if (likely(sz == -1 || sz >= n))
9504- n = _copy_from_user(to, from, n);
9505- else
9506+ if (unlikely(sz != -1 && sz < n))
9507 copy_from_user_overflow();
9508-
9509+ else if (access_ok(VERIFY_READ, from, n))
9510+ n = __copy_from_user(to, from, n);
9511+ else if ((long)n > 0) {
9512+ if (!__builtin_constant_p(n))
9513+ check_object_size(to, n, false);
9514+ memset(to, 0, n);
9515+ }
9516 return n;
9517 }
9518
9519diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess_64.h linux-3.1.1/arch/x86/include/asm/uaccess_64.h
9520--- linux-3.1.1/arch/x86/include/asm/uaccess_64.h 2011-11-11 15:19:27.000000000 -0500
9521+++ linux-3.1.1/arch/x86/include/asm/uaccess_64.h 2011-11-16 18:40:08.000000000 -0500
9522@@ -10,6 +10,9 @@
9523 #include <asm/alternative.h>
9524 #include <asm/cpufeature.h>
9525 #include <asm/page.h>
9526+#include <asm/pgtable.h>
9527+
9528+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9529
9530 /*
9531 * Copy To/From Userspace
9532@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9533 return ret;
9534 }
9535
9536-__must_check unsigned long
9537-_copy_to_user(void __user *to, const void *from, unsigned len);
9538-__must_check unsigned long
9539-_copy_from_user(void *to, const void __user *from, unsigned len);
9540+static __always_inline __must_check unsigned long
9541+__copy_to_user(void __user *to, const void *from, unsigned len);
9542+static __always_inline __must_check unsigned long
9543+__copy_from_user(void *to, const void __user *from, unsigned len);
9544 __must_check unsigned long
9545 copy_in_user(void __user *to, const void __user *from, unsigned len);
9546
9547 static inline unsigned long __must_check copy_from_user(void *to,
9548 const void __user *from,
9549- unsigned long n)
9550+ unsigned n)
9551 {
9552- int sz = __compiletime_object_size(to);
9553-
9554 might_fault();
9555- if (likely(sz == -1 || sz >= n))
9556- n = _copy_from_user(to, from, n);
9557-#ifdef CONFIG_DEBUG_VM
9558- else
9559- WARN(1, "Buffer overflow detected!\n");
9560-#endif
9561+
9562+ if (access_ok(VERIFY_READ, from, n))
9563+ n = __copy_from_user(to, from, n);
9564+ else if ((int)n > 0) {
9565+ if (!__builtin_constant_p(n))
9566+ check_object_size(to, n, false);
9567+ memset(to, 0, n);
9568+ }
9569 return n;
9570 }
9571
9572@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9573 {
9574 might_fault();
9575
9576- return _copy_to_user(dst, src, size);
9577+ if (access_ok(VERIFY_WRITE, dst, size))
9578+ size = __copy_to_user(dst, src, size);
9579+ return size;
9580 }
9581
9582 static __always_inline __must_check
9583-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9584+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9585 {
9586- int ret = 0;
9587+ int sz = __compiletime_object_size(dst);
9588+ unsigned ret = 0;
9589
9590 might_fault();
9591- if (!__builtin_constant_p(size))
9592- return copy_user_generic(dst, (__force void *)src, size);
9593+
9594+ pax_track_stack();
9595+
9596+ if ((int)size < 0)
9597+ return size;
9598+
9599+#ifdef CONFIG_PAX_MEMORY_UDEREF
9600+ if (!__access_ok(VERIFY_READ, src, size))
9601+ return size;
9602+#endif
9603+
9604+ if (unlikely(sz != -1 && sz < size)) {
9605+#ifdef CONFIG_DEBUG_VM
9606+ WARN(1, "Buffer overflow detected!\n");
9607+#endif
9608+ return size;
9609+ }
9610+
9611+ if (!__builtin_constant_p(size)) {
9612+ check_object_size(dst, size, false);
9613+
9614+#ifdef CONFIG_PAX_MEMORY_UDEREF
9615+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9616+ src += PAX_USER_SHADOW_BASE;
9617+#endif
9618+
9619+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9620+ }
9621 switch (size) {
9622- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9623+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9624 ret, "b", "b", "=q", 1);
9625 return ret;
9626- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9627+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9628 ret, "w", "w", "=r", 2);
9629 return ret;
9630- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9631+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9632 ret, "l", "k", "=r", 4);
9633 return ret;
9634- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9635+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9636 ret, "q", "", "=r", 8);
9637 return ret;
9638 case 10:
9639- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9640+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9641 ret, "q", "", "=r", 10);
9642 if (unlikely(ret))
9643 return ret;
9644 __get_user_asm(*(u16 *)(8 + (char *)dst),
9645- (u16 __user *)(8 + (char __user *)src),
9646+ (const u16 __user *)(8 + (const char __user *)src),
9647 ret, "w", "w", "=r", 2);
9648 return ret;
9649 case 16:
9650- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9651+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9652 ret, "q", "", "=r", 16);
9653 if (unlikely(ret))
9654 return ret;
9655 __get_user_asm(*(u64 *)(8 + (char *)dst),
9656- (u64 __user *)(8 + (char __user *)src),
9657+ (const u64 __user *)(8 + (const char __user *)src),
9658 ret, "q", "", "=r", 8);
9659 return ret;
9660 default:
9661- return copy_user_generic(dst, (__force void *)src, size);
9662+
9663+#ifdef CONFIG_PAX_MEMORY_UDEREF
9664+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9665+ src += PAX_USER_SHADOW_BASE;
9666+#endif
9667+
9668+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9669 }
9670 }
9671
9672 static __always_inline __must_check
9673-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9674+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9675 {
9676- int ret = 0;
9677+ int sz = __compiletime_object_size(src);
9678+ unsigned ret = 0;
9679
9680 might_fault();
9681- if (!__builtin_constant_p(size))
9682- return copy_user_generic((__force void *)dst, src, size);
9683+
9684+ pax_track_stack();
9685+
9686+ if ((int)size < 0)
9687+ return size;
9688+
9689+#ifdef CONFIG_PAX_MEMORY_UDEREF
9690+ if (!__access_ok(VERIFY_WRITE, dst, size))
9691+ return size;
9692+#endif
9693+
9694+ if (unlikely(sz != -1 && sz < size)) {
9695+#ifdef CONFIG_DEBUG_VM
9696+ WARN(1, "Buffer overflow detected!\n");
9697+#endif
9698+ return size;
9699+ }
9700+
9701+ if (!__builtin_constant_p(size)) {
9702+ check_object_size(src, size, true);
9703+
9704+#ifdef CONFIG_PAX_MEMORY_UDEREF
9705+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9706+ dst += PAX_USER_SHADOW_BASE;
9707+#endif
9708+
9709+ return copy_user_generic((__force_kernel void *)dst, src, size);
9710+ }
9711 switch (size) {
9712- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9713+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9714 ret, "b", "b", "iq", 1);
9715 return ret;
9716- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9717+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9718 ret, "w", "w", "ir", 2);
9719 return ret;
9720- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9721+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9722 ret, "l", "k", "ir", 4);
9723 return ret;
9724- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9725+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9726 ret, "q", "", "er", 8);
9727 return ret;
9728 case 10:
9729- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9730+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9731 ret, "q", "", "er", 10);
9732 if (unlikely(ret))
9733 return ret;
9734 asm("":::"memory");
9735- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9736+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9737 ret, "w", "w", "ir", 2);
9738 return ret;
9739 case 16:
9740- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9741+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9742 ret, "q", "", "er", 16);
9743 if (unlikely(ret))
9744 return ret;
9745 asm("":::"memory");
9746- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9747+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9748 ret, "q", "", "er", 8);
9749 return ret;
9750 default:
9751- return copy_user_generic((__force void *)dst, src, size);
9752+
9753+#ifdef CONFIG_PAX_MEMORY_UDEREF
9754+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9755+ dst += PAX_USER_SHADOW_BASE;
9756+#endif
9757+
9758+ return copy_user_generic((__force_kernel void *)dst, src, size);
9759 }
9760 }
9761
9762 static __always_inline __must_check
9763-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9764+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9765 {
9766- int ret = 0;
9767+ unsigned ret = 0;
9768
9769 might_fault();
9770- if (!__builtin_constant_p(size))
9771- return copy_user_generic((__force void *)dst,
9772- (__force void *)src, size);
9773+
9774+ if ((int)size < 0)
9775+ return size;
9776+
9777+#ifdef CONFIG_PAX_MEMORY_UDEREF
9778+ if (!__access_ok(VERIFY_READ, src, size))
9779+ return size;
9780+ if (!__access_ok(VERIFY_WRITE, dst, size))
9781+ return size;
9782+#endif
9783+
9784+ if (!__builtin_constant_p(size)) {
9785+
9786+#ifdef CONFIG_PAX_MEMORY_UDEREF
9787+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9788+ src += PAX_USER_SHADOW_BASE;
9789+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9790+ dst += PAX_USER_SHADOW_BASE;
9791+#endif
9792+
9793+ return copy_user_generic((__force_kernel void *)dst,
9794+ (__force_kernel const void *)src, size);
9795+ }
9796 switch (size) {
9797 case 1: {
9798 u8 tmp;
9799- __get_user_asm(tmp, (u8 __user *)src,
9800+ __get_user_asm(tmp, (const u8 __user *)src,
9801 ret, "b", "b", "=q", 1);
9802 if (likely(!ret))
9803 __put_user_asm(tmp, (u8 __user *)dst,
9804@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9805 }
9806 case 2: {
9807 u16 tmp;
9808- __get_user_asm(tmp, (u16 __user *)src,
9809+ __get_user_asm(tmp, (const u16 __user *)src,
9810 ret, "w", "w", "=r", 2);
9811 if (likely(!ret))
9812 __put_user_asm(tmp, (u16 __user *)dst,
9813@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9814
9815 case 4: {
9816 u32 tmp;
9817- __get_user_asm(tmp, (u32 __user *)src,
9818+ __get_user_asm(tmp, (const u32 __user *)src,
9819 ret, "l", "k", "=r", 4);
9820 if (likely(!ret))
9821 __put_user_asm(tmp, (u32 __user *)dst,
9822@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9823 }
9824 case 8: {
9825 u64 tmp;
9826- __get_user_asm(tmp, (u64 __user *)src,
9827+ __get_user_asm(tmp, (const u64 __user *)src,
9828 ret, "q", "", "=r", 8);
9829 if (likely(!ret))
9830 __put_user_asm(tmp, (u64 __user *)dst,
9831@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9832 return ret;
9833 }
9834 default:
9835- return copy_user_generic((__force void *)dst,
9836- (__force void *)src, size);
9837+
9838+#ifdef CONFIG_PAX_MEMORY_UDEREF
9839+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9840+ src += PAX_USER_SHADOW_BASE;
9841+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9842+ dst += PAX_USER_SHADOW_BASE;
9843+#endif
9844+
9845+ return copy_user_generic((__force_kernel void *)dst,
9846+ (__force_kernel const void *)src, size);
9847 }
9848 }
9849
9850@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9851 static __must_check __always_inline int
9852 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9853 {
9854- return copy_user_generic(dst, (__force const void *)src, size);
9855+ pax_track_stack();
9856+
9857+ if ((int)size < 0)
9858+ return size;
9859+
9860+#ifdef CONFIG_PAX_MEMORY_UDEREF
9861+ if (!__access_ok(VERIFY_READ, src, size))
9862+ return size;
9863+
9864+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9865+ src += PAX_USER_SHADOW_BASE;
9866+#endif
9867+
9868+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9869 }
9870
9871-static __must_check __always_inline int
9872+static __must_check __always_inline unsigned long
9873 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9874 {
9875- return copy_user_generic((__force void *)dst, src, size);
9876+ if ((int)size < 0)
9877+ return size;
9878+
9879+#ifdef CONFIG_PAX_MEMORY_UDEREF
9880+ if (!__access_ok(VERIFY_WRITE, dst, size))
9881+ return size;
9882+
9883+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9884+ dst += PAX_USER_SHADOW_BASE;
9885+#endif
9886+
9887+ return copy_user_generic((__force_kernel void *)dst, src, size);
9888 }
9889
9890-extern long __copy_user_nocache(void *dst, const void __user *src,
9891+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9892 unsigned size, int zerorest);
9893
9894-static inline int
9895-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9896+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9897 {
9898 might_sleep();
9899+
9900+ if ((int)size < 0)
9901+ return size;
9902+
9903+#ifdef CONFIG_PAX_MEMORY_UDEREF
9904+ if (!__access_ok(VERIFY_READ, src, size))
9905+ return size;
9906+#endif
9907+
9908 return __copy_user_nocache(dst, src, size, 1);
9909 }
9910
9911-static inline int
9912-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9913+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9914 unsigned size)
9915 {
9916+ if ((int)size < 0)
9917+ return size;
9918+
9919+#ifdef CONFIG_PAX_MEMORY_UDEREF
9920+ if (!__access_ok(VERIFY_READ, src, size))
9921+ return size;
9922+#endif
9923+
9924 return __copy_user_nocache(dst, src, size, 0);
9925 }
9926
9927-unsigned long
9928-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9929+extern unsigned long
9930+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
9931
9932 #endif /* _ASM_X86_UACCESS_64_H */
9933diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess.h linux-3.1.1/arch/x86/include/asm/uaccess.h
9934--- linux-3.1.1/arch/x86/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
9935+++ linux-3.1.1/arch/x86/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
9936@@ -7,12 +7,15 @@
9937 #include <linux/compiler.h>
9938 #include <linux/thread_info.h>
9939 #include <linux/string.h>
9940+#include <linux/sched.h>
9941 #include <asm/asm.h>
9942 #include <asm/page.h>
9943
9944 #define VERIFY_READ 0
9945 #define VERIFY_WRITE 1
9946
9947+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9948+
9949 /*
9950 * The fs value determines whether argument validity checking should be
9951 * performed or not. If get_fs() == USER_DS, checking is performed, with
9952@@ -28,7 +31,12 @@
9953
9954 #define get_ds() (KERNEL_DS)
9955 #define get_fs() (current_thread_info()->addr_limit)
9956+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9957+void __set_fs(mm_segment_t x);
9958+void set_fs(mm_segment_t x);
9959+#else
9960 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9961+#endif
9962
9963 #define segment_eq(a, b) ((a).seg == (b).seg)
9964
9965@@ -76,7 +84,33 @@
9966 * checks that the pointer is in the user space range - after calling
9967 * this function, memory access functions may still return -EFAULT.
9968 */
9969-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9970+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9971+#define access_ok(type, addr, size) \
9972+({ \
9973+ long __size = size; \
9974+ unsigned long __addr = (unsigned long)addr; \
9975+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9976+ unsigned long __end_ao = __addr + __size - 1; \
9977+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9978+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9979+ while(__addr_ao <= __end_ao) { \
9980+ char __c_ao; \
9981+ __addr_ao += PAGE_SIZE; \
9982+ if (__size > PAGE_SIZE) \
9983+ cond_resched(); \
9984+ if (__get_user(__c_ao, (char __user *)__addr)) \
9985+ break; \
9986+ if (type != VERIFY_WRITE) { \
9987+ __addr = __addr_ao; \
9988+ continue; \
9989+ } \
9990+ if (__put_user(__c_ao, (char __user *)__addr)) \
9991+ break; \
9992+ __addr = __addr_ao; \
9993+ } \
9994+ } \
9995+ __ret_ao; \
9996+})
9997
9998 /*
9999 * The exception table consists of pairs of addresses: the first is the
10000@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10001 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10002 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10003
10004-
10005+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10006+#define __copyuser_seg "gs;"
10007+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10008+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10009+#else
10010+#define __copyuser_seg
10011+#define __COPYUSER_SET_ES
10012+#define __COPYUSER_RESTORE_ES
10013+#endif
10014
10015 #ifdef CONFIG_X86_32
10016 #define __put_user_asm_u64(x, addr, err, errret) \
10017- asm volatile("1: movl %%eax,0(%2)\n" \
10018- "2: movl %%edx,4(%2)\n" \
10019+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10020+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10021 "3:\n" \
10022 ".section .fixup,\"ax\"\n" \
10023 "4: movl %3,%0\n" \
10024@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10025 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10026
10027 #define __put_user_asm_ex_u64(x, addr) \
10028- asm volatile("1: movl %%eax,0(%1)\n" \
10029- "2: movl %%edx,4(%1)\n" \
10030+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10031+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10032 "3:\n" \
10033 _ASM_EXTABLE(1b, 2b - 1b) \
10034 _ASM_EXTABLE(2b, 3b - 2b) \
10035@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10036 __typeof__(*(ptr)) __pu_val; \
10037 __chk_user_ptr(ptr); \
10038 might_fault(); \
10039- __pu_val = x; \
10040+ __pu_val = (x); \
10041 switch (sizeof(*(ptr))) { \
10042 case 1: \
10043 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10044@@ -373,7 +415,7 @@ do { \
10045 } while (0)
10046
10047 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10048- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10049+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10050 "2:\n" \
10051 ".section .fixup,\"ax\"\n" \
10052 "3: mov %3,%0\n" \
10053@@ -381,7 +423,7 @@ do { \
10054 " jmp 2b\n" \
10055 ".previous\n" \
10056 _ASM_EXTABLE(1b, 3b) \
10057- : "=r" (err), ltype(x) \
10058+ : "=r" (err), ltype (x) \
10059 : "m" (__m(addr)), "i" (errret), "0" (err))
10060
10061 #define __get_user_size_ex(x, ptr, size) \
10062@@ -406,7 +448,7 @@ do { \
10063 } while (0)
10064
10065 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10066- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10067+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10068 "2:\n" \
10069 _ASM_EXTABLE(1b, 2b - 1b) \
10070 : ltype(x) : "m" (__m(addr)))
10071@@ -423,13 +465,24 @@ do { \
10072 int __gu_err; \
10073 unsigned long __gu_val; \
10074 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10075- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10076+ (x) = (__typeof__(*(ptr)))__gu_val; \
10077 __gu_err; \
10078 })
10079
10080 /* FIXME: this hack is definitely wrong -AK */
10081 struct __large_struct { unsigned long buf[100]; };
10082-#define __m(x) (*(struct __large_struct __user *)(x))
10083+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10084+#define ____m(x) \
10085+({ \
10086+ unsigned long ____x = (unsigned long)(x); \
10087+ if (____x < PAX_USER_SHADOW_BASE) \
10088+ ____x += PAX_USER_SHADOW_BASE; \
10089+ (void __user *)____x; \
10090+})
10091+#else
10092+#define ____m(x) (x)
10093+#endif
10094+#define __m(x) (*(struct __large_struct __user *)____m(x))
10095
10096 /*
10097 * Tell gcc we read from memory instead of writing: this is because
10098@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10099 * aliasing issues.
10100 */
10101 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10102- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10103+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10104 "2:\n" \
10105 ".section .fixup,\"ax\"\n" \
10106 "3: mov %3,%0\n" \
10107@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10108 ".previous\n" \
10109 _ASM_EXTABLE(1b, 3b) \
10110 : "=r"(err) \
10111- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10112+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10113
10114 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10115- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10116+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10117 "2:\n" \
10118 _ASM_EXTABLE(1b, 2b - 1b) \
10119 : : ltype(x), "m" (__m(addr)))
10120@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10121 * On error, the variable @x is set to zero.
10122 */
10123
10124+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10125+#define __get_user(x, ptr) get_user((x), (ptr))
10126+#else
10127 #define __get_user(x, ptr) \
10128 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10129+#endif
10130
10131 /**
10132 * __put_user: - Write a simple value into user space, with less checking.
10133@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10134 * Returns zero on success, or -EFAULT on error.
10135 */
10136
10137+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10138+#define __put_user(x, ptr) put_user((x), (ptr))
10139+#else
10140 #define __put_user(x, ptr) \
10141 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10142+#endif
10143
10144 #define __get_user_unaligned __get_user
10145 #define __put_user_unaligned __put_user
10146@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10147 #define get_user_ex(x, ptr) do { \
10148 unsigned long __gue_val; \
10149 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10150- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10151+ (x) = (__typeof__(*(ptr)))__gue_val; \
10152 } while (0)
10153
10154 #ifdef CONFIG_X86_WP_WORKS_OK
10155diff -urNp linux-3.1.1/arch/x86/include/asm/vdso.h linux-3.1.1/arch/x86/include/asm/vdso.h
10156--- linux-3.1.1/arch/x86/include/asm/vdso.h 2011-11-11 15:19:27.000000000 -0500
10157+++ linux-3.1.1/arch/x86/include/asm/vdso.h 2011-11-16 18:39:07.000000000 -0500
10158@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10159 #define VDSO32_SYMBOL(base, name) \
10160 ({ \
10161 extern const char VDSO32_##name[]; \
10162- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10163+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10164 })
10165 #endif
10166
10167diff -urNp linux-3.1.1/arch/x86/include/asm/x86_init.h linux-3.1.1/arch/x86/include/asm/x86_init.h
10168--- linux-3.1.1/arch/x86/include/asm/x86_init.h 2011-11-11 15:19:27.000000000 -0500
10169+++ linux-3.1.1/arch/x86/include/asm/x86_init.h 2011-11-16 18:39:07.000000000 -0500
10170@@ -28,7 +28,7 @@ struct x86_init_mpparse {
10171 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10172 void (*find_smp_config)(void);
10173 void (*get_smp_config)(unsigned int early);
10174-};
10175+} __no_const;
10176
10177 /**
10178 * struct x86_init_resources - platform specific resource related ops
10179@@ -42,7 +42,7 @@ struct x86_init_resources {
10180 void (*probe_roms)(void);
10181 void (*reserve_resources)(void);
10182 char *(*memory_setup)(void);
10183-};
10184+} __no_const;
10185
10186 /**
10187 * struct x86_init_irqs - platform specific interrupt setup
10188@@ -55,7 +55,7 @@ struct x86_init_irqs {
10189 void (*pre_vector_init)(void);
10190 void (*intr_init)(void);
10191 void (*trap_init)(void);
10192-};
10193+} __no_const;
10194
10195 /**
10196 * struct x86_init_oem - oem platform specific customizing functions
10197@@ -65,7 +65,7 @@ struct x86_init_irqs {
10198 struct x86_init_oem {
10199 void (*arch_setup)(void);
10200 void (*banner)(void);
10201-};
10202+} __no_const;
10203
10204 /**
10205 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10206@@ -76,7 +76,7 @@ struct x86_init_oem {
10207 */
10208 struct x86_init_mapping {
10209 void (*pagetable_reserve)(u64 start, u64 end);
10210-};
10211+} __no_const;
10212
10213 /**
10214 * struct x86_init_paging - platform specific paging functions
10215@@ -86,7 +86,7 @@ struct x86_init_mapping {
10216 struct x86_init_paging {
10217 void (*pagetable_setup_start)(pgd_t *base);
10218 void (*pagetable_setup_done)(pgd_t *base);
10219-};
10220+} __no_const;
10221
10222 /**
10223 * struct x86_init_timers - platform specific timer setup
10224@@ -101,7 +101,7 @@ struct x86_init_timers {
10225 void (*tsc_pre_init)(void);
10226 void (*timer_init)(void);
10227 void (*wallclock_init)(void);
10228-};
10229+} __no_const;
10230
10231 /**
10232 * struct x86_init_iommu - platform specific iommu setup
10233@@ -109,7 +109,7 @@ struct x86_init_timers {
10234 */
10235 struct x86_init_iommu {
10236 int (*iommu_init)(void);
10237-};
10238+} __no_const;
10239
10240 /**
10241 * struct x86_init_pci - platform specific pci init functions
10242@@ -123,7 +123,7 @@ struct x86_init_pci {
10243 int (*init)(void);
10244 void (*init_irq)(void);
10245 void (*fixup_irqs)(void);
10246-};
10247+} __no_const;
10248
10249 /**
10250 * struct x86_init_ops - functions for platform specific setup
10251@@ -139,7 +139,7 @@ struct x86_init_ops {
10252 struct x86_init_timers timers;
10253 struct x86_init_iommu iommu;
10254 struct x86_init_pci pci;
10255-};
10256+} __no_const;
10257
10258 /**
10259 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10260@@ -147,7 +147,7 @@ struct x86_init_ops {
10261 */
10262 struct x86_cpuinit_ops {
10263 void (*setup_percpu_clockev)(void);
10264-};
10265+} __no_const;
10266
10267 /**
10268 * struct x86_platform_ops - platform specific runtime functions
10269@@ -166,7 +166,7 @@ struct x86_platform_ops {
10270 bool (*is_untracked_pat_range)(u64 start, u64 end);
10271 void (*nmi_init)(void);
10272 int (*i8042_detect)(void);
10273-};
10274+} __no_const;
10275
10276 struct pci_dev;
10277
10278@@ -174,7 +174,7 @@ struct x86_msi_ops {
10279 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10280 void (*teardown_msi_irq)(unsigned int irq);
10281 void (*teardown_msi_irqs)(struct pci_dev *dev);
10282-};
10283+} __no_const;
10284
10285 extern struct x86_init_ops x86_init;
10286 extern struct x86_cpuinit_ops x86_cpuinit;
10287diff -urNp linux-3.1.1/arch/x86/include/asm/xsave.h linux-3.1.1/arch/x86/include/asm/xsave.h
10288--- linux-3.1.1/arch/x86/include/asm/xsave.h 2011-11-11 15:19:27.000000000 -0500
10289+++ linux-3.1.1/arch/x86/include/asm/xsave.h 2011-11-16 18:39:07.000000000 -0500
10290@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10291 {
10292 int err;
10293
10294+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10295+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10296+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10297+#endif
10298+
10299 /*
10300 * Clear the xsave header first, so that reserved fields are
10301 * initialized to zero.
10302@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10303 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10304 {
10305 int err;
10306- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10307+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10308 u32 lmask = mask;
10309 u32 hmask = mask >> 32;
10310
10311+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10312+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10313+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10314+#endif
10315+
10316 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10317 "2:\n"
10318 ".section .fixup,\"ax\"\n"
10319diff -urNp linux-3.1.1/arch/x86/Kconfig linux-3.1.1/arch/x86/Kconfig
10320--- linux-3.1.1/arch/x86/Kconfig 2011-11-11 15:19:27.000000000 -0500
10321+++ linux-3.1.1/arch/x86/Kconfig 2011-11-16 18:40:08.000000000 -0500
10322@@ -236,7 +236,7 @@ config X86_HT
10323
10324 config X86_32_LAZY_GS
10325 def_bool y
10326- depends on X86_32 && !CC_STACKPROTECTOR
10327+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10328
10329 config ARCH_HWEIGHT_CFLAGS
10330 string
10331@@ -1019,7 +1019,7 @@ choice
10332
10333 config NOHIGHMEM
10334 bool "off"
10335- depends on !X86_NUMAQ
10336+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10337 ---help---
10338 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10339 However, the address space of 32-bit x86 processors is only 4
10340@@ -1056,7 +1056,7 @@ config NOHIGHMEM
10341
10342 config HIGHMEM4G
10343 bool "4GB"
10344- depends on !X86_NUMAQ
10345+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10346 ---help---
10347 Select this if you have a 32-bit processor and between 1 and 4
10348 gigabytes of physical RAM.
10349@@ -1110,7 +1110,7 @@ config PAGE_OFFSET
10350 hex
10351 default 0xB0000000 if VMSPLIT_3G_OPT
10352 default 0x80000000 if VMSPLIT_2G
10353- default 0x78000000 if VMSPLIT_2G_OPT
10354+ default 0x70000000 if VMSPLIT_2G_OPT
10355 default 0x40000000 if VMSPLIT_1G
10356 default 0xC0000000
10357 depends on X86_32
10358@@ -1484,6 +1484,7 @@ config SECCOMP
10359
10360 config CC_STACKPROTECTOR
10361 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10362+ depends on X86_64 || !PAX_MEMORY_UDEREF
10363 ---help---
10364 This option turns on the -fstack-protector GCC feature. This
10365 feature puts, at the beginning of functions, a canary value on
10366@@ -1541,6 +1542,7 @@ config KEXEC_JUMP
10367 config PHYSICAL_START
10368 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10369 default "0x1000000"
10370+ range 0x400000 0x40000000
10371 ---help---
10372 This gives the physical address where the kernel is loaded.
10373
10374@@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS
10375 config PHYSICAL_ALIGN
10376 hex "Alignment value to which kernel should be aligned" if X86_32
10377 default "0x1000000"
10378+ range 0x400000 0x1000000 if PAX_KERNEXEC
10379 range 0x2000 0x1000000
10380 ---help---
10381 This value puts the alignment restrictions on physical address
10382@@ -1635,9 +1638,10 @@ config HOTPLUG_CPU
10383 Say N if you want to disable CPU hotplug.
10384
10385 config COMPAT_VDSO
10386- def_bool y
10387+ def_bool n
10388 prompt "Compat VDSO support"
10389 depends on X86_32 || IA32_EMULATION
10390+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10391 ---help---
10392 Map the 32-bit VDSO to the predictable old-style address too.
10393
10394diff -urNp linux-3.1.1/arch/x86/Kconfig.cpu linux-3.1.1/arch/x86/Kconfig.cpu
10395--- linux-3.1.1/arch/x86/Kconfig.cpu 2011-11-11 15:19:27.000000000 -0500
10396+++ linux-3.1.1/arch/x86/Kconfig.cpu 2011-11-16 18:39:07.000000000 -0500
10397@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
10398
10399 config X86_F00F_BUG
10400 def_bool y
10401- depends on M586MMX || M586TSC || M586 || M486 || M386
10402+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10403
10404 config X86_INVD_BUG
10405 def_bool y
10406@@ -365,7 +365,7 @@ config X86_POPAD_OK
10407
10408 config X86_ALIGNMENT_16
10409 def_bool y
10410- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10411+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10412
10413 config X86_INTEL_USERCOPY
10414 def_bool y
10415@@ -411,7 +411,7 @@ config X86_CMPXCHG64
10416 # generates cmov.
10417 config X86_CMOV
10418 def_bool y
10419- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10420+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10421
10422 config X86_MINIMUM_CPU_FAMILY
10423 int
10424diff -urNp linux-3.1.1/arch/x86/Kconfig.debug linux-3.1.1/arch/x86/Kconfig.debug
10425--- linux-3.1.1/arch/x86/Kconfig.debug 2011-11-11 15:19:27.000000000 -0500
10426+++ linux-3.1.1/arch/x86/Kconfig.debug 2011-11-16 18:39:07.000000000 -0500
10427@@ -81,7 +81,7 @@ config X86_PTDUMP
10428 config DEBUG_RODATA
10429 bool "Write protect kernel read-only data structures"
10430 default y
10431- depends on DEBUG_KERNEL
10432+ depends on DEBUG_KERNEL && BROKEN
10433 ---help---
10434 Mark the kernel read-only data as write-protected in the pagetables,
10435 in order to catch accidental (and incorrect) writes to such const
10436@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10437
10438 config DEBUG_SET_MODULE_RONX
10439 bool "Set loadable kernel module data as NX and text as RO"
10440- depends on MODULES
10441+ depends on MODULES && BROKEN
10442 ---help---
10443 This option helps catch unintended modifications to loadable
10444 kernel module's text and read-only data. It also prevents execution
10445diff -urNp linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile
10446--- linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile 2011-11-11 15:19:27.000000000 -0500
10447+++ linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile 2011-11-16 18:39:07.000000000 -0500
10448@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10449 $(call cc-option, -fno-stack-protector) \
10450 $(call cc-option, -mpreferred-stack-boundary=2)
10451 KBUILD_CFLAGS += $(call cc-option, -m32)
10452+ifdef CONSTIFY_PLUGIN
10453+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10454+endif
10455 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10456 GCOV_PROFILE := n
10457
10458diff -urNp linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S
10459--- linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-11 15:19:27.000000000 -0500
10460+++ linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-16 18:40:08.000000000 -0500
10461@@ -108,6 +108,9 @@ wakeup_code:
10462 /* Do any other stuff... */
10463
10464 #ifndef CONFIG_64BIT
10465+	/* Recheck NX bit overrides (64bit path does this in trampoline) */
10466+ call verify_cpu
10467+
10468 /* This could also be done in C code... */
10469 movl pmode_cr3, %eax
10470 movl %eax, %cr3
10471@@ -131,6 +134,7 @@ wakeup_code:
10472 movl pmode_cr0, %eax
10473 movl %eax, %cr0
10474 jmp pmode_return
10475+# include "../../verify_cpu.S"
10476 #else
10477 pushw $0
10478 pushw trampoline_segment
10479diff -urNp linux-3.1.1/arch/x86/kernel/acpi/sleep.c linux-3.1.1/arch/x86/kernel/acpi/sleep.c
10480--- linux-3.1.1/arch/x86/kernel/acpi/sleep.c 2011-11-11 15:19:27.000000000 -0500
10481+++ linux-3.1.1/arch/x86/kernel/acpi/sleep.c 2011-11-16 18:39:07.000000000 -0500
10482@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10483 header->trampoline_segment = trampoline_address() >> 4;
10484 #ifdef CONFIG_SMP
10485 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10486+
10487+ pax_open_kernel();
10488 early_gdt_descr.address =
10489 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10490+ pax_close_kernel();
10491+
10492 initial_gs = per_cpu_offset(smp_processor_id());
10493 #endif
10494 initial_code = (unsigned long)wakeup_long64;
10495diff -urNp linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S
10496--- linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S 2011-11-11 15:19:27.000000000 -0500
10497+++ linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S 2011-11-16 18:39:07.000000000 -0500
10498@@ -30,13 +30,11 @@ wakeup_pmode_return:
10499 # and restore the stack ... but you need gdt for this to work
10500 movl saved_context_esp, %esp
10501
10502- movl %cs:saved_magic, %eax
10503- cmpl $0x12345678, %eax
10504+ cmpl $0x12345678, saved_magic
10505 jne bogus_magic
10506
10507 # jump to place where we left off
10508- movl saved_eip, %eax
10509- jmp *%eax
10510+ jmp *(saved_eip)
10511
10512 bogus_magic:
10513 jmp bogus_magic
10514diff -urNp linux-3.1.1/arch/x86/kernel/alternative.c linux-3.1.1/arch/x86/kernel/alternative.c
10515--- linux-3.1.1/arch/x86/kernel/alternative.c 2011-11-11 15:19:27.000000000 -0500
10516+++ linux-3.1.1/arch/x86/kernel/alternative.c 2011-11-16 18:39:07.000000000 -0500
10517@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives
10518 */
10519 for (a = start; a < end; a++) {
10520 instr = (u8 *)&a->instr_offset + a->instr_offset;
10521+
10522+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10523+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10524+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
10525+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10526+#endif
10527+
10528 replacement = (u8 *)&a->repl_offset + a->repl_offset;
10529 BUG_ON(a->replacementlen > a->instrlen);
10530 BUG_ON(a->instrlen > sizeof(insnbuf));
10531@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const
10532 for (poff = start; poff < end; poff++) {
10533 u8 *ptr = (u8 *)poff + *poff;
10534
10535+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10536+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10537+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
10538+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10539+#endif
10540+
10541 if (!*poff || ptr < text || ptr >= text_end)
10542 continue;
10543 /* turn DS segment override prefix into lock prefix */
10544- if (*ptr == 0x3e)
10545+ if (*ktla_ktva(ptr) == 0x3e)
10546 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10547 };
10548 mutex_unlock(&text_mutex);
10549@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(cons
10550 for (poff = start; poff < end; poff++) {
10551 u8 *ptr = (u8 *)poff + *poff;
10552
10553+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10554+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10555+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
10556+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10557+#endif
10558+
10559 if (!*poff || ptr < text || ptr >= text_end)
10560 continue;
10561 /* turn lock prefix into DS segment override prefix */
10562- if (*ptr == 0xf0)
10563+ if (*ktla_ktva(ptr) == 0xf0)
10564 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10565 };
10566 mutex_unlock(&text_mutex);
10567@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(str
10568
10569 BUG_ON(p->len > MAX_PATCH_LEN);
10570 /* prep the buffer with the original instructions */
10571- memcpy(insnbuf, p->instr, p->len);
10572+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10573 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10574 (unsigned long)p->instr, p->len);
10575
10576@@ -568,7 +587,7 @@ void __init alternative_instructions(voi
10577 if (smp_alt_once)
10578 free_init_pages("SMP alternatives",
10579 (unsigned long)__smp_locks,
10580- (unsigned long)__smp_locks_end);
10581+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10582
10583 restart_nmi();
10584 }
10585@@ -585,13 +604,17 @@ void __init alternative_instructions(voi
10586 * instructions. And on the local CPU you need to be protected again NMI or MCE
10587 * handlers seeing an inconsistent instruction while you patch.
10588 */
10589-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10590+void *__kprobes text_poke_early(void *addr, const void *opcode,
10591 size_t len)
10592 {
10593 unsigned long flags;
10594 local_irq_save(flags);
10595- memcpy(addr, opcode, len);
10596+
10597+ pax_open_kernel();
10598+ memcpy(ktla_ktva(addr), opcode, len);
10599 sync_core();
10600+ pax_close_kernel();
10601+
10602 local_irq_restore(flags);
10603 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10604 that causes hangs on some VIA CPUs. */
10605@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(v
10606 */
10607 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10608 {
10609- unsigned long flags;
10610- char *vaddr;
10611+ unsigned char *vaddr = ktla_ktva(addr);
10612 struct page *pages[2];
10613- int i;
10614+ size_t i;
10615
10616 if (!core_kernel_text((unsigned long)addr)) {
10617- pages[0] = vmalloc_to_page(addr);
10618- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10619+ pages[0] = vmalloc_to_page(vaddr);
10620+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10621 } else {
10622- pages[0] = virt_to_page(addr);
10623+ pages[0] = virt_to_page(vaddr);
10624 WARN_ON(!PageReserved(pages[0]));
10625- pages[1] = virt_to_page(addr + PAGE_SIZE);
10626+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10627 }
10628 BUG_ON(!pages[0]);
10629- local_irq_save(flags);
10630- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10631- if (pages[1])
10632- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10633- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10634- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10635- clear_fixmap(FIX_TEXT_POKE0);
10636- if (pages[1])
10637- clear_fixmap(FIX_TEXT_POKE1);
10638- local_flush_tlb();
10639- sync_core();
10640- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10641- that causes hangs on some VIA CPUs. */
10642+ text_poke_early(addr, opcode, len);
10643 for (i = 0; i < len; i++)
10644- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10645- local_irq_restore(flags);
10646+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10647 return addr;
10648 }
10649
10650diff -urNp linux-3.1.1/arch/x86/kernel/apic/apic.c linux-3.1.1/arch/x86/kernel/apic/apic.c
10651--- linux-3.1.1/arch/x86/kernel/apic/apic.c 2011-11-11 15:19:27.000000000 -0500
10652+++ linux-3.1.1/arch/x86/kernel/apic/apic.c 2011-11-16 18:40:08.000000000 -0500
10653@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
10654 /*
10655 * Debug level, exported for io_apic.c
10656 */
10657-unsigned int apic_verbosity;
10658+int apic_verbosity;
10659
10660 int pic_mode;
10661
10662@@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs
10663 apic_write(APIC_ESR, 0);
10664 v1 = apic_read(APIC_ESR);
10665 ack_APIC_irq();
10666- atomic_inc(&irq_err_count);
10667+ atomic_inc_unchecked(&irq_err_count);
10668
10669 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10670 smp_processor_id(), v0 , v1);
10671@@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(vo
10672 u16 *bios_cpu_apicid;
10673 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10674
10675+ pax_track_stack();
10676+
10677 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10678 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10679
10680diff -urNp linux-3.1.1/arch/x86/kernel/apic/io_apic.c linux-3.1.1/arch/x86/kernel/apic/io_apic.c
10681--- linux-3.1.1/arch/x86/kernel/apic/io_apic.c 2011-11-11 15:19:27.000000000 -0500
10682+++ linux-3.1.1/arch/x86/kernel/apic/io_apic.c 2011-11-16 18:39:07.000000000 -0500
10683@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10684 }
10685 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10686
10687-void lock_vector_lock(void)
10688+void lock_vector_lock(void) __acquires(vector_lock)
10689 {
10690 /* Used to the online set of cpus does not change
10691 * during assign_irq_vector.
10692@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10693 raw_spin_lock(&vector_lock);
10694 }
10695
10696-void unlock_vector_lock(void)
10697+void unlock_vector_lock(void) __releases(vector_lock)
10698 {
10699 raw_spin_unlock(&vector_lock);
10700 }
10701@@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_dat
10702 ack_APIC_irq();
10703 }
10704
10705-atomic_t irq_mis_count;
10706+atomic_unchecked_t irq_mis_count;
10707
10708 /*
10709 * IO-APIC versions below 0x20 don't support EOI register.
10710@@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_da
10711 * at the cpu.
10712 */
10713 if (!(v & (1 << (i & 0x1f)))) {
10714- atomic_inc(&irq_mis_count);
10715+ atomic_inc_unchecked(&irq_mis_count);
10716
10717 eoi_ioapic_irq(irq, cfg);
10718 }
10719diff -urNp linux-3.1.1/arch/x86/kernel/apm_32.c linux-3.1.1/arch/x86/kernel/apm_32.c
10720--- linux-3.1.1/arch/x86/kernel/apm_32.c 2011-11-11 15:19:27.000000000 -0500
10721+++ linux-3.1.1/arch/x86/kernel/apm_32.c 2011-11-16 18:39:07.000000000 -0500
10722@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10723 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10724 * even though they are called in protected mode.
10725 */
10726-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10727+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10728 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10729
10730 static const char driver_version[] = "1.16ac"; /* no spaces */
10731@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10732 BUG_ON(cpu != 0);
10733 gdt = get_cpu_gdt_table(cpu);
10734 save_desc_40 = gdt[0x40 / 8];
10735+
10736+ pax_open_kernel();
10737 gdt[0x40 / 8] = bad_bios_desc;
10738+ pax_close_kernel();
10739
10740 apm_irq_save(flags);
10741 APM_DO_SAVE_SEGS;
10742@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10743 &call->esi);
10744 APM_DO_RESTORE_SEGS;
10745 apm_irq_restore(flags);
10746+
10747+ pax_open_kernel();
10748 gdt[0x40 / 8] = save_desc_40;
10749+ pax_close_kernel();
10750+
10751 put_cpu();
10752
10753 return call->eax & 0xff;
10754@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10755 BUG_ON(cpu != 0);
10756 gdt = get_cpu_gdt_table(cpu);
10757 save_desc_40 = gdt[0x40 / 8];
10758+
10759+ pax_open_kernel();
10760 gdt[0x40 / 8] = bad_bios_desc;
10761+ pax_close_kernel();
10762
10763 apm_irq_save(flags);
10764 APM_DO_SAVE_SEGS;
10765@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10766 &call->eax);
10767 APM_DO_RESTORE_SEGS;
10768 apm_irq_restore(flags);
10769+
10770+ pax_open_kernel();
10771 gdt[0x40 / 8] = save_desc_40;
10772+ pax_close_kernel();
10773+
10774 put_cpu();
10775 return error;
10776 }
10777@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10778 * code to that CPU.
10779 */
10780 gdt = get_cpu_gdt_table(0);
10781+
10782+ pax_open_kernel();
10783 set_desc_base(&gdt[APM_CS >> 3],
10784 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10785 set_desc_base(&gdt[APM_CS_16 >> 3],
10786 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10787 set_desc_base(&gdt[APM_DS >> 3],
10788 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10789+ pax_close_kernel();
10790
10791 proc_create("apm", 0, NULL, &apm_file_ops);
10792
10793diff -urNp linux-3.1.1/arch/x86/kernel/asm-offsets_64.c linux-3.1.1/arch/x86/kernel/asm-offsets_64.c
10794--- linux-3.1.1/arch/x86/kernel/asm-offsets_64.c 2011-11-11 15:19:27.000000000 -0500
10795+++ linux-3.1.1/arch/x86/kernel/asm-offsets_64.c 2011-11-16 18:39:07.000000000 -0500
10796@@ -69,6 +69,7 @@ int main(void)
10797 BLANK();
10798 #undef ENTRY
10799
10800+ DEFINE(TSS_size, sizeof(struct tss_struct));
10801 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10802 BLANK();
10803
10804diff -urNp linux-3.1.1/arch/x86/kernel/asm-offsets.c linux-3.1.1/arch/x86/kernel/asm-offsets.c
10805--- linux-3.1.1/arch/x86/kernel/asm-offsets.c 2011-11-11 15:19:27.000000000 -0500
10806+++ linux-3.1.1/arch/x86/kernel/asm-offsets.c 2011-11-16 18:39:07.000000000 -0500
10807@@ -33,6 +33,8 @@ void common(void) {
10808 OFFSET(TI_status, thread_info, status);
10809 OFFSET(TI_addr_limit, thread_info, addr_limit);
10810 OFFSET(TI_preempt_count, thread_info, preempt_count);
10811+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10812+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10813
10814 BLANK();
10815 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10816@@ -53,8 +55,26 @@ void common(void) {
10817 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10818 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10819 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10820+
10821+#ifdef CONFIG_PAX_KERNEXEC
10822+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10823+#endif
10824+
10825+#ifdef CONFIG_PAX_MEMORY_UDEREF
10826+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10827+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10828+#ifdef CONFIG_X86_64
10829+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10830+#endif
10831 #endif
10832
10833+#endif
10834+
10835+ BLANK();
10836+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10837+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10838+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10839+
10840 #ifdef CONFIG_XEN
10841 BLANK();
10842 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10843diff -urNp linux-3.1.1/arch/x86/kernel/cpu/amd.c linux-3.1.1/arch/x86/kernel/cpu/amd.c
10844--- linux-3.1.1/arch/x86/kernel/cpu/amd.c 2011-11-11 15:19:27.000000000 -0500
10845+++ linux-3.1.1/arch/x86/kernel/cpu/amd.c 2011-11-16 18:39:07.000000000 -0500
10846@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10847 unsigned int size)
10848 {
10849 /* AMD errata T13 (order #21922) */
10850- if ((c->x86 == 6)) {
10851+ if (c->x86 == 6) {
10852 /* Duron Rev A0 */
10853 if (c->x86_model == 3 && c->x86_mask == 0)
10854 size = 64;
10855diff -urNp linux-3.1.1/arch/x86/kernel/cpu/common.c linux-3.1.1/arch/x86/kernel/cpu/common.c
10856--- linux-3.1.1/arch/x86/kernel/cpu/common.c 2011-11-11 15:19:27.000000000 -0500
10857+++ linux-3.1.1/arch/x86/kernel/cpu/common.c 2011-11-16 18:39:07.000000000 -0500
10858@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10859
10860 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10861
10862-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10863-#ifdef CONFIG_X86_64
10864- /*
10865- * We need valid kernel segments for data and code in long mode too
10866- * IRET will check the segment types kkeil 2000/10/28
10867- * Also sysret mandates a special GDT layout
10868- *
10869- * TLS descriptors are currently at a different place compared to i386.
10870- * Hopefully nobody expects them at a fixed place (Wine?)
10871- */
10872- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10873- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10874- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10875- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10876- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10877- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10878-#else
10879- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10880- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10881- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10882- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10883- /*
10884- * Segments used for calling PnP BIOS have byte granularity.
10885- * They code segments and data segments have fixed 64k limits,
10886- * the transfer segment sizes are set at run time.
10887- */
10888- /* 32-bit code */
10889- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10890- /* 16-bit code */
10891- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10892- /* 16-bit data */
10893- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10894- /* 16-bit data */
10895- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10896- /* 16-bit data */
10897- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10898- /*
10899- * The APM segments have byte granularity and their bases
10900- * are set at run time. All have 64k limits.
10901- */
10902- /* 32-bit code */
10903- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10904- /* 16-bit code */
10905- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10906- /* data */
10907- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10908-
10909- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10910- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10911- GDT_STACK_CANARY_INIT
10912-#endif
10913-} };
10914-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10915-
10916 static int __init x86_xsave_setup(char *s)
10917 {
10918 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10919@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10920 {
10921 struct desc_ptr gdt_descr;
10922
10923- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10924+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10925 gdt_descr.size = GDT_SIZE - 1;
10926 load_gdt(&gdt_descr);
10927 /* Reload the per-cpu base */
10928@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10929 /* Filter out anything that depends on CPUID levels we don't have */
10930 filter_cpuid_features(c, true);
10931
10932+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10933+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10934+#endif
10935+
10936 /* If the model name is still unset, do table lookup. */
10937 if (!c->x86_model_id[0]) {
10938 const char *p;
10939@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10940 }
10941 __setup("clearcpuid=", setup_disablecpuid);
10942
10943+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10944+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10945+
10946 #ifdef CONFIG_X86_64
10947 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10948
10949@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10950 EXPORT_PER_CPU_SYMBOL(current_task);
10951
10952 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10953- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10954+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10955 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10956
10957 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10958@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10959 {
10960 memset(regs, 0, sizeof(struct pt_regs));
10961 regs->fs = __KERNEL_PERCPU;
10962- regs->gs = __KERNEL_STACK_CANARY;
10963+ savesegment(gs, regs->gs);
10964
10965 return regs;
10966 }
10967@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10968 int i;
10969
10970 cpu = stack_smp_processor_id();
10971- t = &per_cpu(init_tss, cpu);
10972+ t = init_tss + cpu;
10973 oist = &per_cpu(orig_ist, cpu);
10974
10975 #ifdef CONFIG_NUMA
10976@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10977 switch_to_new_gdt(cpu);
10978 loadsegment(fs, 0);
10979
10980- load_idt((const struct desc_ptr *)&idt_descr);
10981+ load_idt(&idt_descr);
10982
10983 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10984 syscall_init();
10985@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10986 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10987 barrier();
10988
10989- x86_configure_nx();
10990 if (cpu != 0)
10991 enable_x2apic();
10992
10993@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10994 {
10995 int cpu = smp_processor_id();
10996 struct task_struct *curr = current;
10997- struct tss_struct *t = &per_cpu(init_tss, cpu);
10998+ struct tss_struct *t = init_tss + cpu;
10999 struct thread_struct *thread = &curr->thread;
11000
11001 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11002diff -urNp linux-3.1.1/arch/x86/kernel/cpu/intel.c linux-3.1.1/arch/x86/kernel/cpu/intel.c
11003--- linux-3.1.1/arch/x86/kernel/cpu/intel.c 2011-11-11 15:19:27.000000000 -0500
11004+++ linux-3.1.1/arch/x86/kernel/cpu/intel.c 2011-11-16 18:39:07.000000000 -0500
11005@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
11006 * Update the IDT descriptor and reload the IDT so that
11007 * it uses the read-only mapped virtual address.
11008 */
11009- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11010+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11011 load_idt(&idt_descr);
11012 }
11013 #endif
11014diff -urNp linux-3.1.1/arch/x86/kernel/cpu/Makefile linux-3.1.1/arch/x86/kernel/cpu/Makefile
11015--- linux-3.1.1/arch/x86/kernel/cpu/Makefile 2011-11-11 15:19:27.000000000 -0500
11016+++ linux-3.1.1/arch/x86/kernel/cpu/Makefile 2011-11-16 18:39:07.000000000 -0500
11017@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11018 CFLAGS_REMOVE_perf_event.o = -pg
11019 endif
11020
11021-# Make sure load_percpu_segment has no stackprotector
11022-nostackp := $(call cc-option, -fno-stack-protector)
11023-CFLAGS_common.o := $(nostackp)
11024-
11025 obj-y := intel_cacheinfo.o scattered.o topology.o
11026 obj-y += proc.o capflags.o powerflags.o common.o
11027 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11028diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c
11029--- linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-11 15:19:27.000000000 -0500
11030+++ linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-16 18:39:07.000000000 -0500
11031@@ -42,6 +42,7 @@
11032 #include <asm/processor.h>
11033 #include <asm/mce.h>
11034 #include <asm/msr.h>
11035+#include <asm/local.h>
11036
11037 #include "mce-internal.h"
11038
11039@@ -205,7 +206,7 @@ static void print_mce(struct mce *m)
11040 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11041 m->cs, m->ip);
11042
11043- if (m->cs == __KERNEL_CS)
11044+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11045 print_symbol("{%s}", m->ip);
11046 pr_cont("\n");
11047 }
11048@@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
11049
11050 #define PANIC_TIMEOUT 5 /* 5 seconds */
11051
11052-static atomic_t mce_paniced;
11053+static atomic_unchecked_t mce_paniced;
11054
11055 static int fake_panic;
11056-static atomic_t mce_fake_paniced;
11057+static atomic_unchecked_t mce_fake_paniced;
11058
11059 /* Panic in progress. Enable interrupts and wait for final IPI */
11060 static void wait_for_panic(void)
11061@@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct
11062 /*
11063 * Make sure only one CPU runs in machine check panic
11064 */
11065- if (atomic_inc_return(&mce_paniced) > 1)
11066+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11067 wait_for_panic();
11068 barrier();
11069
11070@@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct
11071 console_verbose();
11072 } else {
11073 /* Don't log too much for fake panic */
11074- if (atomic_inc_return(&mce_fake_paniced) > 1)
11075+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11076 return;
11077 }
11078 /* First print corrected ones that are still unlogged */
11079@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
11080 * might have been modified by someone else.
11081 */
11082 rmb();
11083- if (atomic_read(&mce_paniced))
11084+ if (atomic_read_unchecked(&mce_paniced))
11085 wait_for_panic();
11086 if (!monarch_timeout)
11087 goto out;
11088@@ -1429,7 +1430,7 @@ void __cpuinit mcheck_cpu_init(struct cp
11089 */
11090
11091 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
11092-static int mce_chrdev_open_count; /* #times opened */
11093+static local_t mce_chrdev_open_count; /* #times opened */
11094 static int mce_chrdev_open_exclu; /* already open exclusive? */
11095
11096 static int mce_chrdev_open(struct inode *inode, struct file *file)
11097@@ -1437,7 +1438,7 @@ static int mce_chrdev_open(struct inode
11098 spin_lock(&mce_chrdev_state_lock);
11099
11100 if (mce_chrdev_open_exclu ||
11101- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
11102+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
11103 spin_unlock(&mce_chrdev_state_lock);
11104
11105 return -EBUSY;
11106@@ -1445,7 +1446,7 @@ static int mce_chrdev_open(struct inode
11107
11108 if (file->f_flags & O_EXCL)
11109 mce_chrdev_open_exclu = 1;
11110- mce_chrdev_open_count++;
11111+ local_inc(&mce_chrdev_open_count);
11112
11113 spin_unlock(&mce_chrdev_state_lock);
11114
11115@@ -1456,7 +1457,7 @@ static int mce_chrdev_release(struct ino
11116 {
11117 spin_lock(&mce_chrdev_state_lock);
11118
11119- mce_chrdev_open_count--;
11120+ local_dec(&mce_chrdev_open_count);
11121 mce_chrdev_open_exclu = 0;
11122
11123 spin_unlock(&mce_chrdev_state_lock);
11124@@ -2147,7 +2148,7 @@ struct dentry *mce_get_debugfs_dir(void)
11125 static void mce_reset(void)
11126 {
11127 cpu_missing = 0;
11128- atomic_set(&mce_fake_paniced, 0);
11129+ atomic_set_unchecked(&mce_fake_paniced, 0);
11130 atomic_set(&mce_executing, 0);
11131 atomic_set(&mce_callin, 0);
11132 atomic_set(&global_nwo, 0);
11133diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c
11134--- linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-11 15:19:27.000000000 -0500
11135+++ linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-16 18:39:07.000000000 -0500
11136@@ -215,7 +215,9 @@ static int inject_init(void)
11137 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11138 return -ENOMEM;
11139 printk(KERN_INFO "Machine check injector initialized\n");
11140- mce_chrdev_ops.write = mce_write;
11141+ pax_open_kernel();
11142+ *(void **)&mce_chrdev_ops.write = mce_write;
11143+ pax_close_kernel();
11144 register_die_notifier(&mce_raise_nb);
11145 return 0;
11146 }
11147diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c
11148--- linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c 2011-11-11 15:19:27.000000000 -0500
11149+++ linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c 2011-11-16 18:39:07.000000000 -0500
11150@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11151 u64 size_or_mask, size_and_mask;
11152 static bool mtrr_aps_delayed_init;
11153
11154-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11155+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11156
11157 const struct mtrr_ops *mtrr_if;
11158
11159diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h
11160--- linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-11 15:19:27.000000000 -0500
11161+++ linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-16 18:39:07.000000000 -0500
11162@@ -25,7 +25,7 @@ struct mtrr_ops {
11163 int (*validate_add_page)(unsigned long base, unsigned long size,
11164 unsigned int type);
11165 int (*have_wrcomb)(void);
11166-};
11167+} __do_const;
11168
11169 extern int generic_get_free_region(unsigned long base, unsigned long size,
11170 int replace_reg);
11171diff -urNp linux-3.1.1/arch/x86/kernel/cpu/perf_event.c linux-3.1.1/arch/x86/kernel/cpu/perf_event.c
11172--- linux-3.1.1/arch/x86/kernel/cpu/perf_event.c 2011-11-11 15:19:27.000000000 -0500
11173+++ linux-3.1.1/arch/x86/kernel/cpu/perf_event.c 2011-11-16 18:40:08.000000000 -0500
11174@@ -795,6 +795,8 @@ static int x86_schedule_events(struct cp
11175 int i, j, w, wmax, num = 0;
11176 struct hw_perf_event *hwc;
11177
11178+ pax_track_stack();
11179+
11180 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11181
11182 for (i = 0; i < n; i++) {
11183@@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchai
11184 break;
11185
11186 perf_callchain_store(entry, frame.return_address);
11187- fp = frame.next_frame;
11188+ fp = (const void __force_user *)frame.next_frame;
11189 }
11190 }
11191
11192diff -urNp linux-3.1.1/arch/x86/kernel/crash.c linux-3.1.1/arch/x86/kernel/crash.c
11193--- linux-3.1.1/arch/x86/kernel/crash.c 2011-11-11 15:19:27.000000000 -0500
11194+++ linux-3.1.1/arch/x86/kernel/crash.c 2011-11-16 18:39:07.000000000 -0500
11195@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11196 regs = args->regs;
11197
11198 #ifdef CONFIG_X86_32
11199- if (!user_mode_vm(regs)) {
11200+ if (!user_mode(regs)) {
11201 crash_fixup_ss_esp(&fixed_regs, regs);
11202 regs = &fixed_regs;
11203 }
11204diff -urNp linux-3.1.1/arch/x86/kernel/doublefault_32.c linux-3.1.1/arch/x86/kernel/doublefault_32.c
11205--- linux-3.1.1/arch/x86/kernel/doublefault_32.c 2011-11-11 15:19:27.000000000 -0500
11206+++ linux-3.1.1/arch/x86/kernel/doublefault_32.c 2011-11-16 18:39:07.000000000 -0500
11207@@ -11,7 +11,7 @@
11208
11209 #define DOUBLEFAULT_STACKSIZE (1024)
11210 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11211-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11212+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11213
11214 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11215
11216@@ -21,7 +21,7 @@ static void doublefault_fn(void)
11217 unsigned long gdt, tss;
11218
11219 store_gdt(&gdt_desc);
11220- gdt = gdt_desc.address;
11221+ gdt = (unsigned long)gdt_desc.address;
11222
11223 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11224
11225@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11226 /* 0x2 bit is always set */
11227 .flags = X86_EFLAGS_SF | 0x2,
11228 .sp = STACK_START,
11229- .es = __USER_DS,
11230+ .es = __KERNEL_DS,
11231 .cs = __KERNEL_CS,
11232 .ss = __KERNEL_DS,
11233- .ds = __USER_DS,
11234+ .ds = __KERNEL_DS,
11235 .fs = __KERNEL_PERCPU,
11236
11237 .__cr3 = __pa_nodebug(swapper_pg_dir),
11238diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack_32.c linux-3.1.1/arch/x86/kernel/dumpstack_32.c
11239--- linux-3.1.1/arch/x86/kernel/dumpstack_32.c 2011-11-11 15:19:27.000000000 -0500
11240+++ linux-3.1.1/arch/x86/kernel/dumpstack_32.c 2011-11-16 18:39:07.000000000 -0500
11241@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11242 bp = stack_frame(task, regs);
11243
11244 for (;;) {
11245- struct thread_info *context;
11246+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11247
11248- context = (struct thread_info *)
11249- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11250- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11251+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11252
11253- stack = (unsigned long *)context->previous_esp;
11254- if (!stack)
11255+ if (stack_start == task_stack_page(task))
11256 break;
11257+ stack = *(unsigned long **)stack_start;
11258 if (ops->stack(data, "IRQ") < 0)
11259 break;
11260 touch_nmi_watchdog();
11261@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11262 * When in-kernel, we also print out the stack and code at the
11263 * time of the fault..
11264 */
11265- if (!user_mode_vm(regs)) {
11266+ if (!user_mode(regs)) {
11267 unsigned int code_prologue = code_bytes * 43 / 64;
11268 unsigned int code_len = code_bytes;
11269 unsigned char c;
11270 u8 *ip;
11271+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11272
11273 printk(KERN_EMERG "Stack:\n");
11274 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11275
11276 printk(KERN_EMERG "Code: ");
11277
11278- ip = (u8 *)regs->ip - code_prologue;
11279+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11280 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11281 /* try starting at IP */
11282- ip = (u8 *)regs->ip;
11283+ ip = (u8 *)regs->ip + cs_base;
11284 code_len = code_len - code_prologue + 1;
11285 }
11286 for (i = 0; i < code_len; i++, ip++) {
11287@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11288 printk(" Bad EIP value.");
11289 break;
11290 }
11291- if (ip == (u8 *)regs->ip)
11292+ if (ip == (u8 *)regs->ip + cs_base)
11293 printk("<%02x> ", c);
11294 else
11295 printk("%02x ", c);
11296@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11297 {
11298 unsigned short ud2;
11299
11300+ ip = ktla_ktva(ip);
11301 if (ip < PAGE_OFFSET)
11302 return 0;
11303 if (probe_kernel_address((unsigned short *)ip, ud2))
11304@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
11305
11306 return ud2 == 0x0b0f;
11307 }
11308+
11309+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11310+void pax_check_alloca(unsigned long size)
11311+{
11312+ unsigned long sp = (unsigned long)&sp, stack_left;
11313+
11314+ /* all kernel stacks are of the same size */
11315+ stack_left = sp & (THREAD_SIZE - 1);
11316+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11317+}
11318+EXPORT_SYMBOL(pax_check_alloca);
11319+#endif
11320diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack_64.c linux-3.1.1/arch/x86/kernel/dumpstack_64.c
11321--- linux-3.1.1/arch/x86/kernel/dumpstack_64.c 2011-11-11 15:19:27.000000000 -0500
11322+++ linux-3.1.1/arch/x86/kernel/dumpstack_64.c 2011-11-16 18:39:07.000000000 -0500
11323@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task
11324 unsigned long *irq_stack_end =
11325 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11326 unsigned used = 0;
11327- struct thread_info *tinfo;
11328 int graph = 0;
11329 unsigned long dummy;
11330+ void *stack_start;
11331
11332 if (!task)
11333 task = current;
11334@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task
11335 * current stack address. If the stacks consist of nested
11336 * exceptions
11337 */
11338- tinfo = task_thread_info(task);
11339 for (;;) {
11340 char *id;
11341 unsigned long *estack_end;
11342+
11343 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11344 &used, &id);
11345
11346@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task
11347 if (ops->stack(data, id) < 0)
11348 break;
11349
11350- bp = ops->walk_stack(tinfo, stack, bp, ops,
11351+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11352 data, estack_end, &graph);
11353 ops->stack(data, "<EOE>");
11354 /*
11355@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task
11356 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11357 if (ops->stack(data, "IRQ") < 0)
11358 break;
11359- bp = ops->walk_stack(tinfo, stack, bp,
11360+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11361 ops, data, irq_stack_end, &graph);
11362 /*
11363 * We link to the next stack (which would be
11364@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task
11365 /*
11366 * This handles the process stack:
11367 */
11368- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11369+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11370+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11371 put_cpu();
11372 }
11373 EXPORT_SYMBOL(dump_trace);
11374@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
11375
11376 return ud2 == 0x0b0f;
11377 }
11378+
11379+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11380+void pax_check_alloca(unsigned long size)
11381+{
11382+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
11383+ unsigned cpu, used;
11384+ char *id;
11385+
11386+ /* check the process stack first */
11387+ stack_start = (unsigned long)task_stack_page(current);
11388+ stack_end = stack_start + THREAD_SIZE;
11389+ if (likely(stack_start <= sp && sp < stack_end)) {
11390+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
11391+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11392+ return;
11393+ }
11394+
11395+ cpu = get_cpu();
11396+
11397+ /* check the irq stacks */
11398+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
11399+ stack_start = stack_end - IRQ_STACK_SIZE;
11400+ if (stack_start <= sp && sp < stack_end) {
11401+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
11402+ put_cpu();
11403+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11404+ return;
11405+ }
11406+
11407+ /* check the exception stacks */
11408+ used = 0;
11409+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
11410+ stack_start = stack_end - EXCEPTION_STKSZ;
11411+ if (stack_end && stack_start <= sp && sp < stack_end) {
11412+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
11413+ put_cpu();
11414+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11415+ return;
11416+ }
11417+
11418+ put_cpu();
11419+
11420+ /* unknown stack */
11421+ BUG();
11422+}
11423+EXPORT_SYMBOL(pax_check_alloca);
11424+#endif
11425diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack.c linux-3.1.1/arch/x86/kernel/dumpstack.c
11426--- linux-3.1.1/arch/x86/kernel/dumpstack.c 2011-11-11 15:19:27.000000000 -0500
11427+++ linux-3.1.1/arch/x86/kernel/dumpstack.c 2011-11-16 18:40:08.000000000 -0500
11428@@ -2,6 +2,9 @@
11429 * Copyright (C) 1991, 1992 Linus Torvalds
11430 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11431 */
11432+#ifdef CONFIG_GRKERNSEC_HIDESYM
11433+#define __INCLUDED_BY_HIDESYM 1
11434+#endif
11435 #include <linux/kallsyms.h>
11436 #include <linux/kprobes.h>
11437 #include <linux/uaccess.h>
11438@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11439 static void
11440 print_ftrace_graph_addr(unsigned long addr, void *data,
11441 const struct stacktrace_ops *ops,
11442- struct thread_info *tinfo, int *graph)
11443+ struct task_struct *task, int *graph)
11444 {
11445- struct task_struct *task = tinfo->task;
11446 unsigned long ret_addr;
11447 int index = task->curr_ret_stack;
11448
11449@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11450 static inline void
11451 print_ftrace_graph_addr(unsigned long addr, void *data,
11452 const struct stacktrace_ops *ops,
11453- struct thread_info *tinfo, int *graph)
11454+ struct task_struct *task, int *graph)
11455 { }
11456 #endif
11457
11458@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11459 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11460 */
11461
11462-static inline int valid_stack_ptr(struct thread_info *tinfo,
11463- void *p, unsigned int size, void *end)
11464+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11465 {
11466- void *t = tinfo;
11467 if (end) {
11468 if (p < end && p >= (end-THREAD_SIZE))
11469 return 1;
11470@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11471 }
11472
11473 unsigned long
11474-print_context_stack(struct thread_info *tinfo,
11475+print_context_stack(struct task_struct *task, void *stack_start,
11476 unsigned long *stack, unsigned long bp,
11477 const struct stacktrace_ops *ops, void *data,
11478 unsigned long *end, int *graph)
11479 {
11480 struct stack_frame *frame = (struct stack_frame *)bp;
11481
11482- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11483+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11484 unsigned long addr;
11485
11486 addr = *stack;
11487@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11488 } else {
11489 ops->address(data, addr, 0);
11490 }
11491- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11492+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11493 }
11494 stack++;
11495 }
11496@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11497 EXPORT_SYMBOL_GPL(print_context_stack);
11498
11499 unsigned long
11500-print_context_stack_bp(struct thread_info *tinfo,
11501+print_context_stack_bp(struct task_struct *task, void *stack_start,
11502 unsigned long *stack, unsigned long bp,
11503 const struct stacktrace_ops *ops, void *data,
11504 unsigned long *end, int *graph)
11505@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11506 struct stack_frame *frame = (struct stack_frame *)bp;
11507 unsigned long *ret_addr = &frame->return_address;
11508
11509- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11510+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11511 unsigned long addr = *ret_addr;
11512
11513 if (!__kernel_text_address(addr))
11514@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11515 ops->address(data, addr, 1);
11516 frame = frame->next_frame;
11517 ret_addr = &frame->return_address;
11518- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11519+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11520 }
11521
11522 return (unsigned long)frame;
11523@@ -186,7 +186,7 @@ void dump_stack(void)
11524
11525 bp = stack_frame(current, NULL);
11526 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11527- current->pid, current->comm, print_tainted(),
11528+ task_pid_nr(current), current->comm, print_tainted(),
11529 init_utsname()->release,
11530 (int)strcspn(init_utsname()->version, " "),
11531 init_utsname()->version);
11532@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11533 }
11534 EXPORT_SYMBOL_GPL(oops_begin);
11535
11536+extern void gr_handle_kernel_exploit(void);
11537+
11538 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11539 {
11540 if (regs && kexec_should_crash(current))
11541@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11542 panic("Fatal exception in interrupt");
11543 if (panic_on_oops)
11544 panic("Fatal exception");
11545- do_exit(signr);
11546+
11547+ gr_handle_kernel_exploit();
11548+
11549+ do_group_exit(signr);
11550 }
11551
11552 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11553@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11554
11555 show_registers(regs);
11556 #ifdef CONFIG_X86_32
11557- if (user_mode_vm(regs)) {
11558+ if (user_mode(regs)) {
11559 sp = regs->sp;
11560 ss = regs->ss & 0xffff;
11561 } else {
11562@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11563 unsigned long flags = oops_begin();
11564 int sig = SIGSEGV;
11565
11566- if (!user_mode_vm(regs))
11567+ if (!user_mode(regs))
11568 report_bug(regs->ip, regs);
11569
11570 if (__die(str, regs, err))
11571diff -urNp linux-3.1.1/arch/x86/kernel/early_printk.c linux-3.1.1/arch/x86/kernel/early_printk.c
11572--- linux-3.1.1/arch/x86/kernel/early_printk.c 2011-11-11 15:19:27.000000000 -0500
11573+++ linux-3.1.1/arch/x86/kernel/early_printk.c 2011-11-16 18:40:08.000000000 -0500
11574@@ -7,6 +7,7 @@
11575 #include <linux/pci_regs.h>
11576 #include <linux/pci_ids.h>
11577 #include <linux/errno.h>
11578+#include <linux/sched.h>
11579 #include <asm/io.h>
11580 #include <asm/processor.h>
11581 #include <asm/fcntl.h>
11582@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11583 int n;
11584 va_list ap;
11585
11586+ pax_track_stack();
11587+
11588 va_start(ap, fmt);
11589 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11590 early_console->write(early_console, buf, n);
11591diff -urNp linux-3.1.1/arch/x86/kernel/entry_32.S linux-3.1.1/arch/x86/kernel/entry_32.S
11592--- linux-3.1.1/arch/x86/kernel/entry_32.S 2011-11-11 15:19:27.000000000 -0500
11593+++ linux-3.1.1/arch/x86/kernel/entry_32.S 2011-11-16 18:40:08.000000000 -0500
11594@@ -186,13 +186,146 @@
11595 /*CFI_REL_OFFSET gs, PT_GS*/
11596 .endm
11597 .macro SET_KERNEL_GS reg
11598+
11599+#ifdef CONFIG_CC_STACKPROTECTOR
11600 movl $(__KERNEL_STACK_CANARY), \reg
11601+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11602+ movl $(__USER_DS), \reg
11603+#else
11604+ xorl \reg, \reg
11605+#endif
11606+
11607 movl \reg, %gs
11608 .endm
11609
11610 #endif /* CONFIG_X86_32_LAZY_GS */
11611
11612-.macro SAVE_ALL
11613+.macro pax_enter_kernel
11614+#ifdef CONFIG_PAX_KERNEXEC
11615+ call pax_enter_kernel
11616+#endif
11617+.endm
11618+
11619+.macro pax_exit_kernel
11620+#ifdef CONFIG_PAX_KERNEXEC
11621+ call pax_exit_kernel
11622+#endif
11623+.endm
11624+
11625+#ifdef CONFIG_PAX_KERNEXEC
11626+ENTRY(pax_enter_kernel)
11627+#ifdef CONFIG_PARAVIRT
11628+ pushl %eax
11629+ pushl %ecx
11630+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11631+ mov %eax, %esi
11632+#else
11633+ mov %cr0, %esi
11634+#endif
11635+ bts $16, %esi
11636+ jnc 1f
11637+ mov %cs, %esi
11638+ cmp $__KERNEL_CS, %esi
11639+ jz 3f
11640+ ljmp $__KERNEL_CS, $3f
11641+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11642+2:
11643+#ifdef CONFIG_PARAVIRT
11644+ mov %esi, %eax
11645+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11646+#else
11647+ mov %esi, %cr0
11648+#endif
11649+3:
11650+#ifdef CONFIG_PARAVIRT
11651+ popl %ecx
11652+ popl %eax
11653+#endif
11654+ ret
11655+ENDPROC(pax_enter_kernel)
11656+
11657+ENTRY(pax_exit_kernel)
11658+#ifdef CONFIG_PARAVIRT
11659+ pushl %eax
11660+ pushl %ecx
11661+#endif
11662+ mov %cs, %esi
11663+ cmp $__KERNEXEC_KERNEL_CS, %esi
11664+ jnz 2f
11665+#ifdef CONFIG_PARAVIRT
11666+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11667+ mov %eax, %esi
11668+#else
11669+ mov %cr0, %esi
11670+#endif
11671+ btr $16, %esi
11672+ ljmp $__KERNEL_CS, $1f
11673+1:
11674+#ifdef CONFIG_PARAVIRT
11675+ mov %esi, %eax
11676+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11677+#else
11678+ mov %esi, %cr0
11679+#endif
11680+2:
11681+#ifdef CONFIG_PARAVIRT
11682+ popl %ecx
11683+ popl %eax
11684+#endif
11685+ ret
11686+ENDPROC(pax_exit_kernel)
11687+#endif
11688+
11689+.macro pax_erase_kstack
11690+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11691+ call pax_erase_kstack
11692+#endif
11693+.endm
11694+
11695+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11696+/*
11697+ * ebp: thread_info
11698+ * ecx, edx: can be clobbered
11699+ */
11700+ENTRY(pax_erase_kstack)
11701+ pushl %edi
11702+ pushl %eax
11703+
11704+ mov TI_lowest_stack(%ebp), %edi
11705+ mov $-0xBEEF, %eax
11706+ std
11707+
11708+1: mov %edi, %ecx
11709+ and $THREAD_SIZE_asm - 1, %ecx
11710+ shr $2, %ecx
11711+ repne scasl
11712+ jecxz 2f
11713+
11714+ cmp $2*16, %ecx
11715+ jc 2f
11716+
11717+ mov $2*16, %ecx
11718+ repe scasl
11719+ jecxz 2f
11720+ jne 1b
11721+
11722+2: cld
11723+ mov %esp, %ecx
11724+ sub %edi, %ecx
11725+ shr $2, %ecx
11726+ rep stosl
11727+
11728+ mov TI_task_thread_sp0(%ebp), %edi
11729+ sub $128, %edi
11730+ mov %edi, TI_lowest_stack(%ebp)
11731+
11732+ popl %eax
11733+ popl %edi
11734+ ret
11735+ENDPROC(pax_erase_kstack)
11736+#endif
11737+
11738+.macro __SAVE_ALL _DS
11739 cld
11740 PUSH_GS
11741 pushl_cfi %fs
11742@@ -215,7 +348,7 @@
11743 CFI_REL_OFFSET ecx, 0
11744 pushl_cfi %ebx
11745 CFI_REL_OFFSET ebx, 0
11746- movl $(__USER_DS), %edx
11747+ movl $\_DS, %edx
11748 movl %edx, %ds
11749 movl %edx, %es
11750 movl $(__KERNEL_PERCPU), %edx
11751@@ -223,6 +356,15 @@
11752 SET_KERNEL_GS %edx
11753 .endm
11754
11755+.macro SAVE_ALL
11756+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11757+ __SAVE_ALL __KERNEL_DS
11758+ pax_enter_kernel
11759+#else
11760+ __SAVE_ALL __USER_DS
11761+#endif
11762+.endm
11763+
11764 .macro RESTORE_INT_REGS
11765 popl_cfi %ebx
11766 CFI_RESTORE ebx
11767@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
11768 popfl_cfi
11769 jmp syscall_exit
11770 CFI_ENDPROC
11771-END(ret_from_fork)
11772+ENDPROC(ret_from_fork)
11773
11774 /*
11775 * Interrupt exit functions should be protected against kprobes
11776@@ -333,7 +475,15 @@ check_userspace:
11777 movb PT_CS(%esp), %al
11778 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11779 cmpl $USER_RPL, %eax
11780+
11781+#ifdef CONFIG_PAX_KERNEXEC
11782+ jae resume_userspace
11783+
11784+ PAX_EXIT_KERNEL
11785+ jmp resume_kernel
11786+#else
11787 jb resume_kernel # not returning to v8086 or userspace
11788+#endif
11789
11790 ENTRY(resume_userspace)
11791 LOCKDEP_SYS_EXIT
11792@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
11793 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11794 # int/exception return?
11795 jne work_pending
11796- jmp restore_all
11797-END(ret_from_exception)
11798+ jmp restore_all_pax
11799+ENDPROC(ret_from_exception)
11800
11801 #ifdef CONFIG_PREEMPT
11802 ENTRY(resume_kernel)
11803@@ -361,7 +511,7 @@ need_resched:
11804 jz restore_all
11805 call preempt_schedule_irq
11806 jmp need_resched
11807-END(resume_kernel)
11808+ENDPROC(resume_kernel)
11809 #endif
11810 CFI_ENDPROC
11811 /*
11812@@ -395,23 +545,34 @@ sysenter_past_esp:
11813 /*CFI_REL_OFFSET cs, 0*/
11814 /*
11815 * Push current_thread_info()->sysenter_return to the stack.
11816- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11817- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11818 */
11819- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11820+ pushl_cfi $0
11821 CFI_REL_OFFSET eip, 0
11822
11823 pushl_cfi %eax
11824 SAVE_ALL
11825+ GET_THREAD_INFO(%ebp)
11826+ movl TI_sysenter_return(%ebp),%ebp
11827+ movl %ebp,PT_EIP(%esp)
11828 ENABLE_INTERRUPTS(CLBR_NONE)
11829
11830 /*
11831 * Load the potential sixth argument from user stack.
11832 * Careful about security.
11833 */
11834+ movl PT_OLDESP(%esp),%ebp
11835+
11836+#ifdef CONFIG_PAX_MEMORY_UDEREF
11837+ mov PT_OLDSS(%esp),%ds
11838+1: movl %ds:(%ebp),%ebp
11839+ push %ss
11840+ pop %ds
11841+#else
11842 cmpl $__PAGE_OFFSET-3,%ebp
11843 jae syscall_fault
11844 1: movl (%ebp),%ebp
11845+#endif
11846+
11847 movl %ebp,PT_EBP(%esp)
11848 .section __ex_table,"a"
11849 .align 4
11850@@ -434,12 +595,24 @@ sysenter_do_call:
11851 testl $_TIF_ALLWORK_MASK, %ecx
11852 jne sysexit_audit
11853 sysenter_exit:
11854+
11855+#ifdef CONFIG_PAX_RANDKSTACK
11856+ pushl_cfi %eax
11857+ movl %esp, %eax
11858+ call pax_randomize_kstack
11859+ popl_cfi %eax
11860+#endif
11861+
11862+ pax_erase_kstack
11863+
11864 /* if something modifies registers it must also disable sysexit */
11865 movl PT_EIP(%esp), %edx
11866 movl PT_OLDESP(%esp), %ecx
11867 xorl %ebp,%ebp
11868 TRACE_IRQS_ON
11869 1: mov PT_FS(%esp), %fs
11870+2: mov PT_DS(%esp), %ds
11871+3: mov PT_ES(%esp), %es
11872 PTGS_TO_GS
11873 ENABLE_INTERRUPTS_SYSEXIT
11874
11875@@ -456,6 +629,9 @@ sysenter_audit:
11876 movl %eax,%edx /* 2nd arg: syscall number */
11877 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11878 call audit_syscall_entry
11879+
11880+ pax_erase_kstack
11881+
11882 pushl_cfi %ebx
11883 movl PT_EAX(%esp),%eax /* reload syscall number */
11884 jmp sysenter_do_call
11885@@ -482,11 +658,17 @@ sysexit_audit:
11886
11887 CFI_ENDPROC
11888 .pushsection .fixup,"ax"
11889-2: movl $0,PT_FS(%esp)
11890+4: movl $0,PT_FS(%esp)
11891+ jmp 1b
11892+5: movl $0,PT_DS(%esp)
11893+ jmp 1b
11894+6: movl $0,PT_ES(%esp)
11895 jmp 1b
11896 .section __ex_table,"a"
11897 .align 4
11898- .long 1b,2b
11899+ .long 1b,4b
11900+ .long 2b,5b
11901+ .long 3b,6b
11902 .popsection
11903 PTGS_TO_GS_EX
11904 ENDPROC(ia32_sysenter_target)
11905@@ -519,6 +701,15 @@ syscall_exit:
11906 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11907 jne syscall_exit_work
11908
11909+restore_all_pax:
11910+
11911+#ifdef CONFIG_PAX_RANDKSTACK
11912+ movl %esp, %eax
11913+ call pax_randomize_kstack
11914+#endif
11915+
11916+ pax_erase_kstack
11917+
11918 restore_all:
11919 TRACE_IRQS_IRET
11920 restore_all_notrace:
11921@@ -578,14 +769,34 @@ ldt_ss:
11922 * compensating for the offset by changing to the ESPFIX segment with
11923 * a base address that matches for the difference.
11924 */
11925-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11926+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11927 mov %esp, %edx /* load kernel esp */
11928 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11929 mov %dx, %ax /* eax: new kernel esp */
11930 sub %eax, %edx /* offset (low word is 0) */
11931+#ifdef CONFIG_SMP
11932+ movl PER_CPU_VAR(cpu_number), %ebx
11933+ shll $PAGE_SHIFT_asm, %ebx
11934+ addl $cpu_gdt_table, %ebx
11935+#else
11936+ movl $cpu_gdt_table, %ebx
11937+#endif
11938 shr $16, %edx
11939- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11940- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11941+
11942+#ifdef CONFIG_PAX_KERNEXEC
11943+ mov %cr0, %esi
11944+ btr $16, %esi
11945+ mov %esi, %cr0
11946+#endif
11947+
11948+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11949+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11950+
11951+#ifdef CONFIG_PAX_KERNEXEC
11952+ bts $16, %esi
11953+ mov %esi, %cr0
11954+#endif
11955+
11956 pushl_cfi $__ESPFIX_SS
11957 pushl_cfi %eax /* new kernel esp */
11958 /* Disable interrupts, but do not irqtrace this section: we
11959@@ -614,34 +825,28 @@ work_resched:
11960 movl TI_flags(%ebp), %ecx
11961 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11962 # than syscall tracing?
11963- jz restore_all
11964+ jz restore_all_pax
11965 testb $_TIF_NEED_RESCHED, %cl
11966 jnz work_resched
11967
11968 work_notifysig: # deal with pending signals and
11969 # notify-resume requests
11970+ movl %esp, %eax
11971 #ifdef CONFIG_VM86
11972 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11973- movl %esp, %eax
11974- jne work_notifysig_v86 # returning to kernel-space or
11975+ jz 1f # returning to kernel-space or
11976 # vm86-space
11977- xorl %edx, %edx
11978- call do_notify_resume
11979- jmp resume_userspace_sig
11980
11981- ALIGN
11982-work_notifysig_v86:
11983 pushl_cfi %ecx # save ti_flags for do_notify_resume
11984 call save_v86_state # %eax contains pt_regs pointer
11985 popl_cfi %ecx
11986 movl %eax, %esp
11987-#else
11988- movl %esp, %eax
11989+1:
11990 #endif
11991 xorl %edx, %edx
11992 call do_notify_resume
11993 jmp resume_userspace_sig
11994-END(work_pending)
11995+ENDPROC(work_pending)
11996
11997 # perform syscall exit tracing
11998 ALIGN
11999@@ -649,11 +854,14 @@ syscall_trace_entry:
12000 movl $-ENOSYS,PT_EAX(%esp)
12001 movl %esp, %eax
12002 call syscall_trace_enter
12003+
12004+ pax_erase_kstack
12005+
12006 /* What it returned is what we'll actually use. */
12007 cmpl $(nr_syscalls), %eax
12008 jnae syscall_call
12009 jmp syscall_exit
12010-END(syscall_trace_entry)
12011+ENDPROC(syscall_trace_entry)
12012
12013 # perform syscall exit tracing
12014 ALIGN
12015@@ -666,20 +874,24 @@ syscall_exit_work:
12016 movl %esp, %eax
12017 call syscall_trace_leave
12018 jmp resume_userspace
12019-END(syscall_exit_work)
12020+ENDPROC(syscall_exit_work)
12021 CFI_ENDPROC
12022
12023 RING0_INT_FRAME # can't unwind into user space anyway
12024 syscall_fault:
12025+#ifdef CONFIG_PAX_MEMORY_UDEREF
12026+ push %ss
12027+ pop %ds
12028+#endif
12029 GET_THREAD_INFO(%ebp)
12030 movl $-EFAULT,PT_EAX(%esp)
12031 jmp resume_userspace
12032-END(syscall_fault)
12033+ENDPROC(syscall_fault)
12034
12035 syscall_badsys:
12036 movl $-ENOSYS,PT_EAX(%esp)
12037 jmp resume_userspace
12038-END(syscall_badsys)
12039+ENDPROC(syscall_badsys)
12040 CFI_ENDPROC
12041 /*
12042 * End of kprobes section
12043@@ -753,6 +965,36 @@ ptregs_clone:
12044 CFI_ENDPROC
12045 ENDPROC(ptregs_clone)
12046
12047+ ALIGN;
12048+ENTRY(kernel_execve)
12049+ CFI_STARTPROC
12050+ pushl_cfi %ebp
12051+ sub $PT_OLDSS+4,%esp
12052+ pushl_cfi %edi
12053+ pushl_cfi %ecx
12054+ pushl_cfi %eax
12055+ lea 3*4(%esp),%edi
12056+ mov $PT_OLDSS/4+1,%ecx
12057+ xorl %eax,%eax
12058+ rep stosl
12059+ popl_cfi %eax
12060+ popl_cfi %ecx
12061+ popl_cfi %edi
12062+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
12063+ pushl_cfi %esp
12064+ call sys_execve
12065+ add $4,%esp
12066+ CFI_ADJUST_CFA_OFFSET -4
12067+ GET_THREAD_INFO(%ebp)
12068+ test %eax,%eax
12069+ jz syscall_exit
12070+ add $PT_OLDSS+4,%esp
12071+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
12072+ popl_cfi %ebp
12073+ ret
12074+ CFI_ENDPROC
12075+ENDPROC(kernel_execve)
12076+
12077 .macro FIXUP_ESPFIX_STACK
12078 /*
12079 * Switch back for ESPFIX stack to the normal zerobased stack
12080@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
12081 * normal stack and adjusts ESP with the matching offset.
12082 */
12083 /* fixup the stack */
12084- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
12085- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
12086+#ifdef CONFIG_SMP
12087+ movl PER_CPU_VAR(cpu_number), %ebx
12088+ shll $PAGE_SHIFT_asm, %ebx
12089+ addl $cpu_gdt_table, %ebx
12090+#else
12091+ movl $cpu_gdt_table, %ebx
12092+#endif
12093+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
12094+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
12095 shl $16, %eax
12096 addl %esp, %eax /* the adjusted stack pointer */
12097 pushl_cfi $__KERNEL_DS
12098@@ -816,7 +1065,7 @@ vector=vector+1
12099 .endr
12100 2: jmp common_interrupt
12101 .endr
12102-END(irq_entries_start)
12103+ENDPROC(irq_entries_start)
12104
12105 .previous
12106 END(interrupt)
12107@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
12108 pushl_cfi $do_coprocessor_error
12109 jmp error_code
12110 CFI_ENDPROC
12111-END(coprocessor_error)
12112+ENDPROC(coprocessor_error)
12113
12114 ENTRY(simd_coprocessor_error)
12115 RING0_INT_FRAME
12116@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
12117 #endif
12118 jmp error_code
12119 CFI_ENDPROC
12120-END(simd_coprocessor_error)
12121+ENDPROC(simd_coprocessor_error)
12122
12123 ENTRY(device_not_available)
12124 RING0_INT_FRAME
12125@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
12126 pushl_cfi $do_device_not_available
12127 jmp error_code
12128 CFI_ENDPROC
12129-END(device_not_available)
12130+ENDPROC(device_not_available)
12131
12132 #ifdef CONFIG_PARAVIRT
12133 ENTRY(native_iret)
12134@@ -902,12 +1151,12 @@ ENTRY(native_iret)
12135 .align 4
12136 .long native_iret, iret_exc
12137 .previous
12138-END(native_iret)
12139+ENDPROC(native_iret)
12140
12141 ENTRY(native_irq_enable_sysexit)
12142 sti
12143 sysexit
12144-END(native_irq_enable_sysexit)
12145+ENDPROC(native_irq_enable_sysexit)
12146 #endif
12147
12148 ENTRY(overflow)
12149@@ -916,7 +1165,7 @@ ENTRY(overflow)
12150 pushl_cfi $do_overflow
12151 jmp error_code
12152 CFI_ENDPROC
12153-END(overflow)
12154+ENDPROC(overflow)
12155
12156 ENTRY(bounds)
12157 RING0_INT_FRAME
12158@@ -924,7 +1173,7 @@ ENTRY(bounds)
12159 pushl_cfi $do_bounds
12160 jmp error_code
12161 CFI_ENDPROC
12162-END(bounds)
12163+ENDPROC(bounds)
12164
12165 ENTRY(invalid_op)
12166 RING0_INT_FRAME
12167@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
12168 pushl_cfi $do_invalid_op
12169 jmp error_code
12170 CFI_ENDPROC
12171-END(invalid_op)
12172+ENDPROC(invalid_op)
12173
12174 ENTRY(coprocessor_segment_overrun)
12175 RING0_INT_FRAME
12176@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
12177 pushl_cfi $do_coprocessor_segment_overrun
12178 jmp error_code
12179 CFI_ENDPROC
12180-END(coprocessor_segment_overrun)
12181+ENDPROC(coprocessor_segment_overrun)
12182
12183 ENTRY(invalid_TSS)
12184 RING0_EC_FRAME
12185 pushl_cfi $do_invalid_TSS
12186 jmp error_code
12187 CFI_ENDPROC
12188-END(invalid_TSS)
12189+ENDPROC(invalid_TSS)
12190
12191 ENTRY(segment_not_present)
12192 RING0_EC_FRAME
12193 pushl_cfi $do_segment_not_present
12194 jmp error_code
12195 CFI_ENDPROC
12196-END(segment_not_present)
12197+ENDPROC(segment_not_present)
12198
12199 ENTRY(stack_segment)
12200 RING0_EC_FRAME
12201 pushl_cfi $do_stack_segment
12202 jmp error_code
12203 CFI_ENDPROC
12204-END(stack_segment)
12205+ENDPROC(stack_segment)
12206
12207 ENTRY(alignment_check)
12208 RING0_EC_FRAME
12209 pushl_cfi $do_alignment_check
12210 jmp error_code
12211 CFI_ENDPROC
12212-END(alignment_check)
12213+ENDPROC(alignment_check)
12214
12215 ENTRY(divide_error)
12216 RING0_INT_FRAME
12217@@ -976,7 +1225,7 @@ ENTRY(divide_error)
12218 pushl_cfi $do_divide_error
12219 jmp error_code
12220 CFI_ENDPROC
12221-END(divide_error)
12222+ENDPROC(divide_error)
12223
12224 #ifdef CONFIG_X86_MCE
12225 ENTRY(machine_check)
12226@@ -985,7 +1234,7 @@ ENTRY(machine_check)
12227 pushl_cfi machine_check_vector
12228 jmp error_code
12229 CFI_ENDPROC
12230-END(machine_check)
12231+ENDPROC(machine_check)
12232 #endif
12233
12234 ENTRY(spurious_interrupt_bug)
12235@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
12236 pushl_cfi $do_spurious_interrupt_bug
12237 jmp error_code
12238 CFI_ENDPROC
12239-END(spurious_interrupt_bug)
12240+ENDPROC(spurious_interrupt_bug)
12241 /*
12242 * End of kprobes section
12243 */
12244@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector
12245
12246 ENTRY(mcount)
12247 ret
12248-END(mcount)
12249+ENDPROC(mcount)
12250
12251 ENTRY(ftrace_caller)
12252 cmpl $0, function_trace_stop
12253@@ -1138,7 +1387,7 @@ ftrace_graph_call:
12254 .globl ftrace_stub
12255 ftrace_stub:
12256 ret
12257-END(ftrace_caller)
12258+ENDPROC(ftrace_caller)
12259
12260 #else /* ! CONFIG_DYNAMIC_FTRACE */
12261
12262@@ -1174,7 +1423,7 @@ trace:
12263 popl %ecx
12264 popl %eax
12265 jmp ftrace_stub
12266-END(mcount)
12267+ENDPROC(mcount)
12268 #endif /* CONFIG_DYNAMIC_FTRACE */
12269 #endif /* CONFIG_FUNCTION_TRACER */
12270
12271@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
12272 popl %ecx
12273 popl %eax
12274 ret
12275-END(ftrace_graph_caller)
12276+ENDPROC(ftrace_graph_caller)
12277
12278 .globl return_to_handler
12279 return_to_handler:
12280@@ -1209,7 +1458,6 @@ return_to_handler:
12281 jmp *%ecx
12282 #endif
12283
12284-.section .rodata,"a"
12285 #include "syscall_table_32.S"
12286
12287 syscall_table_size=(.-sys_call_table)
12288@@ -1255,15 +1503,18 @@ error_code:
12289 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
12290 REG_TO_PTGS %ecx
12291 SET_KERNEL_GS %ecx
12292- movl $(__USER_DS), %ecx
12293+ movl $(__KERNEL_DS), %ecx
12294 movl %ecx, %ds
12295 movl %ecx, %es
12296+
12297+ pax_enter_kernel
12298+
12299 TRACE_IRQS_OFF
12300 movl %esp,%eax # pt_regs pointer
12301 call *%edi
12302 jmp ret_from_exception
12303 CFI_ENDPROC
12304-END(page_fault)
12305+ENDPROC(page_fault)
12306
12307 /*
12308 * Debug traps and NMI can happen at the one SYSENTER instruction
12309@@ -1305,7 +1556,7 @@ debug_stack_correct:
12310 call do_debug
12311 jmp ret_from_exception
12312 CFI_ENDPROC
12313-END(debug)
12314+ENDPROC(debug)
12315
12316 /*
12317 * NMI is doubly nasty. It can happen _while_ we're handling
12318@@ -1342,6 +1593,9 @@ nmi_stack_correct:
12319 xorl %edx,%edx # zero error code
12320 movl %esp,%eax # pt_regs pointer
12321 call do_nmi
12322+
12323+ pax_exit_kernel
12324+
12325 jmp restore_all_notrace
12326 CFI_ENDPROC
12327
12328@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
12329 FIXUP_ESPFIX_STACK # %eax == %esp
12330 xorl %edx,%edx # zero error code
12331 call do_nmi
12332+
12333+ pax_exit_kernel
12334+
12335 RESTORE_REGS
12336 lss 12+4(%esp), %esp # back to espfix stack
12337 CFI_ADJUST_CFA_OFFSET -24
12338 jmp irq_return
12339 CFI_ENDPROC
12340-END(nmi)
12341+ENDPROC(nmi)
12342
12343 ENTRY(int3)
12344 RING0_INT_FRAME
12345@@ -1395,14 +1652,14 @@ ENTRY(int3)
12346 call do_int3
12347 jmp ret_from_exception
12348 CFI_ENDPROC
12349-END(int3)
12350+ENDPROC(int3)
12351
12352 ENTRY(general_protection)
12353 RING0_EC_FRAME
12354 pushl_cfi $do_general_protection
12355 jmp error_code
12356 CFI_ENDPROC
12357-END(general_protection)
12358+ENDPROC(general_protection)
12359
12360 #ifdef CONFIG_KVM_GUEST
12361 ENTRY(async_page_fault)
12362@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
12363 pushl_cfi $do_async_page_fault
12364 jmp error_code
12365 CFI_ENDPROC
12366-END(async_page_fault)
12367+ENDPROC(async_page_fault)
12368 #endif
12369
12370 /*
12371diff -urNp linux-3.1.1/arch/x86/kernel/entry_64.S linux-3.1.1/arch/x86/kernel/entry_64.S
12372--- linux-3.1.1/arch/x86/kernel/entry_64.S 2011-11-11 15:19:27.000000000 -0500
12373+++ linux-3.1.1/arch/x86/kernel/entry_64.S 2011-11-17 18:28:56.000000000 -0500
12374@@ -55,6 +55,8 @@
12375 #include <asm/paravirt.h>
12376 #include <asm/ftrace.h>
12377 #include <asm/percpu.h>
12378+#include <asm/pgtable.h>
12379+#include <asm/alternative-asm.h>
12380
12381 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12382 #include <linux/elf-em.h>
12383@@ -68,8 +70,9 @@
12384 #ifdef CONFIG_FUNCTION_TRACER
12385 #ifdef CONFIG_DYNAMIC_FTRACE
12386 ENTRY(mcount)
12387+ pax_force_retaddr
12388 retq
12389-END(mcount)
12390+ENDPROC(mcount)
12391
12392 ENTRY(ftrace_caller)
12393 cmpl $0, function_trace_stop
12394@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
12395 #endif
12396
12397 GLOBAL(ftrace_stub)
12398+ pax_force_retaddr
12399 retq
12400-END(ftrace_caller)
12401+ENDPROC(ftrace_caller)
12402
12403 #else /* ! CONFIG_DYNAMIC_FTRACE */
12404 ENTRY(mcount)
12405@@ -112,6 +116,7 @@ ENTRY(mcount)
12406 #endif
12407
12408 GLOBAL(ftrace_stub)
12409+ pax_force_retaddr
12410 retq
12411
12412 trace:
12413@@ -121,12 +126,13 @@ trace:
12414 movq 8(%rbp), %rsi
12415 subq $MCOUNT_INSN_SIZE, %rdi
12416
12417+ pax_force_fptr ftrace_trace_function
12418 call *ftrace_trace_function
12419
12420 MCOUNT_RESTORE_FRAME
12421
12422 jmp ftrace_stub
12423-END(mcount)
12424+ENDPROC(mcount)
12425 #endif /* CONFIG_DYNAMIC_FTRACE */
12426 #endif /* CONFIG_FUNCTION_TRACER */
12427
12428@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
12429
12430 MCOUNT_RESTORE_FRAME
12431
12432+ pax_force_retaddr
12433 retq
12434-END(ftrace_graph_caller)
12435+ENDPROC(ftrace_graph_caller)
12436
12437 GLOBAL(return_to_handler)
12438 subq $24, %rsp
12439@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
12440 movq 8(%rsp), %rdx
12441 movq (%rsp), %rax
12442 addq $24, %rsp
12443+ pax_force_fptr %rdi
12444 jmp *%rdi
12445 #endif
12446
12447@@ -178,6 +186,269 @@ ENTRY(native_usergs_sysret64)
12448 ENDPROC(native_usergs_sysret64)
12449 #endif /* CONFIG_PARAVIRT */
12450
12451+ .macro ljmpq sel, off
12452+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12453+ .byte 0x48; ljmp *1234f(%rip)
12454+ .pushsection .rodata
12455+ .align 16
12456+ 1234: .quad \off; .word \sel
12457+ .popsection
12458+#else
12459+ pushq $\sel
12460+ pushq $\off
12461+ lretq
12462+#endif
12463+ .endm
12464+
12465+ .macro pax_enter_kernel
12466+#ifdef CONFIG_PAX_KERNEXEC
12467+ call pax_enter_kernel
12468+#endif
12469+ .endm
12470+
12471+ .macro pax_exit_kernel
12472+#ifdef CONFIG_PAX_KERNEXEC
12473+ call pax_exit_kernel
12474+#endif
12475+ .endm
12476+
12477+#ifdef CONFIG_PAX_KERNEXEC
12478+ENTRY(pax_enter_kernel)
12479+ pushq %rdi
12480+
12481+#ifdef CONFIG_PARAVIRT
12482+ PV_SAVE_REGS(CLBR_RDI)
12483+#endif
12484+
12485+ GET_CR0_INTO_RDI
12486+ bts $16,%rdi
12487+ jnc 1f
12488+ mov %cs,%edi
12489+ cmp $__KERNEL_CS,%edi
12490+ jz 3f
12491+ ljmpq __KERNEL_CS,3f
12492+1: ljmpq __KERNEXEC_KERNEL_CS,2f
12493+2: SET_RDI_INTO_CR0
12494+3:
12495+
12496+#ifdef CONFIG_PARAVIRT
12497+ PV_RESTORE_REGS(CLBR_RDI)
12498+#endif
12499+
12500+ popq %rdi
12501+ pax_force_retaddr
12502+ retq
12503+ENDPROC(pax_enter_kernel)
12504+
12505+ENTRY(pax_exit_kernel)
12506+ pushq %rdi
12507+
12508+#ifdef CONFIG_PARAVIRT
12509+ PV_SAVE_REGS(CLBR_RDI)
12510+#endif
12511+
12512+ mov %cs,%rdi
12513+ cmp $__KERNEXEC_KERNEL_CS,%edi
12514+ jnz 2f
12515+ GET_CR0_INTO_RDI
12516+ btr $16,%rdi
12517+ ljmpq __KERNEL_CS,1f
12518+1: SET_RDI_INTO_CR0
12519+2:
12520+
12521+#ifdef CONFIG_PARAVIRT
12522+ PV_RESTORE_REGS(CLBR_RDI);
12523+#endif
12524+
12525+ popq %rdi
12526+ pax_force_retaddr
12527+ retq
12528+ENDPROC(pax_exit_kernel)
12529+#endif
12530+
12531+ .macro pax_enter_kernel_user
12532+#ifdef CONFIG_PAX_MEMORY_UDEREF
12533+ call pax_enter_kernel_user
12534+#endif
12535+ .endm
12536+
12537+ .macro pax_exit_kernel_user
12538+#ifdef CONFIG_PAX_MEMORY_UDEREF
12539+ call pax_exit_kernel_user
12540+#endif
12541+#ifdef CONFIG_PAX_RANDKSTACK
12542+ push %rax
12543+ call pax_randomize_kstack
12544+ pop %rax
12545+#endif
12546+ .endm
12547+
12548+#ifdef CONFIG_PAX_MEMORY_UDEREF
12549+ENTRY(pax_enter_kernel_user)
12550+ pushq %rdi
12551+ pushq %rbx
12552+
12553+#ifdef CONFIG_PARAVIRT
12554+ PV_SAVE_REGS(CLBR_RDI)
12555+#endif
12556+
12557+ GET_CR3_INTO_RDI
12558+ mov %rdi,%rbx
12559+ add $__START_KERNEL_map,%rbx
12560+ sub phys_base(%rip),%rbx
12561+
12562+#ifdef CONFIG_PARAVIRT
12563+ pushq %rdi
12564+ cmpl $0, pv_info+PARAVIRT_enabled
12565+ jz 1f
12566+ i = 0
12567+ .rept USER_PGD_PTRS
12568+ mov i*8(%rbx),%rsi
12569+ mov $0,%sil
12570+ lea i*8(%rbx),%rdi
12571+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12572+ i = i + 1
12573+ .endr
12574+ jmp 2f
12575+1:
12576+#endif
12577+
12578+ i = 0
12579+ .rept USER_PGD_PTRS
12580+ movb $0,i*8(%rbx)
12581+ i = i + 1
12582+ .endr
12583+
12584+#ifdef CONFIG_PARAVIRT
12585+2: popq %rdi
12586+#endif
12587+ SET_RDI_INTO_CR3
12588+
12589+#ifdef CONFIG_PAX_KERNEXEC
12590+ GET_CR0_INTO_RDI
12591+ bts $16,%rdi
12592+ SET_RDI_INTO_CR0
12593+#endif
12594+
12595+#ifdef CONFIG_PARAVIRT
12596+ PV_RESTORE_REGS(CLBR_RDI)
12597+#endif
12598+
12599+ popq %rbx
12600+ popq %rdi
12601+ pax_force_retaddr
12602+ retq
12603+ENDPROC(pax_enter_kernel_user)
12604+
12605+ENTRY(pax_exit_kernel_user)
12606+ push %rdi
12607+
12608+#ifdef CONFIG_PARAVIRT
12609+ pushq %rbx
12610+ PV_SAVE_REGS(CLBR_RDI)
12611+#endif
12612+
12613+#ifdef CONFIG_PAX_KERNEXEC
12614+ GET_CR0_INTO_RDI
12615+ btr $16,%rdi
12616+ SET_RDI_INTO_CR0
12617+#endif
12618+
12619+ GET_CR3_INTO_RDI
12620+ add $__START_KERNEL_map,%rdi
12621+ sub phys_base(%rip),%rdi
12622+
12623+#ifdef CONFIG_PARAVIRT
12624+ cmpl $0, pv_info+PARAVIRT_enabled
12625+ jz 1f
12626+ mov %rdi,%rbx
12627+ i = 0
12628+ .rept USER_PGD_PTRS
12629+ mov i*8(%rbx),%rsi
12630+ mov $0x67,%sil
12631+ lea i*8(%rbx),%rdi
12632+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12633+ i = i + 1
12634+ .endr
12635+ jmp 2f
12636+1:
12637+#endif
12638+
12639+ i = 0
12640+ .rept USER_PGD_PTRS
12641+ movb $0x67,i*8(%rdi)
12642+ i = i + 1
12643+ .endr
12644+
12645+#ifdef CONFIG_PARAVIRT
12646+2: PV_RESTORE_REGS(CLBR_RDI)
12647+ popq %rbx
12648+#endif
12649+
12650+ popq %rdi
12651+ pax_force_retaddr
12652+ retq
12653+ENDPROC(pax_exit_kernel_user)
12654+#endif
12655+
12656+.macro pax_erase_kstack
12657+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12658+ call pax_erase_kstack
12659+#endif
12660+.endm
12661+
12662+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12663+/*
12664+ * r10: thread_info
12665+ * rcx, rdx: can be clobbered
12666+ */
12667+ENTRY(pax_erase_kstack)
12668+ pushq %rdi
12669+ pushq %rax
12670+ pushq %r10
12671+
12672+ GET_THREAD_INFO(%r10)
12673+ mov TI_lowest_stack(%r10), %rdi
12674+ mov $-0xBEEF, %rax
12675+ std
12676+
12677+1: mov %edi, %ecx
12678+ and $THREAD_SIZE_asm - 1, %ecx
12679+ shr $3, %ecx
12680+ repne scasq
12681+ jecxz 2f
12682+
12683+ cmp $2*8, %ecx
12684+ jc 2f
12685+
12686+ mov $2*8, %ecx
12687+ repe scasq
12688+ jecxz 2f
12689+ jne 1b
12690+
12691+2: cld
12692+ mov %esp, %ecx
12693+ sub %edi, %ecx
12694+
12695+ cmp $THREAD_SIZE_asm, %rcx
12696+ jb 3f
12697+ ud2
12698+3:
12699+
12700+ shr $3, %ecx
12701+ rep stosq
12702+
12703+ mov TI_task_thread_sp0(%r10), %rdi
12704+ sub $256, %rdi
12705+ mov %rdi, TI_lowest_stack(%r10)
12706+
12707+ popq %r10
12708+ popq %rax
12709+ popq %rdi
12710+ pax_force_retaddr
12711+ ret
12712+ENDPROC(pax_erase_kstack)
12713+#endif
12714
12715 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12716 #ifdef CONFIG_TRACE_IRQFLAGS
12717@@ -319,7 +590,7 @@ ENDPROC(native_usergs_sysret64)
12718 movq %rsp, %rsi
12719
12720 leaq -RBP(%rsp),%rdi /* arg1 for handler */
12721- testl $3, CS(%rdi)
12722+ testb $3, CS(%rdi)
12723 je 1f
12724 SWAPGS
12725 /*
12726@@ -350,9 +621,10 @@ ENTRY(save_rest)
12727 movq_cfi r15, R15+16
12728 movq %r11, 8(%rsp) /* return address */
12729 FIXUP_TOP_OF_STACK %r11, 16
12730+ pax_force_retaddr
12731 ret
12732 CFI_ENDPROC
12733-END(save_rest)
12734+ENDPROC(save_rest)
12735
12736 /* save complete stack frame */
12737 .pushsection .kprobes.text, "ax"
12738@@ -381,9 +653,10 @@ ENTRY(save_paranoid)
12739 js 1f /* negative -> in kernel */
12740 SWAPGS
12741 xorl %ebx,%ebx
12742-1: ret
12743+1: pax_force_retaddr
12744+ ret
12745 CFI_ENDPROC
12746-END(save_paranoid)
12747+ENDPROC(save_paranoid)
12748 .popsection
12749
12750 /*
12751@@ -405,7 +678,7 @@ ENTRY(ret_from_fork)
12752
12753 RESTORE_REST
12754
12755- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12756+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12757 je int_ret_from_sys_call
12758
12759 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12760@@ -415,7 +688,7 @@ ENTRY(ret_from_fork)
12761 jmp ret_from_sys_call # go to the SYSRET fastpath
12762
12763 CFI_ENDPROC
12764-END(ret_from_fork)
12765+ENDPROC(ret_from_fork)
12766
12767 /*
12768 * System call entry. Up to 6 arguments in registers are supported.
12769@@ -451,7 +724,7 @@ END(ret_from_fork)
12770 ENTRY(system_call)
12771 CFI_STARTPROC simple
12772 CFI_SIGNAL_FRAME
12773- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12774+ CFI_DEF_CFA rsp,0
12775 CFI_REGISTER rip,rcx
12776 /*CFI_REGISTER rflags,r11*/
12777 SWAPGS_UNSAFE_STACK
12778@@ -464,12 +737,13 @@ ENTRY(system_call_after_swapgs)
12779
12780 movq %rsp,PER_CPU_VAR(old_rsp)
12781 movq PER_CPU_VAR(kernel_stack),%rsp
12782+ pax_enter_kernel_user
12783 /*
12784 * No need to follow this irqs off/on section - it's straight
12785 * and short:
12786 */
12787 ENABLE_INTERRUPTS(CLBR_NONE)
12788- SAVE_ARGS 8,0
12789+ SAVE_ARGS 8*6,0
12790 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12791 movq %rcx,RIP-ARGOFFSET(%rsp)
12792 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12793@@ -498,6 +772,8 @@ sysret_check:
12794 andl %edi,%edx
12795 jnz sysret_careful
12796 CFI_REMEMBER_STATE
12797+ pax_exit_kernel_user
12798+ pax_erase_kstack
12799 /*
12800 * sysretq will re-enable interrupts:
12801 */
12802@@ -556,6 +832,9 @@ auditsys:
12803 movq %rax,%rsi /* 2nd arg: syscall number */
12804 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12805 call audit_syscall_entry
12806+
12807+ pax_erase_kstack
12808+
12809 LOAD_ARGS 0 /* reload call-clobbered registers */
12810 jmp system_call_fastpath
12811
12812@@ -586,6 +865,9 @@ tracesys:
12813 FIXUP_TOP_OF_STACK %rdi
12814 movq %rsp,%rdi
12815 call syscall_trace_enter
12816+
12817+ pax_erase_kstack
12818+
12819 /*
12820 * Reload arg registers from stack in case ptrace changed them.
12821 * We don't reload %rax because syscall_trace_enter() returned
12822@@ -607,7 +889,7 @@ tracesys:
12823 GLOBAL(int_ret_from_sys_call)
12824 DISABLE_INTERRUPTS(CLBR_NONE)
12825 TRACE_IRQS_OFF
12826- testl $3,CS-ARGOFFSET(%rsp)
12827+ testb $3,CS-ARGOFFSET(%rsp)
12828 je retint_restore_args
12829 movl $_TIF_ALLWORK_MASK,%edi
12830 /* edi: mask to check */
12831@@ -664,7 +946,7 @@ int_restore_rest:
12832 TRACE_IRQS_OFF
12833 jmp int_with_check
12834 CFI_ENDPROC
12835-END(system_call)
12836+ENDPROC(system_call)
12837
12838 /*
12839 * Certain special system calls that need to save a complete full stack frame.
12840@@ -680,7 +962,7 @@ ENTRY(\label)
12841 call \func
12842 jmp ptregscall_common
12843 CFI_ENDPROC
12844-END(\label)
12845+ENDPROC(\label)
12846 .endm
12847
12848 PTREGSCALL stub_clone, sys_clone, %r8
12849@@ -698,9 +980,10 @@ ENTRY(ptregscall_common)
12850 movq_cfi_restore R12+8, r12
12851 movq_cfi_restore RBP+8, rbp
12852 movq_cfi_restore RBX+8, rbx
12853+ pax_force_retaddr
12854 ret $REST_SKIP /* pop extended registers */
12855 CFI_ENDPROC
12856-END(ptregscall_common)
12857+ENDPROC(ptregscall_common)
12858
12859 ENTRY(stub_execve)
12860 CFI_STARTPROC
12861@@ -715,7 +998,7 @@ ENTRY(stub_execve)
12862 RESTORE_REST
12863 jmp int_ret_from_sys_call
12864 CFI_ENDPROC
12865-END(stub_execve)
12866+ENDPROC(stub_execve)
12867
12868 /*
12869 * sigreturn is special because it needs to restore all registers on return.
12870@@ -733,7 +1016,7 @@ ENTRY(stub_rt_sigreturn)
12871 RESTORE_REST
12872 jmp int_ret_from_sys_call
12873 CFI_ENDPROC
12874-END(stub_rt_sigreturn)
12875+ENDPROC(stub_rt_sigreturn)
12876
12877 /*
12878 * Build the entry stubs and pointer table with some assembler magic.
12879@@ -768,7 +1051,7 @@ vector=vector+1
12880 2: jmp common_interrupt
12881 .endr
12882 CFI_ENDPROC
12883-END(irq_entries_start)
12884+ENDPROC(irq_entries_start)
12885
12886 .previous
12887 END(interrupt)
12888@@ -789,6 +1072,16 @@ END(interrupt)
12889 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12890 SAVE_ARGS_IRQ
12891 PARTIAL_FRAME 0
12892+#ifdef CONFIG_PAX_MEMORY_UDEREF
12893+ testb $3, CS(%rdi)
12894+ jnz 1f
12895+ pax_enter_kernel
12896+ jmp 2f
12897+1: pax_enter_kernel_user
12898+2:
12899+#else
12900+ pax_enter_kernel
12901+#endif
12902 call \func
12903 .endm
12904
12905@@ -820,7 +1113,7 @@ ret_from_intr:
12906
12907 exit_intr:
12908 GET_THREAD_INFO(%rcx)
12909- testl $3,CS-ARGOFFSET(%rsp)
12910+ testb $3,CS-ARGOFFSET(%rsp)
12911 je retint_kernel
12912
12913 /* Interrupt came from user space */
12914@@ -842,12 +1135,16 @@ retint_swapgs: /* return to user-space
12915 * The iretq could re-enable interrupts:
12916 */
12917 DISABLE_INTERRUPTS(CLBR_ANY)
12918+ pax_exit_kernel_user
12919+ pax_erase_kstack
12920 TRACE_IRQS_IRETQ
12921 SWAPGS
12922 jmp restore_args
12923
12924 retint_restore_args: /* return to kernel space */
12925 DISABLE_INTERRUPTS(CLBR_ANY)
12926+ pax_exit_kernel
12927+ pax_force_retaddr RIP-ARGOFFSET
12928 /*
12929 * The iretq could re-enable interrupts:
12930 */
12931@@ -936,7 +1233,7 @@ ENTRY(retint_kernel)
12932 #endif
12933
12934 CFI_ENDPROC
12935-END(common_interrupt)
12936+ENDPROC(common_interrupt)
12937 /*
12938 * End of kprobes section
12939 */
12940@@ -952,7 +1249,7 @@ ENTRY(\sym)
12941 interrupt \do_sym
12942 jmp ret_from_intr
12943 CFI_ENDPROC
12944-END(\sym)
12945+ENDPROC(\sym)
12946 .endm
12947
12948 #ifdef CONFIG_SMP
12949@@ -1017,12 +1314,22 @@ ENTRY(\sym)
12950 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12951 call error_entry
12952 DEFAULT_FRAME 0
12953+#ifdef CONFIG_PAX_MEMORY_UDEREF
12954+ testb $3, CS(%rsp)
12955+ jnz 1f
12956+ pax_enter_kernel
12957+ jmp 2f
12958+1: pax_enter_kernel_user
12959+2:
12960+#else
12961+ pax_enter_kernel
12962+#endif
12963 movq %rsp,%rdi /* pt_regs pointer */
12964 xorl %esi,%esi /* no error code */
12965 call \do_sym
12966 jmp error_exit /* %ebx: no swapgs flag */
12967 CFI_ENDPROC
12968-END(\sym)
12969+ENDPROC(\sym)
12970 .endm
12971
12972 .macro paranoidzeroentry sym do_sym
12973@@ -1034,15 +1341,25 @@ ENTRY(\sym)
12974 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12975 call save_paranoid
12976 TRACE_IRQS_OFF
12977+#ifdef CONFIG_PAX_MEMORY_UDEREF
12978+ testb $3, CS(%rsp)
12979+ jnz 1f
12980+ pax_enter_kernel
12981+ jmp 2f
12982+1: pax_enter_kernel_user
12983+2:
12984+#else
12985+ pax_enter_kernel
12986+#endif
12987 movq %rsp,%rdi /* pt_regs pointer */
12988 xorl %esi,%esi /* no error code */
12989 call \do_sym
12990 jmp paranoid_exit /* %ebx: no swapgs flag */
12991 CFI_ENDPROC
12992-END(\sym)
12993+ENDPROC(\sym)
12994 .endm
12995
12996-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12997+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12998 .macro paranoidzeroentry_ist sym do_sym ist
12999 ENTRY(\sym)
13000 INTR_FRAME
13001@@ -1052,14 +1369,30 @@ ENTRY(\sym)
13002 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13003 call save_paranoid
13004 TRACE_IRQS_OFF
13005+#ifdef CONFIG_PAX_MEMORY_UDEREF
13006+ testb $3, CS(%rsp)
13007+ jnz 1f
13008+ pax_enter_kernel
13009+ jmp 2f
13010+1: pax_enter_kernel_user
13011+2:
13012+#else
13013+ pax_enter_kernel
13014+#endif
13015 movq %rsp,%rdi /* pt_regs pointer */
13016 xorl %esi,%esi /* no error code */
13017+#ifdef CONFIG_SMP
13018+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
13019+ lea init_tss(%r12), %r12
13020+#else
13021+ lea init_tss(%rip), %r12
13022+#endif
13023 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
13024 call \do_sym
13025 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
13026 jmp paranoid_exit /* %ebx: no swapgs flag */
13027 CFI_ENDPROC
13028-END(\sym)
13029+ENDPROC(\sym)
13030 .endm
13031
13032 .macro errorentry sym do_sym
13033@@ -1070,13 +1403,23 @@ ENTRY(\sym)
13034 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13035 call error_entry
13036 DEFAULT_FRAME 0
13037+#ifdef CONFIG_PAX_MEMORY_UDEREF
13038+ testb $3, CS(%rsp)
13039+ jnz 1f
13040+ pax_enter_kernel
13041+ jmp 2f
13042+1: pax_enter_kernel_user
13043+2:
13044+#else
13045+ pax_enter_kernel
13046+#endif
13047 movq %rsp,%rdi /* pt_regs pointer */
13048 movq ORIG_RAX(%rsp),%rsi /* get error code */
13049 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13050 call \do_sym
13051 jmp error_exit /* %ebx: no swapgs flag */
13052 CFI_ENDPROC
13053-END(\sym)
13054+ENDPROC(\sym)
13055 .endm
13056
13057 /* error code is on the stack already */
13058@@ -1089,13 +1432,23 @@ ENTRY(\sym)
13059 call save_paranoid
13060 DEFAULT_FRAME 0
13061 TRACE_IRQS_OFF
13062+#ifdef CONFIG_PAX_MEMORY_UDEREF
13063+ testb $3, CS(%rsp)
13064+ jnz 1f
13065+ pax_enter_kernel
13066+ jmp 2f
13067+1: pax_enter_kernel_user
13068+2:
13069+#else
13070+ pax_enter_kernel
13071+#endif
13072 movq %rsp,%rdi /* pt_regs pointer */
13073 movq ORIG_RAX(%rsp),%rsi /* get error code */
13074 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13075 call \do_sym
13076 jmp paranoid_exit /* %ebx: no swapgs flag */
13077 CFI_ENDPROC
13078-END(\sym)
13079+ENDPROC(\sym)
13080 .endm
13081
13082 zeroentry divide_error do_divide_error
13083@@ -1125,9 +1478,10 @@ gs_change:
13084 2: mfence /* workaround */
13085 SWAPGS
13086 popfq_cfi
13087+ pax_force_retaddr
13088 ret
13089 CFI_ENDPROC
13090-END(native_load_gs_index)
13091+ENDPROC(native_load_gs_index)
13092
13093 .section __ex_table,"a"
13094 .align 8
13095@@ -1149,13 +1503,14 @@ ENTRY(kernel_thread_helper)
13096 * Here we are in the child and the registers are set as they were
13097 * at kernel_thread() invocation in the parent.
13098 */
13099+ pax_force_fptr %rsi
13100 call *%rsi
13101 # exit
13102 mov %eax, %edi
13103 call do_exit
13104 ud2 # padding for call trace
13105 CFI_ENDPROC
13106-END(kernel_thread_helper)
13107+ENDPROC(kernel_thread_helper)
13108
13109 /*
13110 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
13111@@ -1184,9 +1539,10 @@ ENTRY(kernel_execve)
13112 je int_ret_from_sys_call
13113 RESTORE_ARGS
13114 UNFAKE_STACK_FRAME
13115+ pax_force_retaddr
13116 ret
13117 CFI_ENDPROC
13118-END(kernel_execve)
13119+ENDPROC(kernel_execve)
13120
13121 /* Call softirq on interrupt stack. Interrupts are off. */
13122 ENTRY(call_softirq)
13123@@ -1204,9 +1560,10 @@ ENTRY(call_softirq)
13124 CFI_DEF_CFA_REGISTER rsp
13125 CFI_ADJUST_CFA_OFFSET -8
13126 decl PER_CPU_VAR(irq_count)
13127+ pax_force_retaddr
13128 ret
13129 CFI_ENDPROC
13130-END(call_softirq)
13131+ENDPROC(call_softirq)
13132
13133 #ifdef CONFIG_XEN
13134 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
13135@@ -1244,7 +1601,7 @@ ENTRY(xen_do_hypervisor_callback) # do
13136 decl PER_CPU_VAR(irq_count)
13137 jmp error_exit
13138 CFI_ENDPROC
13139-END(xen_do_hypervisor_callback)
13140+ENDPROC(xen_do_hypervisor_callback)
13141
13142 /*
13143 * Hypervisor uses this for application faults while it executes.
13144@@ -1303,7 +1660,7 @@ ENTRY(xen_failsafe_callback)
13145 SAVE_ALL
13146 jmp error_exit
13147 CFI_ENDPROC
13148-END(xen_failsafe_callback)
13149+ENDPROC(xen_failsafe_callback)
13150
13151 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
13152 xen_hvm_callback_vector xen_evtchn_do_upcall
13153@@ -1352,16 +1709,31 @@ ENTRY(paranoid_exit)
13154 TRACE_IRQS_OFF
13155 testl %ebx,%ebx /* swapgs needed? */
13156 jnz paranoid_restore
13157- testl $3,CS(%rsp)
13158+ testb $3,CS(%rsp)
13159 jnz paranoid_userspace
13160+#ifdef CONFIG_PAX_MEMORY_UDEREF
13161+ pax_exit_kernel
13162+ TRACE_IRQS_IRETQ 0
13163+ SWAPGS_UNSAFE_STACK
13164+ RESTORE_ALL 8
13165+ pax_force_retaddr
13166+ jmp irq_return
13167+#endif
13168 paranoid_swapgs:
13169+#ifdef CONFIG_PAX_MEMORY_UDEREF
13170+ pax_exit_kernel_user
13171+#else
13172+ pax_exit_kernel
13173+#endif
13174 TRACE_IRQS_IRETQ 0
13175 SWAPGS_UNSAFE_STACK
13176 RESTORE_ALL 8
13177 jmp irq_return
13178 paranoid_restore:
13179+ pax_exit_kernel
13180 TRACE_IRQS_IRETQ 0
13181 RESTORE_ALL 8
13182+ pax_force_retaddr
13183 jmp irq_return
13184 paranoid_userspace:
13185 GET_THREAD_INFO(%rcx)
13186@@ -1390,7 +1762,7 @@ paranoid_schedule:
13187 TRACE_IRQS_OFF
13188 jmp paranoid_userspace
13189 CFI_ENDPROC
13190-END(paranoid_exit)
13191+ENDPROC(paranoid_exit)
13192
13193 /*
13194 * Exception entry point. This expects an error code/orig_rax on the stack.
13195@@ -1417,12 +1789,13 @@ ENTRY(error_entry)
13196 movq_cfi r14, R14+8
13197 movq_cfi r15, R15+8
13198 xorl %ebx,%ebx
13199- testl $3,CS+8(%rsp)
13200+ testb $3,CS+8(%rsp)
13201 je error_kernelspace
13202 error_swapgs:
13203 SWAPGS
13204 error_sti:
13205 TRACE_IRQS_OFF
13206+ pax_force_retaddr
13207 ret
13208
13209 /*
13210@@ -1449,7 +1822,7 @@ bstep_iret:
13211 movq %rcx,RIP+8(%rsp)
13212 jmp error_swapgs
13213 CFI_ENDPROC
13214-END(error_entry)
13215+ENDPROC(error_entry)
13216
13217
13218 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
13219@@ -1469,7 +1842,7 @@ ENTRY(error_exit)
13220 jnz retint_careful
13221 jmp retint_swapgs
13222 CFI_ENDPROC
13223-END(error_exit)
13224+ENDPROC(error_exit)
13225
13226
13227 /* runs on exception stack */
13228@@ -1481,6 +1854,16 @@ ENTRY(nmi)
13229 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13230 call save_paranoid
13231 DEFAULT_FRAME 0
13232+#ifdef CONFIG_PAX_MEMORY_UDEREF
13233+ testb $3, CS(%rsp)
13234+ jnz 1f
13235+ pax_enter_kernel
13236+ jmp 2f
13237+1: pax_enter_kernel_user
13238+2:
13239+#else
13240+ pax_enter_kernel
13241+#endif
13242 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13243 movq %rsp,%rdi
13244 movq $-1,%rsi
13245@@ -1491,12 +1874,28 @@ ENTRY(nmi)
13246 DISABLE_INTERRUPTS(CLBR_NONE)
13247 testl %ebx,%ebx /* swapgs needed? */
13248 jnz nmi_restore
13249- testl $3,CS(%rsp)
13250+ testb $3,CS(%rsp)
13251 jnz nmi_userspace
13252+#ifdef CONFIG_PAX_MEMORY_UDEREF
13253+ pax_exit_kernel
13254+ SWAPGS_UNSAFE_STACK
13255+ RESTORE_ALL 8
13256+ pax_force_retaddr
13257+ jmp irq_return
13258+#endif
13259 nmi_swapgs:
13260+#ifdef CONFIG_PAX_MEMORY_UDEREF
13261+ pax_exit_kernel_user
13262+#else
13263+ pax_exit_kernel
13264+#endif
13265 SWAPGS_UNSAFE_STACK
13266+ RESTORE_ALL 8
13267+ jmp irq_return
13268 nmi_restore:
13269+ pax_exit_kernel
13270 RESTORE_ALL 8
13271+ pax_force_retaddr
13272 jmp irq_return
13273 nmi_userspace:
13274 GET_THREAD_INFO(%rcx)
13275@@ -1525,14 +1924,14 @@ nmi_schedule:
13276 jmp paranoid_exit
13277 CFI_ENDPROC
13278 #endif
13279-END(nmi)
13280+ENDPROC(nmi)
13281
13282 ENTRY(ignore_sysret)
13283 CFI_STARTPROC
13284 mov $-ENOSYS,%eax
13285 sysret
13286 CFI_ENDPROC
13287-END(ignore_sysret)
13288+ENDPROC(ignore_sysret)
13289
13290 /*
13291 * End of kprobes section
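Note on the entry_64.S changes above: the low two bits of a saved CS selector hold the privilege level of the interrupted context, so a single-byte test is enough to tell a user-mode frame from a kernel one, which is why the repeated `testl $3,CS(%rsp)` checks are narrowed to `testb`. Under CONFIG_PAX_MEMORY_UDEREF the paranoid and NMI exit paths are additionally split so the kernel and user variants of pax_exit_kernel run before SWAPGS, and returns gain pax_force_retaddr. A minimal C sketch of the privilege-level check only (selector values below are illustrative):

#include <stdint.h>
#include <stdio.h>

/* The low 2 bits of a segment selector are the privilege level, so a saved
 * CS from ring 3 (user mode) always has at least one of them set.  This is
 * the fact behind `testb $3, CS(%rsp)` in the exit paths above. */
static int frame_is_user_mode(uint16_t saved_cs)
{
    return (saved_cs & 3) != 0;
}

int main(void)
{
    printf("cs=0x10 -> %d (kernel-style selector)\n", frame_is_user_mode(0x10));
    printf("cs=0x33 -> %d (user selector, RPL 3)\n",  frame_is_user_mode(0x33));
    return 0;
}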
13292diff -urNp linux-3.1.1/arch/x86/kernel/ftrace.c linux-3.1.1/arch/x86/kernel/ftrace.c
13293--- linux-3.1.1/arch/x86/kernel/ftrace.c 2011-11-11 15:19:27.000000000 -0500
13294+++ linux-3.1.1/arch/x86/kernel/ftrace.c 2011-11-16 18:39:07.000000000 -0500
13295@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
13296 static const void *mod_code_newcode; /* holds the text to write to the IP */
13297
13298 static unsigned nmi_wait_count;
13299-static atomic_t nmi_update_count = ATOMIC_INIT(0);
13300+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13301
13302 int ftrace_arch_read_dyn_info(char *buf, int size)
13303 {
13304@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13305
13306 r = snprintf(buf, size, "%u %u",
13307 nmi_wait_count,
13308- atomic_read(&nmi_update_count));
13309+ atomic_read_unchecked(&nmi_update_count));
13310 return r;
13311 }
13312
13313@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
13314
13315 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13316 smp_rmb();
13317+ pax_open_kernel();
13318 ftrace_mod_code();
13319- atomic_inc(&nmi_update_count);
13320+ pax_close_kernel();
13321+ atomic_inc_unchecked(&nmi_update_count);
13322 }
13323 /* Must have previous changes seen before executions */
13324 smp_mb();
13325@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
13326 {
13327 unsigned char replaced[MCOUNT_INSN_SIZE];
13328
13329+ ip = ktla_ktva(ip);
13330+
13331 /*
13332 * Note: Due to modules and __init, code can
13333 * disappear and change, we need to protect against faulting
13334@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13335 unsigned char old[MCOUNT_INSN_SIZE], *new;
13336 int ret;
13337
13338- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13339+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13340 new = ftrace_call_replace(ip, (unsigned long)func);
13341 ret = ftrace_modify_code(ip, old, new);
13342
13343@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
13344 {
13345 unsigned char code[MCOUNT_INSN_SIZE];
13346
13347+ ip = ktla_ktva(ip);
13348+
13349 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13350 return -EFAULT;
13351
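The ftrace.c hunk shows two idioms that recur throughout the patch: counters that may legitimately wrap (here nmi_update_count) move from atomic_t to atomic_unchecked_t, since the PaX REFCOUNT hardening traps on overflow of the checked type, and the code-patching call ftrace_mod_code() is bracketed by pax_open_kernel()/pax_close_kernel() so otherwise read-only kernel text can be written for the duration of the update. Below is a rough userspace analogue of that open/patch/close pattern using POSIX mprotect(); it is only a sketch of the idea on a stock x86 Linux system, not the kernel mechanism itself.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Userspace sketch of "open kernel, patch, close kernel": the target page is
 * normally not writable; it becomes writable only while the patch is applied
 * and is locked down again right afterwards.  Assumes the patched bytes do
 * not cross a page boundary. */
static int patch_bytes(void *target, const void *newcode, size_t len)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    void *page = (void *)((uintptr_t)target & ~((uintptr_t)pagesz - 1));

    if (mprotect(page, pagesz, PROT_READ | PROT_WRITE) != 0)   /* "pax_open_kernel"  */
        return -1;
    memcpy(target, newcode, len);                              /* the actual patch   */
    return mprotect(page, pagesz, PROT_READ | PROT_EXEC);      /* "pax_close_kernel" */
}

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    unsigned char *buf = mmap(NULL, pagesz, PROT_READ | PROT_EXEC,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    unsigned char ret = 0xc3;   /* an x86 "ret", used purely as sample data */

    if (buf == MAP_FAILED || patch_bytes(buf, &ret, 1) != 0)
        return 1;
    printf("patched byte: %#x\n", buf[0]);
    return 0;
}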
13352diff -urNp linux-3.1.1/arch/x86/kernel/head32.c linux-3.1.1/arch/x86/kernel/head32.c
13353--- linux-3.1.1/arch/x86/kernel/head32.c 2011-11-11 15:19:27.000000000 -0500
13354+++ linux-3.1.1/arch/x86/kernel/head32.c 2011-11-16 18:39:07.000000000 -0500
13355@@ -19,6 +19,7 @@
13356 #include <asm/io_apic.h>
13357 #include <asm/bios_ebda.h>
13358 #include <asm/tlbflush.h>
13359+#include <asm/boot.h>
13360
13361 static void __init i386_default_early_setup(void)
13362 {
13363@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
13364 {
13365 memblock_init();
13366
13367- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13368+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13369
13370 #ifdef CONFIG_BLK_DEV_INITRD
13371 /* Reserve INITRD */
13372diff -urNp linux-3.1.1/arch/x86/kernel/head_32.S linux-3.1.1/arch/x86/kernel/head_32.S
13373--- linux-3.1.1/arch/x86/kernel/head_32.S 2011-11-11 15:19:27.000000000 -0500
13374+++ linux-3.1.1/arch/x86/kernel/head_32.S 2011-11-16 18:39:07.000000000 -0500
13375@@ -25,6 +25,12 @@
13376 /* Physical address */
13377 #define pa(X) ((X) - __PAGE_OFFSET)
13378
13379+#ifdef CONFIG_PAX_KERNEXEC
13380+#define ta(X) (X)
13381+#else
13382+#define ta(X) ((X) - __PAGE_OFFSET)
13383+#endif
13384+
13385 /*
13386 * References to members of the new_cpu_data structure.
13387 */
13388@@ -54,11 +60,7 @@
13389 * and small than max_low_pfn, otherwise will waste some page table entries
13390 */
13391
13392-#if PTRS_PER_PMD > 1
13393-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13394-#else
13395-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13396-#endif
13397+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13398
13399 /* Number of possible pages in the lowmem region */
13400 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
13401@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13402 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13403
13404 /*
13405+ * Real beginning of normal "text" segment
13406+ */
13407+ENTRY(stext)
13408+ENTRY(_stext)
13409+
13410+/*
13411 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13412 * %esi points to the real-mode code as a 32-bit pointer.
13413 * CS and DS must be 4 GB flat segments, but we don't depend on
13414@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13415 * can.
13416 */
13417 __HEAD
13418+
13419+#ifdef CONFIG_PAX_KERNEXEC
13420+ jmp startup_32
13421+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13422+.fill PAGE_SIZE-5,1,0xcc
13423+#endif
13424+
13425 ENTRY(startup_32)
13426 movl pa(stack_start),%ecx
13427
13428@@ -105,6 +120,57 @@ ENTRY(startup_32)
13429 2:
13430 leal -__PAGE_OFFSET(%ecx),%esp
13431
13432+#ifdef CONFIG_SMP
13433+ movl $pa(cpu_gdt_table),%edi
13434+ movl $__per_cpu_load,%eax
13435+ movw %ax,__KERNEL_PERCPU + 2(%edi)
13436+ rorl $16,%eax
13437+ movb %al,__KERNEL_PERCPU + 4(%edi)
13438+ movb %ah,__KERNEL_PERCPU + 7(%edi)
13439+ movl $__per_cpu_end - 1,%eax
13440+ subl $__per_cpu_start,%eax
13441+ movw %ax,__KERNEL_PERCPU + 0(%edi)
13442+#endif
13443+
13444+#ifdef CONFIG_PAX_MEMORY_UDEREF
13445+ movl $NR_CPUS,%ecx
13446+ movl $pa(cpu_gdt_table),%edi
13447+1:
13448+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13449+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13450+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13451+ addl $PAGE_SIZE_asm,%edi
13452+ loop 1b
13453+#endif
13454+
13455+#ifdef CONFIG_PAX_KERNEXEC
13456+ movl $pa(boot_gdt),%edi
13457+ movl $__LOAD_PHYSICAL_ADDR,%eax
13458+ movw %ax,__BOOT_CS + 2(%edi)
13459+ rorl $16,%eax
13460+ movb %al,__BOOT_CS + 4(%edi)
13461+ movb %ah,__BOOT_CS + 7(%edi)
13462+ rorl $16,%eax
13463+
13464+ ljmp $(__BOOT_CS),$1f
13465+1:
13466+
13467+ movl $NR_CPUS,%ecx
13468+ movl $pa(cpu_gdt_table),%edi
13469+ addl $__PAGE_OFFSET,%eax
13470+1:
13471+ movw %ax,__KERNEL_CS + 2(%edi)
13472+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13473+ rorl $16,%eax
13474+ movb %al,__KERNEL_CS + 4(%edi)
13475+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13476+ movb %ah,__KERNEL_CS + 7(%edi)
13477+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13478+ rorl $16,%eax
13479+ addl $PAGE_SIZE_asm,%edi
13480+ loop 1b
13481+#endif
13482+
13483 /*
13484 * Clear BSS first so that there are no surprises...
13485 */
13486@@ -195,8 +261,11 @@ ENTRY(startup_32)
13487 movl %eax, pa(max_pfn_mapped)
13488
13489 /* Do early initialization of the fixmap area */
13490- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13491- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
13492+#ifdef CONFIG_COMPAT_VDSO
13493+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
13494+#else
13495+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
13496+#endif
13497 #else /* Not PAE */
13498
13499 page_pde_offset = (__PAGE_OFFSET >> 20);
13500@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13501 movl %eax, pa(max_pfn_mapped)
13502
13503 /* Do early initialization of the fixmap area */
13504- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13505- movl %eax,pa(initial_page_table+0xffc)
13506+#ifdef CONFIG_COMPAT_VDSO
13507+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
13508+#else
13509+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
13510+#endif
13511 #endif
13512
13513 #ifdef CONFIG_PARAVIRT
13514@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13515 cmpl $num_subarch_entries, %eax
13516 jae bad_subarch
13517
13518- movl pa(subarch_entries)(,%eax,4), %eax
13519- subl $__PAGE_OFFSET, %eax
13520- jmp *%eax
13521+ jmp *pa(subarch_entries)(,%eax,4)
13522
13523 bad_subarch:
13524 WEAK(lguest_entry)
13525@@ -255,10 +325,10 @@ WEAK(xen_entry)
13526 __INITDATA
13527
13528 subarch_entries:
13529- .long default_entry /* normal x86/PC */
13530- .long lguest_entry /* lguest hypervisor */
13531- .long xen_entry /* Xen hypervisor */
13532- .long default_entry /* Moorestown MID */
13533+ .long ta(default_entry) /* normal x86/PC */
13534+ .long ta(lguest_entry) /* lguest hypervisor */
13535+ .long ta(xen_entry) /* Xen hypervisor */
13536+ .long ta(default_entry) /* Moorestown MID */
13537 num_subarch_entries = (. - subarch_entries) / 4
13538 .previous
13539 #else
13540@@ -312,6 +382,7 @@ default_entry:
13541 orl %edx,%eax
13542 movl %eax,%cr4
13543
13544+#ifdef CONFIG_X86_PAE
13545 testb $X86_CR4_PAE, %al # check if PAE is enabled
13546 jz 6f
13547
13548@@ -340,6 +411,9 @@ default_entry:
13549 /* Make changes effective */
13550 wrmsr
13551
13552+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13553+#endif
13554+
13555 6:
13556
13557 /*
13558@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
13559 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13560 movl %eax,%ss # after changing gdt.
13561
13562- movl $(__USER_DS),%eax # DS/ES contains default USER segment
13563+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13564 movl %eax,%ds
13565 movl %eax,%es
13566
13567@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
13568 */
13569 cmpb $0,ready
13570 jne 1f
13571- movl $gdt_page,%eax
13572+ movl $cpu_gdt_table,%eax
13573 movl $stack_canary,%ecx
13574+#ifdef CONFIG_SMP
13575+ addl $__per_cpu_load,%ecx
13576+#endif
13577 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
13578 shrl $16, %ecx
13579 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
13580 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
13581 1:
13582-#endif
13583 movl $(__KERNEL_STACK_CANARY),%eax
13584+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13585+ movl $(__USER_DS),%eax
13586+#else
13587+ xorl %eax,%eax
13588+#endif
13589 movl %eax,%gs
13590
13591 xorl %eax,%eax # Clear LDT
13592@@ -558,22 +639,22 @@ early_page_fault:
13593 jmp early_fault
13594
13595 early_fault:
13596- cld
13597 #ifdef CONFIG_PRINTK
13598+ cmpl $1,%ss:early_recursion_flag
13599+ je hlt_loop
13600+ incl %ss:early_recursion_flag
13601+ cld
13602 pusha
13603 movl $(__KERNEL_DS),%eax
13604 movl %eax,%ds
13605 movl %eax,%es
13606- cmpl $2,early_recursion_flag
13607- je hlt_loop
13608- incl early_recursion_flag
13609 movl %cr2,%eax
13610 pushl %eax
13611 pushl %edx /* trapno */
13612 pushl $fault_msg
13613 call printk
13614+; call dump_stack
13615 #endif
13616- call dump_stack
13617 hlt_loop:
13618 hlt
13619 jmp hlt_loop
13620@@ -581,8 +662,11 @@ hlt_loop:
13621 /* This is the default interrupt "handler" :-) */
13622 ALIGN
13623 ignore_int:
13624- cld
13625 #ifdef CONFIG_PRINTK
13626+ cmpl $2,%ss:early_recursion_flag
13627+ je hlt_loop
13628+ incl %ss:early_recursion_flag
13629+ cld
13630 pushl %eax
13631 pushl %ecx
13632 pushl %edx
13633@@ -591,9 +675,6 @@ ignore_int:
13634 movl $(__KERNEL_DS),%eax
13635 movl %eax,%ds
13636 movl %eax,%es
13637- cmpl $2,early_recursion_flag
13638- je hlt_loop
13639- incl early_recursion_flag
13640 pushl 16(%esp)
13641 pushl 24(%esp)
13642 pushl 32(%esp)
13643@@ -622,29 +703,43 @@ ENTRY(initial_code)
13644 /*
13645 * BSS section
13646 */
13647-__PAGE_ALIGNED_BSS
13648- .align PAGE_SIZE
13649 #ifdef CONFIG_X86_PAE
13650+.section .initial_pg_pmd,"a",@progbits
13651 initial_pg_pmd:
13652 .fill 1024*KPMDS,4,0
13653 #else
13654+.section .initial_page_table,"a",@progbits
13655 ENTRY(initial_page_table)
13656 .fill 1024,4,0
13657 #endif
13658+.section .initial_pg_fixmap,"a",@progbits
13659 initial_pg_fixmap:
13660 .fill 1024,4,0
13661+.section .empty_zero_page,"a",@progbits
13662 ENTRY(empty_zero_page)
13663 .fill 4096,1,0
13664+.section .swapper_pg_dir,"a",@progbits
13665 ENTRY(swapper_pg_dir)
13666+#ifdef CONFIG_X86_PAE
13667+ .fill 4,8,0
13668+#else
13669 .fill 1024,4,0
13670+#endif
13671+
13672+/*
13673+ * The IDT has to be page-aligned to simplify the Pentium
13674+ * F0 0F bug workaround.. We have a special link segment
13675+ * for this.
13676+ */
13677+.section .idt,"a",@progbits
13678+ENTRY(idt_table)
13679+ .fill 256,8,0
13680
13681 /*
13682 * This starts the data section.
13683 */
13684 #ifdef CONFIG_X86_PAE
13685-__PAGE_ALIGNED_DATA
13686- /* Page-aligned for the benefit of paravirt? */
13687- .align PAGE_SIZE
13688+.section .initial_page_table,"a",@progbits
13689 ENTRY(initial_page_table)
13690 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
13691 # if KPMDS == 3
13692@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
13693 # error "Kernel PMDs should be 1, 2 or 3"
13694 # endif
13695 .align PAGE_SIZE /* needs to be page-sized too */
13696+
13697+#ifdef CONFIG_PAX_PER_CPU_PGD
13698+ENTRY(cpu_pgd)
13699+ .rept NR_CPUS
13700+ .fill 4,8,0
13701+ .endr
13702+#endif
13703+
13704 #endif
13705
13706 .data
13707 .balign 4
13708 ENTRY(stack_start)
13709- .long init_thread_union+THREAD_SIZE
13710+ .long init_thread_union+THREAD_SIZE-8
13711+
13712+ready: .byte 0
13713
13714+.section .rodata,"a",@progbits
13715 early_recursion_flag:
13716 .long 0
13717
13718-ready: .byte 0
13719-
13720 int_msg:
13721 .asciz "Unknown interrupt or fault at: %p %p %p\n"
13722
13723@@ -707,7 +811,7 @@ fault_msg:
13724 .word 0 # 32 bit align gdt_desc.address
13725 boot_gdt_descr:
13726 .word __BOOT_DS+7
13727- .long boot_gdt - __PAGE_OFFSET
13728+ .long pa(boot_gdt)
13729
13730 .word 0 # 32-bit align idt_desc.address
13731 idt_descr:
13732@@ -718,7 +822,7 @@ idt_descr:
13733 .word 0 # 32 bit align gdt_desc.address
13734 ENTRY(early_gdt_descr)
13735 .word GDT_ENTRIES*8-1
13736- .long gdt_page /* Overwritten for secondary CPUs */
13737+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
13738
13739 /*
13740 * The boot_gdt must mirror the equivalent in setup.S and is
13741@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13742 .align L1_CACHE_BYTES
13743 ENTRY(boot_gdt)
13744 .fill GDT_ENTRY_BOOT_CS,8,0
13745- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13746- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13747+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13748+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13749+
13750+ .align PAGE_SIZE_asm
13751+ENTRY(cpu_gdt_table)
13752+ .rept NR_CPUS
13753+ .quad 0x0000000000000000 /* NULL descriptor */
13754+ .quad 0x0000000000000000 /* 0x0b reserved */
13755+ .quad 0x0000000000000000 /* 0x13 reserved */
13756+ .quad 0x0000000000000000 /* 0x1b reserved */
13757+
13758+#ifdef CONFIG_PAX_KERNEXEC
13759+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13760+#else
13761+ .quad 0x0000000000000000 /* 0x20 unused */
13762+#endif
13763+
13764+ .quad 0x0000000000000000 /* 0x28 unused */
13765+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13766+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13767+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13768+ .quad 0x0000000000000000 /* 0x4b reserved */
13769+ .quad 0x0000000000000000 /* 0x53 reserved */
13770+ .quad 0x0000000000000000 /* 0x5b reserved */
13771+
13772+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13773+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13774+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13775+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13776+
13777+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13778+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13779+
13780+ /*
13781+ * Segments used for calling PnP BIOS have byte granularity.
13782+ * The code segments and data segments have fixed 64k limits,
13783+ * the transfer segment sizes are set at run time.
13784+ */
13785+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
13786+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
13787+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
13788+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
13789+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
13790+
13791+ /*
13792+ * The APM segments have byte granularity and their bases
13793+ * are set at run time. All have 64k limits.
13794+ */
13795+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13796+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13797+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
13798+
13799+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13800+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13801+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13802+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13803+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13804+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13805+
13806+ /* Be sure this is zeroed to avoid false validations in Xen */
13807+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13808+ .endr
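The replacement GDT added to head_32.S is written out as raw 8-byte descriptors, one page-sized table per CPU, padded with zeros (the comment in the hunk attributes the zero fill to Xen validation). Entries such as 0x00cf9b000000ffff follow the standard legacy IA-32 descriptor layout: base 0, a 20-bit limit of 0xfffff counted in 4 KiB units, and a ring-0 code access byte. A small decoder, assuming only that standard layout, shows what the hand-written constants encode:

#include <stdint.h>
#include <stdio.h>

/* Decode a legacy 8-byte segment descriptor into its main fields. */
static void decode_desc(uint64_t d)
{
    uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 32) & 0xf0000);
    uint32_t base   = (uint32_t)((d >> 16) & 0xffffff) | (uint32_t)(((d >> 56) & 0xff) << 24);
    unsigned access = (unsigned)((d >> 40) & 0xff);
    unsigned gran   = (unsigned)((d >> 55) & 1);   /* G bit: limit counted in 4 KiB units */

    printf("base=%#010x limit=%#07x%s access=%#04x\n",
           base, limit, gran ? " (x 4 KiB)" : "", access);
}

int main(void)
{
    decode_desc(0x00cf9b000000ffffULL);   /* kernel 4 GB code segment from the hunk */
    decode_desc(0x00cf93000000ffffULL);   /* kernel 4 GB data segment */
    return 0;
}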
13809diff -urNp linux-3.1.1/arch/x86/kernel/head_64.S linux-3.1.1/arch/x86/kernel/head_64.S
13810--- linux-3.1.1/arch/x86/kernel/head_64.S 2011-11-11 15:19:27.000000000 -0500
13811+++ linux-3.1.1/arch/x86/kernel/head_64.S 2011-11-16 18:39:07.000000000 -0500
13812@@ -19,6 +19,7 @@
13813 #include <asm/cache.h>
13814 #include <asm/processor-flags.h>
13815 #include <asm/percpu.h>
13816+#include <asm/cpufeature.h>
13817
13818 #ifdef CONFIG_PARAVIRT
13819 #include <asm/asm-offsets.h>
13820@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13821 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13822 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13823 L3_START_KERNEL = pud_index(__START_KERNEL_map)
13824+L4_VMALLOC_START = pgd_index(VMALLOC_START)
13825+L3_VMALLOC_START = pud_index(VMALLOC_START)
13826+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13827+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13828
13829 .text
13830 __HEAD
13831@@ -85,35 +90,22 @@ startup_64:
13832 */
13833 addq %rbp, init_level4_pgt + 0(%rip)
13834 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13835+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13836+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13837 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13838
13839 addq %rbp, level3_ident_pgt + 0(%rip)
13840+#ifndef CONFIG_XEN
13841+ addq %rbp, level3_ident_pgt + 8(%rip)
13842+#endif
13843
13844- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13845- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13846+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13847
13848- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13849+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13850+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13851
13852- /* Add an Identity mapping if I am above 1G */
13853- leaq _text(%rip), %rdi
13854- andq $PMD_PAGE_MASK, %rdi
13855-
13856- movq %rdi, %rax
13857- shrq $PUD_SHIFT, %rax
13858- andq $(PTRS_PER_PUD - 1), %rax
13859- jz ident_complete
13860-
13861- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13862- leaq level3_ident_pgt(%rip), %rbx
13863- movq %rdx, 0(%rbx, %rax, 8)
13864-
13865- movq %rdi, %rax
13866- shrq $PMD_SHIFT, %rax
13867- andq $(PTRS_PER_PMD - 1), %rax
13868- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13869- leaq level2_spare_pgt(%rip), %rbx
13870- movq %rdx, 0(%rbx, %rax, 8)
13871-ident_complete:
13872+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13873+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13874
13875 /*
13876 * Fixup the kernel text+data virtual addresses. Note that
13877@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13878 * after the boot processor executes this code.
13879 */
13880
13881- /* Enable PAE mode and PGE */
13882- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13883+ /* Enable PAE mode and PSE/PGE */
13884+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13885 movq %rax, %cr4
13886
13887 /* Setup early boot stage 4 level pagetables. */
13888@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13889 movl $MSR_EFER, %ecx
13890 rdmsr
13891 btsl $_EFER_SCE, %eax /* Enable System Call */
13892- btl $20,%edi /* No Execute supported? */
13893+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13894 jnc 1f
13895 btsl $_EFER_NX, %eax
13896+ leaq init_level4_pgt(%rip), %rdi
13897+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13898+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13899+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13900+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13901 1: wrmsr /* Make changes effective */
13902
13903 /* Setup cr0 */
13904@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13905 bad_address:
13906 jmp bad_address
13907
13908- .section ".init.text","ax"
13909+ __INIT
13910 #ifdef CONFIG_EARLY_PRINTK
13911 .globl early_idt_handlers
13912 early_idt_handlers:
13913@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13914 #endif /* EARLY_PRINTK */
13915 1: hlt
13916 jmp 1b
13917+ .previous
13918
13919 #ifdef CONFIG_EARLY_PRINTK
13920+ __INITDATA
13921 early_recursion_flag:
13922 .long 0
13923+ .previous
13924
13925+ .section .rodata,"a",@progbits
13926 early_idt_msg:
13927 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13928 early_idt_ripmsg:
13929 .asciz "RIP %s\n"
13930-#endif /* CONFIG_EARLY_PRINTK */
13931 .previous
13932+#endif /* CONFIG_EARLY_PRINTK */
13933
13934+ .section .rodata,"a",@progbits
13935 #define NEXT_PAGE(name) \
13936 .balign PAGE_SIZE; \
13937 ENTRY(name)
13938@@ -338,7 +340,6 @@ ENTRY(name)
13939 i = i + 1 ; \
13940 .endr
13941
13942- .data
13943 /*
13944 * This default setting generates an ident mapping at address 0x100000
13945 * and a mapping for the kernel that precisely maps virtual address
13946@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13947 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13948 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13949 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13950+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13951+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13952+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13953+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13954 .org init_level4_pgt + L4_START_KERNEL*8, 0
13955 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13956 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13957
13958+#ifdef CONFIG_PAX_PER_CPU_PGD
13959+NEXT_PAGE(cpu_pgd)
13960+ .rept NR_CPUS
13961+ .fill 512,8,0
13962+ .endr
13963+#endif
13964+
13965 NEXT_PAGE(level3_ident_pgt)
13966 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13967+#ifdef CONFIG_XEN
13968 .fill 511,8,0
13969+#else
13970+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13971+ .fill 510,8,0
13972+#endif
13973+
13974+NEXT_PAGE(level3_vmalloc_pgt)
13975+ .fill 512,8,0
13976+
13977+NEXT_PAGE(level3_vmemmap_pgt)
13978+ .fill L3_VMEMMAP_START,8,0
13979+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13980
13981 NEXT_PAGE(level3_kernel_pgt)
13982 .fill L3_START_KERNEL,8,0
13983@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13984 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13985 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13986
13987+NEXT_PAGE(level2_vmemmap_pgt)
13988+ .fill 512,8,0
13989+
13990 NEXT_PAGE(level2_fixmap_pgt)
13991- .fill 506,8,0
13992- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13993- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13994- .fill 5,8,0
13995+ .fill 507,8,0
13996+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13997+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13998+ .fill 4,8,0
13999
14000-NEXT_PAGE(level1_fixmap_pgt)
14001+NEXT_PAGE(level1_vsyscall_pgt)
14002 .fill 512,8,0
14003
14004-NEXT_PAGE(level2_ident_pgt)
14005- /* Since I easily can, map the first 1G.
14006+ /* Since I easily can, map the first 2G.
14007 * Don't set NX because code runs from these pages.
14008 */
14009- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14010+NEXT_PAGE(level2_ident_pgt)
14011+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14012
14013 NEXT_PAGE(level2_kernel_pgt)
14014 /*
14015@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
14016 * If you want to increase this then increase MODULES_VADDR
14017 * too.)
14018 */
14019- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14020- KERNEL_IMAGE_SIZE/PMD_SIZE)
14021-
14022-NEXT_PAGE(level2_spare_pgt)
14023- .fill 512, 8, 0
14024+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14025
14026 #undef PMDS
14027 #undef NEXT_PAGE
14028
14029- .data
14030+ .align PAGE_SIZE
14031+ENTRY(cpu_gdt_table)
14032+ .rept NR_CPUS
14033+ .quad 0x0000000000000000 /* NULL descriptor */
14034+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14035+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
14036+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
14037+ .quad 0x00cffb000000ffff /* __USER32_CS */
14038+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14039+ .quad 0x00affb000000ffff /* __USER_CS */
14040+
14041+#ifdef CONFIG_PAX_KERNEXEC
14042+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14043+#else
14044+ .quad 0x0 /* unused */
14045+#endif
14046+
14047+ .quad 0,0 /* TSS */
14048+ .quad 0,0 /* LDT */
14049+ .quad 0,0,0 /* three TLS descriptors */
14050+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
14051+ /* asm/segment.h:GDT_ENTRIES must match this */
14052+
14053+ /* zero the remaining page */
14054+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14055+ .endr
14056+
14057 .align 16
14058 .globl early_gdt_descr
14059 early_gdt_descr:
14060 .word GDT_ENTRIES*8-1
14061 early_gdt_descr_base:
14062- .quad INIT_PER_CPU_VAR(gdt_page)
14063+ .quad cpu_gdt_table
14064
14065 ENTRY(phys_base)
14066 /* This must match the first entry in level2_kernel_pgt */
14067 .quad 0x0000000000000000
14068
14069 #include "../../x86/xen/xen-head.S"
14070-
14071- .section .bss, "aw", @nobits
14072+
14073+ .section .rodata,"a",@progbits
14074 .align L1_CACHE_BYTES
14075 ENTRY(idt_table)
14076- .skip IDT_ENTRIES * 16
14077+ .fill 512,8,0
14078
14079 __PAGE_ALIGNED_BSS
14080 .align PAGE_SIZE
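In the head_64.S hunk the secondary-startup code now tests the NX feature bit symbolically, `X86_FEATURE_NX & 31`, i.e. bit 20 of the CPUID 0x80000001 EDX leaf, instead of the bare constant 20; when NX is present it sets EFER.NX and additionally turns the NX bit on in the freshly built top-level page-table entries and in __supported_pte_mask. A userspace check of the same feature bit, using GCC/Clang's <cpuid.h> helper (x86 only), looks like this:

#include <cpuid.h>
#include <stdio.h>

/* Query the NX (execute-disable) CPU feature the same way the boot code
 * does: CPUID leaf 0x80000001, EDX bit 20. */
int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) {
        puts("extended CPUID leaf not available");
        return 1;
    }
    printf("NX supported: %s\n", (edx >> 20) & 1 ? "yes" : "no");
    return 0;
}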
14081diff -urNp linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c
14082--- linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c 2011-11-11 15:19:27.000000000 -0500
14083+++ linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c 2011-11-16 18:39:07.000000000 -0500
14084@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14085 EXPORT_SYMBOL(cmpxchg8b_emu);
14086 #endif
14087
14088+EXPORT_SYMBOL_GPL(cpu_gdt_table);
14089+
14090 /* Networking helper routines. */
14091 EXPORT_SYMBOL(csum_partial_copy_generic);
14092+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14093+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14094
14095 EXPORT_SYMBOL(__get_user_1);
14096 EXPORT_SYMBOL(__get_user_2);
14097@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14098
14099 EXPORT_SYMBOL(csum_partial);
14100 EXPORT_SYMBOL(empty_zero_page);
14101+
14102+#ifdef CONFIG_PAX_KERNEXEC
14103+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14104+#endif
14105diff -urNp linux-3.1.1/arch/x86/kernel/i8259.c linux-3.1.1/arch/x86/kernel/i8259.c
14106--- linux-3.1.1/arch/x86/kernel/i8259.c 2011-11-11 15:19:27.000000000 -0500
14107+++ linux-3.1.1/arch/x86/kernel/i8259.c 2011-11-16 18:39:07.000000000 -0500
14108@@ -210,7 +210,7 @@ spurious_8259A_irq:
14109 "spurious 8259A interrupt: IRQ%d.\n", irq);
14110 spurious_irq_mask |= irqmask;
14111 }
14112- atomic_inc(&irq_err_count);
14113+ atomic_inc_unchecked(&irq_err_count);
14114 /*
14115 * Theoretically we do not have to handle this IRQ,
14116 * but in Linux this does not cause problems and is
14117diff -urNp linux-3.1.1/arch/x86/kernel/init_task.c linux-3.1.1/arch/x86/kernel/init_task.c
14118--- linux-3.1.1/arch/x86/kernel/init_task.c 2011-11-11 15:19:27.000000000 -0500
14119+++ linux-3.1.1/arch/x86/kernel/init_task.c 2011-11-16 18:39:07.000000000 -0500
14120@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14121 * way process stacks are handled. This is done by having a special
14122 * "init_task" linker map entry..
14123 */
14124-union thread_union init_thread_union __init_task_data =
14125- { INIT_THREAD_INFO(init_task) };
14126+union thread_union init_thread_union __init_task_data;
14127
14128 /*
14129 * Initial task structure.
14130@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14131 * section. Since TSS's are completely CPU-local, we want them
14132 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14133 */
14134-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14135-
14136+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14137+EXPORT_SYMBOL(init_tss);
14138diff -urNp linux-3.1.1/arch/x86/kernel/ioport.c linux-3.1.1/arch/x86/kernel/ioport.c
14139--- linux-3.1.1/arch/x86/kernel/ioport.c 2011-11-11 15:19:27.000000000 -0500
14140+++ linux-3.1.1/arch/x86/kernel/ioport.c 2011-11-16 18:40:08.000000000 -0500
14141@@ -6,6 +6,7 @@
14142 #include <linux/sched.h>
14143 #include <linux/kernel.h>
14144 #include <linux/capability.h>
14145+#include <linux/security.h>
14146 #include <linux/errno.h>
14147 #include <linux/types.h>
14148 #include <linux/ioport.h>
14149@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
14150
14151 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14152 return -EINVAL;
14153+#ifdef CONFIG_GRKERNSEC_IO
14154+ if (turn_on && grsec_disable_privio) {
14155+ gr_handle_ioperm();
14156+ return -EPERM;
14157+ }
14158+#endif
14159 if (turn_on && !capable(CAP_SYS_RAWIO))
14160 return -EPERM;
14161
14162@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
14163 * because the ->io_bitmap_max value must match the bitmap
14164 * contents:
14165 */
14166- tss = &per_cpu(init_tss, get_cpu());
14167+ tss = init_tss + get_cpu();
14168
14169 if (turn_on)
14170 bitmap_clear(t->io_bitmap_ptr, from, num);
14171@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
14172 return -EINVAL;
14173 /* Trying to gain more privileges? */
14174 if (level > old) {
14175+#ifdef CONFIG_GRKERNSEC_IO
14176+ if (grsec_disable_privio) {
14177+ gr_handle_iopl();
14178+ return -EPERM;
14179+ }
14180+#endif
14181 if (!capable(CAP_SYS_RAWIO))
14182 return -EPERM;
14183 }
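The ioport.c hunk keeps the existing overflow-safe range check in sys_ioperm, `(from + num <= from) || (from + num > IO_BITMAP_BITS)`, and layers CONFIG_GRKERNSEC_IO on top of it so raw port access can be refused even to CAP_SYS_RAWIO holders. The wrap-around test generalises to any unsigned range validation; a minimal standalone sketch:

#include <limits.h>
#include <stdio.h>

#define IO_BITMAP_BITS 65536UL   /* size of the x86 ioperm bitmap, in bits */

/* Overflow-safe check that [from, from + num) fits inside the bitmap.
 * "from + num <= from" catches unsigned wrap-around when num is huge,
 * and also rejects the empty num == 0 case, as the kernel code does. */
static int ioperm_range_ok(unsigned long from, unsigned long num)
{
    if (from + num <= from)
        return 0;
    return from + num <= IO_BITMAP_BITS;
}

int main(void)
{
    printf("%d\n", ioperm_range_ok(0x3f8, 8));      /* 1: a normal UART range   */
    printf("%d\n", ioperm_range_ok(0xffff, 2));     /* 0: runs past the bitmap  */
    printf("%d\n", ioperm_range_ok(1, ULONG_MAX));  /* 0: wraps around          */
    return 0;
}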
14184diff -urNp linux-3.1.1/arch/x86/kernel/irq_32.c linux-3.1.1/arch/x86/kernel/irq_32.c
14185--- linux-3.1.1/arch/x86/kernel/irq_32.c 2011-11-11 15:19:27.000000000 -0500
14186+++ linux-3.1.1/arch/x86/kernel/irq_32.c 2011-11-16 18:39:07.000000000 -0500
14187@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
14188 __asm__ __volatile__("andl %%esp,%0" :
14189 "=r" (sp) : "0" (THREAD_SIZE - 1));
14190
14191- return sp < (sizeof(struct thread_info) + STACK_WARN);
14192+ return sp < STACK_WARN;
14193 }
14194
14195 static void print_stack_overflow(void)
14196@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
14197 * per-CPU IRQ handling contexts (thread information and stack)
14198 */
14199 union irq_ctx {
14200- struct thread_info tinfo;
14201- u32 stack[THREAD_SIZE/sizeof(u32)];
14202+ unsigned long previous_esp;
14203+ u32 stack[THREAD_SIZE/sizeof(u32)];
14204 } __attribute__((aligned(THREAD_SIZE)));
14205
14206 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14207@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
14208 static inline int
14209 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14210 {
14211- union irq_ctx *curctx, *irqctx;
14212+ union irq_ctx *irqctx;
14213 u32 *isp, arg1, arg2;
14214
14215- curctx = (union irq_ctx *) current_thread_info();
14216 irqctx = __this_cpu_read(hardirq_ctx);
14217
14218 /*
14219@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
14220 * handler) we can't do that and just have to keep using the
14221 * current stack (which is the irq stack already after all)
14222 */
14223- if (unlikely(curctx == irqctx))
14224+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14225 return 0;
14226
14227 /* build the stack frame on the IRQ stack */
14228- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14229- irqctx->tinfo.task = curctx->tinfo.task;
14230- irqctx->tinfo.previous_esp = current_stack_pointer;
14231+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14232+ irqctx->previous_esp = current_stack_pointer;
14233
14234- /*
14235- * Copy the softirq bits in preempt_count so that the
14236- * softirq checks work in the hardirq context.
14237- */
14238- irqctx->tinfo.preempt_count =
14239- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14240- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14241+#ifdef CONFIG_PAX_MEMORY_UDEREF
14242+ __set_fs(MAKE_MM_SEG(0));
14243+#endif
14244
14245 if (unlikely(overflow))
14246 call_on_stack(print_stack_overflow, isp);
14247@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
14248 : "0" (irq), "1" (desc), "2" (isp),
14249 "D" (desc->handle_irq)
14250 : "memory", "cc", "ecx");
14251+
14252+#ifdef CONFIG_PAX_MEMORY_UDEREF
14253+ __set_fs(current_thread_info()->addr_limit);
14254+#endif
14255+
14256 return 1;
14257 }
14258
14259@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
14260 */
14261 void __cpuinit irq_ctx_init(int cpu)
14262 {
14263- union irq_ctx *irqctx;
14264-
14265 if (per_cpu(hardirq_ctx, cpu))
14266 return;
14267
14268- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14269- THREAD_FLAGS,
14270- THREAD_ORDER));
14271- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14272- irqctx->tinfo.cpu = cpu;
14273- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14274- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14275-
14276- per_cpu(hardirq_ctx, cpu) = irqctx;
14277-
14278- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14279- THREAD_FLAGS,
14280- THREAD_ORDER));
14281- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14282- irqctx->tinfo.cpu = cpu;
14283- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14284-
14285- per_cpu(softirq_ctx, cpu) = irqctx;
14286+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14287+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14288
14289 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14290 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14291@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
14292 asmlinkage void do_softirq(void)
14293 {
14294 unsigned long flags;
14295- struct thread_info *curctx;
14296 union irq_ctx *irqctx;
14297 u32 *isp;
14298
14299@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
14300 local_irq_save(flags);
14301
14302 if (local_softirq_pending()) {
14303- curctx = current_thread_info();
14304 irqctx = __this_cpu_read(softirq_ctx);
14305- irqctx->tinfo.task = curctx->task;
14306- irqctx->tinfo.previous_esp = current_stack_pointer;
14307+ irqctx->previous_esp = current_stack_pointer;
14308
14309 /* build the stack frame on the softirq stack */
14310- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14311+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14312+
14313+#ifdef CONFIG_PAX_MEMORY_UDEREF
14314+ __set_fs(MAKE_MM_SEG(0));
14315+#endif
14316
14317 call_on_stack(__do_softirq, isp);
14318+
14319+#ifdef CONFIG_PAX_MEMORY_UDEREF
14320+ __set_fs(current_thread_info()->addr_limit);
14321+#endif
14322+
14323 /*
14324 * Shouldn't happen, we returned above if in_interrupt():
14325 */
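The irq_32.c rewrite drops the thread_info copy that used to sit at the bottom of each per-CPU IRQ stack and keeps only previous_esp. Whether a stack switch is needed is then decided purely from addresses: the IRQ stacks are THREAD_SIZE sized and THREAD_SIZE aligned, so if the current stack pointer already lies within THREAD_SIZE of the irq context, execution is already on that stack. A small sketch of that containment test; the THREAD_SIZE value below is illustrative.

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL   /* illustrative; a two-page i386 stack */

/* Returns 1 when the stack pointer already lies inside the given
 * THREAD_SIZE-sized, THREAD_SIZE-aligned stack area.  Unsigned
 * subtraction makes pointers below the base fail the test too. */
static int on_this_stack(uintptr_t sp, uintptr_t stack_base)
{
    return sp - stack_base < THREAD_SIZE;
}

int main(void)
{
    uintptr_t irqstack = 0x100000;   /* pretend base of the IRQ stack */

    printf("%d\n", on_this_stack(0x100800, irqstack));  /* 1: inside the area     */
    printf("%d\n", on_this_stack(0x0ff800, irqstack));  /* 0: on some other stack */
    return 0;
}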
14326diff -urNp linux-3.1.1/arch/x86/kernel/irq.c linux-3.1.1/arch/x86/kernel/irq.c
14327--- linux-3.1.1/arch/x86/kernel/irq.c 2011-11-11 15:19:27.000000000 -0500
14328+++ linux-3.1.1/arch/x86/kernel/irq.c 2011-11-16 18:39:07.000000000 -0500
14329@@ -17,7 +17,7 @@
14330 #include <asm/mce.h>
14331 #include <asm/hw_irq.h>
14332
14333-atomic_t irq_err_count;
14334+atomic_unchecked_t irq_err_count;
14335
14336 /* Function pointer for generic interrupt vector handling */
14337 void (*x86_platform_ipi_callback)(void) = NULL;
14338@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
14339 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14340 seq_printf(p, " Machine check polls\n");
14341 #endif
14342- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14343+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14344 #if defined(CONFIG_X86_IO_APIC)
14345- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14346+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14347 #endif
14348 return 0;
14349 }
14350@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14351
14352 u64 arch_irq_stat(void)
14353 {
14354- u64 sum = atomic_read(&irq_err_count);
14355+ u64 sum = atomic_read_unchecked(&irq_err_count);
14356
14357 #ifdef CONFIG_X86_IO_APIC
14358- sum += atomic_read(&irq_mis_count);
14359+ sum += atomic_read_unchecked(&irq_mis_count);
14360 #endif
14361 return sum;
14362 }
14363diff -urNp linux-3.1.1/arch/x86/kernel/kgdb.c linux-3.1.1/arch/x86/kernel/kgdb.c
14364--- linux-3.1.1/arch/x86/kernel/kgdb.c 2011-11-11 15:19:27.000000000 -0500
14365+++ linux-3.1.1/arch/x86/kernel/kgdb.c 2011-11-16 18:39:07.000000000 -0500
14366@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
14367 #ifdef CONFIG_X86_32
14368 switch (regno) {
14369 case GDB_SS:
14370- if (!user_mode_vm(regs))
14371+ if (!user_mode(regs))
14372 *(unsigned long *)mem = __KERNEL_DS;
14373 break;
14374 case GDB_SP:
14375- if (!user_mode_vm(regs))
14376+ if (!user_mode(regs))
14377 *(unsigned long *)mem = kernel_stack_pointer(regs);
14378 break;
14379 case GDB_GS:
14380@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
14381 case 'k':
14382 /* clear the trace bit */
14383 linux_regs->flags &= ~X86_EFLAGS_TF;
14384- atomic_set(&kgdb_cpu_doing_single_step, -1);
14385+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14386
14387 /* set the trace bit if we're stepping */
14388 if (remcomInBuffer[0] == 's') {
14389 linux_regs->flags |= X86_EFLAGS_TF;
14390- atomic_set(&kgdb_cpu_doing_single_step,
14391+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14392 raw_smp_processor_id());
14393 }
14394
14395@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
14396 return NOTIFY_DONE;
14397
14398 case DIE_DEBUG:
14399- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
14400+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
14401 if (user_mode(regs))
14402 return single_step_cont(regs, args);
14403 break;
14404diff -urNp linux-3.1.1/arch/x86/kernel/kprobes.c linux-3.1.1/arch/x86/kernel/kprobes.c
14405--- linux-3.1.1/arch/x86/kernel/kprobes.c 2011-11-11 15:19:27.000000000 -0500
14406+++ linux-3.1.1/arch/x86/kernel/kprobes.c 2011-11-16 18:39:07.000000000 -0500
14407@@ -117,8 +117,11 @@ static void __kprobes __synthesize_relat
14408 } __attribute__((packed)) *insn;
14409
14410 insn = (struct __arch_relative_insn *)from;
14411+
14412+ pax_open_kernel();
14413 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
14414 insn->op = op;
14415+ pax_close_kernel();
14416 }
14417
14418 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
14419@@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_op
14420 kprobe_opcode_t opcode;
14421 kprobe_opcode_t *orig_opcodes = opcodes;
14422
14423- if (search_exception_tables((unsigned long)opcodes))
14424+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14425 return 0; /* Page fault may occur on this address. */
14426
14427 retry:
14428@@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(
14429 }
14430 }
14431 insn_get_length(&insn);
14432+ pax_open_kernel();
14433 memcpy(dest, insn.kaddr, insn.length);
14434+ pax_close_kernel();
14435
14436 #ifdef CONFIG_X86_64
14437 if (insn_rip_relative(&insn)) {
14438@@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(
14439 (u8 *) dest;
14440 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
14441 disp = (u8 *) dest + insn_offset_displacement(&insn);
14442+ pax_open_kernel();
14443 *(s32 *) disp = (s32) newdisp;
14444+ pax_close_kernel();
14445 }
14446 #endif
14447 return insn.length;
14448@@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(s
14449 */
14450 __copy_instruction(p->ainsn.insn, p->addr, 0);
14451
14452- if (can_boost(p->addr))
14453+ if (can_boost(ktla_ktva(p->addr)))
14454 p->ainsn.boostable = 0;
14455 else
14456 p->ainsn.boostable = -1;
14457
14458- p->opcode = *p->addr;
14459+ p->opcode = *(ktla_ktva(p->addr));
14460 }
14461
14462 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14463@@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(s
14464 * nor set current_kprobe, because it doesn't use single
14465 * stepping.
14466 */
14467- regs->ip = (unsigned long)p->ainsn.insn;
14468+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14469 preempt_enable_no_resched();
14470 return;
14471 }
14472@@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(s
14473 if (p->opcode == BREAKPOINT_INSTRUCTION)
14474 regs->ip = (unsigned long)p->addr;
14475 else
14476- regs->ip = (unsigned long)p->ainsn.insn;
14477+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14478 }
14479
14480 /*
14481@@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(stru
14482 setup_singlestep(p, regs, kcb, 0);
14483 return 1;
14484 }
14485- } else if (*addr != BREAKPOINT_INSTRUCTION) {
14486+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14487 /*
14488 * The breakpoint instruction was removed right
14489 * after we hit it. Another cpu has removed
14490@@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_t
14491 " movq %rax, 152(%rsp)\n"
14492 RESTORE_REGS_STRING
14493 " popfq\n"
14494+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
14495+ " btsq $63,(%rsp)\n"
14496+#endif
14497 #else
14498 " pushf\n"
14499 SAVE_REGS_STRING
14500@@ -819,7 +829,7 @@ static void __kprobes resume_execution(s
14501 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14502 {
14503 unsigned long *tos = stack_addr(regs);
14504- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14505+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14506 unsigned long orig_ip = (unsigned long)p->addr;
14507 kprobe_opcode_t *insn = p->ainsn.insn;
14508
14509@@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(s
14510 struct die_args *args = data;
14511 int ret = NOTIFY_DONE;
14512
14513- if (args->regs && user_mode_vm(args->regs))
14514+ if (args->regs && user_mode(args->regs))
14515 return ret;
14516
14517 switch (val) {
14518@@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kpr
14519 * Verify if the address gap is in 2GB range, because this uses
14520 * a relative jump.
14521 */
14522- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
14523+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
14524 if (abs(rel) > 0x7fffffff)
14525 return -ERANGE;
14526
14527@@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kpr
14528 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
14529
14530 /* Set probe function call */
14531- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
14532+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
14533
14534 /* Set returning jmp instruction at the tail of out-of-line buffer */
14535 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
14536- (u8 *)op->kp.addr + op->optinsn.size);
14537+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
14538
14539 flush_icache_range((unsigned long) buf,
14540 (unsigned long) buf + TMPL_END_IDX +
14541@@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kpr
14542 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
14543
14544 /* Backup instructions which will be replaced by jump address */
14545- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
14546+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
14547 RELATIVE_ADDR_SIZE);
14548
14549 insn_buf[0] = RELATIVEJUMP_OPCODE;
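The ktla_ktva()/ktva_ktla() calls sprinkled through the kprobes.c hunk convert between the two views PaX KERNEXEC keeps of kernel text: the address the code executes at and the alias through which it is read or written. Where the two views differ by a constant offset the translation is a simple add or subtract; the toy model below is purely illustrative (the delta and helper semantics are assumptions, and on configurations without the double mapping the helpers presumably collapse to the identity).

#include <stdint.h>
#include <stdio.h>

/* Toy model: kernel text reachable at two addresses that differ by a
 * fixed delta.  One alias executes, the other is used for reads/writes. */
static const uintptr_t KTEXT_DELTA = 0x10000000;   /* illustrative constant */

static uintptr_t ktla_ktva(uintptr_t exec_addr)  { return exec_addr + KTEXT_DELTA; }
static uintptr_t ktva_ktla(uintptr_t alias_addr) { return alias_addr - KTEXT_DELTA; }

int main(void)
{
    uintptr_t probe = 0xc1000000;        /* pretend address of a probed insn */
    uintptr_t alias = ktla_ktva(probe);  /* where the opcode bytes are read  */

    printf("probe %#lx -> alias %#lx -> back %#lx\n",
           (unsigned long)probe,
           (unsigned long)alias,
           (unsigned long)ktva_ktla(alias));
    return 0;
}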
14550diff -urNp linux-3.1.1/arch/x86/kernel/kvm.c linux-3.1.1/arch/x86/kernel/kvm.c
14551--- linux-3.1.1/arch/x86/kernel/kvm.c 2011-11-11 15:19:27.000000000 -0500
14552+++ linux-3.1.1/arch/x86/kernel/kvm.c 2011-11-16 18:39:07.000000000 -0500
14553@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(vo
14554 pv_mmu_ops.set_pud = kvm_set_pud;
14555 #if PAGETABLE_LEVELS == 4
14556 pv_mmu_ops.set_pgd = kvm_set_pgd;
14557+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
14558 #endif
14559 #endif
14560 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
14561diff -urNp linux-3.1.1/arch/x86/kernel/ldt.c linux-3.1.1/arch/x86/kernel/ldt.c
14562--- linux-3.1.1/arch/x86/kernel/ldt.c 2011-11-11 15:19:27.000000000 -0500
14563+++ linux-3.1.1/arch/x86/kernel/ldt.c 2011-11-16 18:39:07.000000000 -0500
14564@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
14565 if (reload) {
14566 #ifdef CONFIG_SMP
14567 preempt_disable();
14568- load_LDT(pc);
14569+ load_LDT_nolock(pc);
14570 if (!cpumask_equal(mm_cpumask(current->mm),
14571 cpumask_of(smp_processor_id())))
14572 smp_call_function(flush_ldt, current->mm, 1);
14573 preempt_enable();
14574 #else
14575- load_LDT(pc);
14576+ load_LDT_nolock(pc);
14577 #endif
14578 }
14579 if (oldsize) {
14580@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
14581 return err;
14582
14583 for (i = 0; i < old->size; i++)
14584- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14585+ write_ldt_entry(new->ldt, i, old->ldt + i);
14586 return 0;
14587 }
14588
14589@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
14590 retval = copy_ldt(&mm->context, &old_mm->context);
14591 mutex_unlock(&old_mm->context.lock);
14592 }
14593+
14594+ if (tsk == current) {
14595+ mm->context.vdso = 0;
14596+
14597+#ifdef CONFIG_X86_32
14598+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14599+ mm->context.user_cs_base = 0UL;
14600+ mm->context.user_cs_limit = ~0UL;
14601+
14602+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14603+ cpus_clear(mm->context.cpu_user_cs_mask);
14604+#endif
14605+
14606+#endif
14607+#endif
14608+
14609+ }
14610+
14611 return retval;
14612 }
14613
14614@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
14615 }
14616 }
14617
14618+#ifdef CONFIG_PAX_SEGMEXEC
14619+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14620+ error = -EINVAL;
14621+ goto out_unlock;
14622+ }
14623+#endif
14624+
14625 fill_ldt(&ldt, &ldt_info);
14626 if (oldmode)
14627 ldt.avl = 0;
14628diff -urNp linux-3.1.1/arch/x86/kernel/machine_kexec_32.c linux-3.1.1/arch/x86/kernel/machine_kexec_32.c
14629--- linux-3.1.1/arch/x86/kernel/machine_kexec_32.c 2011-11-11 15:19:27.000000000 -0500
14630+++ linux-3.1.1/arch/x86/kernel/machine_kexec_32.c 2011-11-16 18:39:07.000000000 -0500
14631@@ -27,7 +27,7 @@
14632 #include <asm/cacheflush.h>
14633 #include <asm/debugreg.h>
14634
14635-static void set_idt(void *newidt, __u16 limit)
14636+static void set_idt(struct desc_struct *newidt, __u16 limit)
14637 {
14638 struct desc_ptr curidt;
14639
14640@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
14641 }
14642
14643
14644-static void set_gdt(void *newgdt, __u16 limit)
14645+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14646 {
14647 struct desc_ptr curgdt;
14648
14649@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14650 }
14651
14652 control_page = page_address(image->control_code_page);
14653- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14654+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14655
14656 relocate_kernel_ptr = control_page;
14657 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14658diff -urNp linux-3.1.1/arch/x86/kernel/microcode_intel.c linux-3.1.1/arch/x86/kernel/microcode_intel.c
14659--- linux-3.1.1/arch/x86/kernel/microcode_intel.c 2011-11-11 15:19:27.000000000 -0500
14660+++ linux-3.1.1/arch/x86/kernel/microcode_intel.c 2011-11-16 18:39:07.000000000 -0500
14661@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
14662
14663 static int get_ucode_user(void *to, const void *from, size_t n)
14664 {
14665- return copy_from_user(to, from, n);
14666+ return copy_from_user(to, (const void __force_user *)from, n);
14667 }
14668
14669 static enum ucode_state
14670 request_microcode_user(int cpu, const void __user *buf, size_t size)
14671 {
14672- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14673+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
14674 }
14675
14676 static void microcode_fini_cpu(int cpu)
14677diff -urNp linux-3.1.1/arch/x86/kernel/module.c linux-3.1.1/arch/x86/kernel/module.c
14678--- linux-3.1.1/arch/x86/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
14679+++ linux-3.1.1/arch/x86/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
14680@@ -36,15 +36,60 @@
14681 #define DEBUGP(fmt...)
14682 #endif
14683
14684-void *module_alloc(unsigned long size)
14685+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
14686 {
14687 if (PAGE_ALIGN(size) > MODULES_LEN)
14688 return NULL;
14689 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
14690- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
14691+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
14692 -1, __builtin_return_address(0));
14693 }
14694
14695+void *module_alloc(unsigned long size)
14696+{
14697+
14698+#ifdef CONFIG_PAX_KERNEXEC
14699+ return __module_alloc(size, PAGE_KERNEL);
14700+#else
14701+ return __module_alloc(size, PAGE_KERNEL_EXEC);
14702+#endif
14703+
14704+}
14705+
14706+#ifdef CONFIG_PAX_KERNEXEC
14707+#ifdef CONFIG_X86_32
14708+void *module_alloc_exec(unsigned long size)
14709+{
14710+ struct vm_struct *area;
14711+
14712+ if (size == 0)
14713+ return NULL;
14714+
14715+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
14716+ return area ? area->addr : NULL;
14717+}
14718+EXPORT_SYMBOL(module_alloc_exec);
14719+
14720+void module_free_exec(struct module *mod, void *module_region)
14721+{
14722+ vunmap(module_region);
14723+}
14724+EXPORT_SYMBOL(module_free_exec);
14725+#else
14726+void module_free_exec(struct module *mod, void *module_region)
14727+{
14728+ module_free(mod, module_region);
14729+}
14730+EXPORT_SYMBOL(module_free_exec);
14731+
14732+void *module_alloc_exec(unsigned long size)
14733+{
14734+ return __module_alloc(size, PAGE_KERNEL_RX);
14735+}
14736+EXPORT_SYMBOL(module_alloc_exec);
14737+#endif
14738+#endif
14739+
14740 #ifdef CONFIG_X86_32
14741 int apply_relocate(Elf32_Shdr *sechdrs,
14742 const char *strtab,
14743@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14744 unsigned int i;
14745 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14746 Elf32_Sym *sym;
14747- uint32_t *location;
14748+ uint32_t *plocation, location;
14749
14750 DEBUGP("Applying relocate section %u to %u\n", relsec,
14751 sechdrs[relsec].sh_info);
14752 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14753 /* This is where to make the change */
14754- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14755- + rel[i].r_offset;
14756+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14757+ location = (uint32_t)plocation;
14758+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14759+ plocation = ktla_ktva((void *)plocation);
14760 /* This is the symbol it is referring to. Note that all
14761 undefined symbols have been resolved. */
14762 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14763@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14764 switch (ELF32_R_TYPE(rel[i].r_info)) {
14765 case R_386_32:
14766 /* We add the value into the location given */
14767- *location += sym->st_value;
14768+ pax_open_kernel();
14769+ *plocation += sym->st_value;
14770+ pax_close_kernel();
14771 break;
14772 case R_386_PC32:
14773 /* Add the value, subtract its postition */
14774- *location += sym->st_value - (uint32_t)location;
14775+ pax_open_kernel();
14776+ *plocation += sym->st_value - location;
14777+ pax_close_kernel();
14778 break;
14779 default:
14780 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14781@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14782 case R_X86_64_NONE:
14783 break;
14784 case R_X86_64_64:
14785+ pax_open_kernel();
14786 *(u64 *)loc = val;
14787+ pax_close_kernel();
14788 break;
14789 case R_X86_64_32:
14790+ pax_open_kernel();
14791 *(u32 *)loc = val;
14792+ pax_close_kernel();
14793 if (val != *(u32 *)loc)
14794 goto overflow;
14795 break;
14796 case R_X86_64_32S:
14797+ pax_open_kernel();
14798 *(s32 *)loc = val;
14799+ pax_close_kernel();
14800 if ((s64)val != *(s32 *)loc)
14801 goto overflow;
14802 break;
14803 case R_X86_64_PC32:
14804 val -= (u64)loc;
14805+ pax_open_kernel();
14806 *(u32 *)loc = val;
14807+ pax_close_kernel();
14808+
14809 #if 0
14810 if ((s64)val != *(s32 *)loc)
14811 goto overflow;
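In the module.c hunk the two i386 relocation cases keep their usual semantics: R_386_32 adds the symbol value into the slot, while R_386_PC32 adds the symbol value minus the slot's own address, which is what makes the result position-relative. The patch changes only where the store goes (through the writable alias, wrapped in pax_open_kernel()/pax_close_kernel()); note that P is computed from the address the slot will have when the code runs, before the pointer is translated. A standalone sketch of the two formulas:

#include <stdint.h>
#include <stdio.h>

/* The classic i386 relocation computations used by apply_relocate():
 *   R_386_32  : slot += S        (absolute)
 *   R_386_PC32: slot += S - P    (PC-relative; P is the slot's address) */
static void reloc_386_32(uint32_t *slot, uint32_t sym_value)
{
    *slot += sym_value;
}

static void reloc_386_pc32(uint32_t *slot, uint32_t sym_value, uint32_t slot_address)
{
    *slot += sym_value - slot_address;
}

int main(void)
{
    uint32_t word = 0xfffffffc;   /* typical -4 addend of a call rel32 */

    reloc_386_pc32(&word, 0xc1234560, 0xc1001000);
    printf("pc32 result: %#x\n", word);   /* displacement from slot to target, plus addend */

    word = 0;
    reloc_386_32(&word, 0xc1234560);
    printf("abs  result: %#x\n", word);
    return 0;
}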
14812diff -urNp linux-3.1.1/arch/x86/kernel/paravirt.c linux-3.1.1/arch/x86/kernel/paravirt.c
14813--- linux-3.1.1/arch/x86/kernel/paravirt.c 2011-11-11 15:19:27.000000000 -0500
14814+++ linux-3.1.1/arch/x86/kernel/paravirt.c 2011-11-17 18:29:42.000000000 -0500
14815@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14816 {
14817 return x;
14818 }
14819+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14820+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14821+#endif
14822
14823 void __init default_banner(void)
14824 {
14825@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14826 .pv_lock_ops = pv_lock_ops,
14827 #endif
14828 };
14829+
14830+ pax_track_stack();
14831+
14832 return *((void **)&tmpl + type);
14833 }
14834
14835@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14836 if (opfunc == NULL)
14837 /* If there's no function, patch it with a ud2a (BUG) */
14838 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14839- else if (opfunc == _paravirt_nop)
14840+ else if (opfunc == (void *)_paravirt_nop)
14841 /* If the operation is a nop, then nop the callsite */
14842 ret = paravirt_patch_nop();
14843
14844 /* identity functions just return their single argument */
14845- else if (opfunc == _paravirt_ident_32)
14846+ else if (opfunc == (void *)_paravirt_ident_32)
14847 ret = paravirt_patch_ident_32(insnbuf, len);
14848- else if (opfunc == _paravirt_ident_64)
14849+ else if (opfunc == (void *)_paravirt_ident_64)
14850 ret = paravirt_patch_ident_64(insnbuf, len);
14851+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14852+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14853+ ret = paravirt_patch_ident_64(insnbuf, len);
14854+#endif
14855
14856 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14857 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14858@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14859 if (insn_len > len || start == NULL)
14860 insn_len = len;
14861 else
14862- memcpy(insnbuf, start, insn_len);
14863+ memcpy(insnbuf, ktla_ktva(start), insn_len);
14864
14865 return insn_len;
14866 }
14867@@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void)
14868 preempt_enable();
14869 }
14870
14871-struct pv_info pv_info = {
14872+struct pv_info pv_info __read_only = {
14873 .name = "bare hardware",
14874 .paravirt_enabled = 0,
14875 .kernel_rpl = 0,
14876@@ -313,16 +323,16 @@ struct pv_info pv_info = {
14877 #endif
14878 };
14879
14880-struct pv_init_ops pv_init_ops = {
14881+struct pv_init_ops pv_init_ops __read_only = {
14882 .patch = native_patch,
14883 };
14884
14885-struct pv_time_ops pv_time_ops = {
14886+struct pv_time_ops pv_time_ops __read_only = {
14887 .sched_clock = native_sched_clock,
14888 .steal_clock = native_steal_clock,
14889 };
14890
14891-struct pv_irq_ops pv_irq_ops = {
14892+struct pv_irq_ops pv_irq_ops __read_only = {
14893 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14894 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14895 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14896@@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = {
14897 #endif
14898 };
14899
14900-struct pv_cpu_ops pv_cpu_ops = {
14901+struct pv_cpu_ops pv_cpu_ops __read_only = {
14902 .cpuid = native_cpuid,
14903 .get_debugreg = native_get_debugreg,
14904 .set_debugreg = native_set_debugreg,
14905@@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14906 .end_context_switch = paravirt_nop,
14907 };
14908
14909-struct pv_apic_ops pv_apic_ops = {
14910+struct pv_apic_ops pv_apic_ops __read_only = {
14911 #ifdef CONFIG_X86_LOCAL_APIC
14912 .startup_ipi_hook = paravirt_nop,
14913 #endif
14914 };
14915
14916-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14917+#ifdef CONFIG_X86_32
14918+#ifdef CONFIG_X86_PAE
14919+/* 64-bit pagetable entries */
14920+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14921+#else
14922 /* 32-bit pagetable entries */
14923 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14924+#endif
14925 #else
14926 /* 64-bit pagetable entries */
14927 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14928 #endif
14929
14930-struct pv_mmu_ops pv_mmu_ops = {
14931+struct pv_mmu_ops pv_mmu_ops __read_only = {
14932
14933 .read_cr2 = native_read_cr2,
14934 .write_cr2 = native_write_cr2,
14935@@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14936 .make_pud = PTE_IDENT,
14937
14938 .set_pgd = native_set_pgd,
14939+ .set_pgd_batched = native_set_pgd_batched,
14940 #endif
14941 #endif /* PAGETABLE_LEVELS >= 3 */
14942
14943@@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14944 },
14945
14946 .set_fixmap = native_set_fixmap,
14947+
14948+#ifdef CONFIG_PAX_KERNEXEC
14949+ .pax_open_kernel = native_pax_open_kernel,
14950+ .pax_close_kernel = native_pax_close_kernel,
14951+#endif
14952+
14953 };
14954
14955 EXPORT_SYMBOL_GPL(pv_time_ops);
14956diff -urNp linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c
14957--- linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c 2011-11-11 15:19:27.000000000 -0500
14958+++ linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c 2011-11-16 18:39:07.000000000 -0500
14959@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14960 arch_spin_lock(lock);
14961 }
14962
14963-struct pv_lock_ops pv_lock_ops = {
14964+struct pv_lock_ops pv_lock_ops __read_only = {
14965 #ifdef CONFIG_SMP
14966 .spin_is_locked = __ticket_spin_is_locked,
14967 .spin_is_contended = __ticket_spin_is_contended,
14968diff -urNp linux-3.1.1/arch/x86/kernel/pci-iommu_table.c linux-3.1.1/arch/x86/kernel/pci-iommu_table.c
14969--- linux-3.1.1/arch/x86/kernel/pci-iommu_table.c 2011-11-11 15:19:27.000000000 -0500
14970+++ linux-3.1.1/arch/x86/kernel/pci-iommu_table.c 2011-11-16 18:40:08.000000000 -0500
14971@@ -2,7 +2,7 @@
14972 #include <asm/iommu_table.h>
14973 #include <linux/string.h>
14974 #include <linux/kallsyms.h>
14975-
14976+#include <linux/sched.h>
14977
14978 #define DEBUG 1
14979
14980@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14981 {
14982 struct iommu_table_entry *p, *q, *x;
14983
14984+ pax_track_stack();
14985+
14986 /* Simple cyclic dependency checker. */
14987 for (p = start; p < finish; p++) {
14988 q = find_dependents_of(start, finish, p);
14989diff -urNp linux-3.1.1/arch/x86/kernel/process_32.c linux-3.1.1/arch/x86/kernel/process_32.c
14990--- linux-3.1.1/arch/x86/kernel/process_32.c 2011-11-11 15:19:27.000000000 -0500
14991+++ linux-3.1.1/arch/x86/kernel/process_32.c 2011-11-16 18:39:07.000000000 -0500
14992@@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __as
14993 unsigned long thread_saved_pc(struct task_struct *tsk)
14994 {
14995 return ((unsigned long *)tsk->thread.sp)[3];
14996+//XXX return tsk->thread.eip;
14997 }
14998
14999 #ifndef CONFIG_SMP
15000@@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, i
15001 unsigned long sp;
15002 unsigned short ss, gs;
15003
15004- if (user_mode_vm(regs)) {
15005+ if (user_mode(regs)) {
15006 sp = regs->sp;
15007 ss = regs->ss & 0xffff;
15008- gs = get_user_gs(regs);
15009 } else {
15010 sp = kernel_stack_pointer(regs);
15011 savesegment(ss, ss);
15012- savesegment(gs, gs);
15013 }
15014+ gs = get_user_gs(regs);
15015
15016 show_regs_common();
15017
15018@@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flag
15019 struct task_struct *tsk;
15020 int err;
15021
15022- childregs = task_pt_regs(p);
15023+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15024 *childregs = *regs;
15025 childregs->ax = 0;
15026 childregs->sp = sp;
15027
15028 p->thread.sp = (unsigned long) childregs;
15029 p->thread.sp0 = (unsigned long) (childregs+1);
15030+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15031
15032 p->thread.ip = (unsigned long) ret_from_fork;
15033
15034@@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p,
15035 struct thread_struct *prev = &prev_p->thread,
15036 *next = &next_p->thread;
15037 int cpu = smp_processor_id();
15038- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15039+ struct tss_struct *tss = init_tss + cpu;
15040 bool preload_fpu;
15041
15042 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15043@@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p,
15044 */
15045 lazy_save_gs(prev->gs);
15046
15047+#ifdef CONFIG_PAX_MEMORY_UDEREF
15048+ __set_fs(task_thread_info(next_p)->addr_limit);
15049+#endif
15050+
15051 /*
15052 * Load the per-thread Thread-Local Storage descriptor.
15053 */
15054@@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p,
15055 */
15056 arch_end_context_switch(next_p);
15057
15058+ percpu_write(current_task, next_p);
15059+ percpu_write(current_tinfo, &next_p->tinfo);
15060+
15061 if (preload_fpu)
15062 __math_state_restore();
15063
15064@@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p,
15065 if (prev->gs | next->gs)
15066 lazy_load_gs(next->gs);
15067
15068- percpu_write(current_task, next_p);
15069-
15070 return prev_p;
15071 }
15072
15073@@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_stru
15074 } while (count++ < 16);
15075 return 0;
15076 }
15077-
15078diff -urNp linux-3.1.1/arch/x86/kernel/process_64.c linux-3.1.1/arch/x86/kernel/process_64.c
15079--- linux-3.1.1/arch/x86/kernel/process_64.c 2011-11-11 15:19:27.000000000 -0500
15080+++ linux-3.1.1/arch/x86/kernel/process_64.c 2011-11-16 18:39:07.000000000 -0500
15081@@ -88,7 +88,7 @@ static void __exit_idle(void)
15082 void exit_idle(void)
15083 {
15084 /* idle loop has pid 0 */
15085- if (current->pid)
15086+ if (task_pid_nr(current))
15087 return;
15088 __exit_idle();
15089 }
15090@@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flag
15091 struct pt_regs *childregs;
15092 struct task_struct *me = current;
15093
15094- childregs = ((struct pt_regs *)
15095- (THREAD_SIZE + task_stack_page(p))) - 1;
15096+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15097 *childregs = *regs;
15098
15099 childregs->ax = 0;
15100@@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flag
15101 p->thread.sp = (unsigned long) childregs;
15102 p->thread.sp0 = (unsigned long) (childregs+1);
15103 p->thread.usersp = me->thread.usersp;
15104+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15105
15106 set_tsk_thread_flag(p, TIF_FORK);
15107
15108@@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p,
15109 struct thread_struct *prev = &prev_p->thread;
15110 struct thread_struct *next = &next_p->thread;
15111 int cpu = smp_processor_id();
15112- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15113+ struct tss_struct *tss = init_tss + cpu;
15114 unsigned fsindex, gsindex;
15115 bool preload_fpu;
15116
15117@@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p,
15118 prev->usersp = percpu_read(old_rsp);
15119 percpu_write(old_rsp, next->usersp);
15120 percpu_write(current_task, next_p);
15121+ percpu_write(current_tinfo, &next_p->tinfo);
15122
15123- percpu_write(kernel_stack,
15124- (unsigned long)task_stack_page(next_p) +
15125- THREAD_SIZE - KERNEL_STACK_OFFSET);
15126+ percpu_write(kernel_stack, next->sp0);
15127
15128 /*
15129 * Now maybe reload the debug registers and handle I/O bitmaps
15130@@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_stru
15131 if (!p || p == current || p->state == TASK_RUNNING)
15132 return 0;
15133 stack = (unsigned long)task_stack_page(p);
15134- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15135+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15136 return 0;
15137 fp = *(u64 *)(p->thread.sp);
15138 do {
15139- if (fp < (unsigned long)stack ||
15140- fp >= (unsigned long)stack+THREAD_SIZE)
15141+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15142 return 0;
15143 ip = *(u64 *)(fp+8);
15144 if (!in_sched_functions(ip))
15145diff -urNp linux-3.1.1/arch/x86/kernel/process.c linux-3.1.1/arch/x86/kernel/process.c
15146--- linux-3.1.1/arch/x86/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
15147+++ linux-3.1.1/arch/x86/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
15148@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
15149
15150 void free_thread_info(struct thread_info *ti)
15151 {
15152- free_thread_xstate(ti->task);
15153 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15154 }
15155
15156+static struct kmem_cache *task_struct_cachep;
15157+
15158 void arch_task_cache_init(void)
15159 {
15160- task_xstate_cachep =
15161- kmem_cache_create("task_xstate", xstate_size,
15162+ /* create a slab on which task_structs can be allocated */
15163+ task_struct_cachep =
15164+ kmem_cache_create("task_struct", sizeof(struct task_struct),
15165+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15166+
15167+ task_xstate_cachep =
15168+ kmem_cache_create("task_xstate", xstate_size,
15169 __alignof__(union thread_xstate),
15170- SLAB_PANIC | SLAB_NOTRACK, NULL);
15171+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15172+}
15173+
15174+struct task_struct *alloc_task_struct_node(int node)
15175+{
15176+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
15177+}
15178+
15179+void free_task_struct(struct task_struct *task)
15180+{
15181+ free_thread_xstate(task);
15182+ kmem_cache_free(task_struct_cachep, task);
15183 }
15184
15185 /*
15186@@ -70,7 +87,7 @@ void exit_thread(void)
15187 unsigned long *bp = t->io_bitmap_ptr;
15188
15189 if (bp) {
15190- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15191+ struct tss_struct *tss = init_tss + get_cpu();
15192
15193 t->io_bitmap_ptr = NULL;
15194 clear_thread_flag(TIF_IO_BITMAP);
15195@@ -106,7 +123,7 @@ void show_regs_common(void)
15196
15197 printk(KERN_CONT "\n");
15198 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
15199- current->pid, current->comm, print_tainted(),
15200+ task_pid_nr(current), current->comm, print_tainted(),
15201 init_utsname()->release,
15202 (int)strcspn(init_utsname()->version, " "),
15203 init_utsname()->version);
15204@@ -120,6 +137,9 @@ void flush_thread(void)
15205 {
15206 struct task_struct *tsk = current;
15207
15208+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15209+ loadsegment(gs, 0);
15210+#endif
15211 flush_ptrace_hw_breakpoint(tsk);
15212 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
15213 /*
15214@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
15215 regs.di = (unsigned long) arg;
15216
15217 #ifdef CONFIG_X86_32
15218- regs.ds = __USER_DS;
15219- regs.es = __USER_DS;
15220+ regs.ds = __KERNEL_DS;
15221+ regs.es = __KERNEL_DS;
15222 regs.fs = __KERNEL_PERCPU;
15223- regs.gs = __KERNEL_STACK_CANARY;
15224+ savesegment(gs, regs.gs);
15225 #else
15226 regs.ss = __KERNEL_DS;
15227 #endif
15228@@ -403,7 +423,7 @@ void default_idle(void)
15229 EXPORT_SYMBOL(default_idle);
15230 #endif
15231
15232-void stop_this_cpu(void *dummy)
15233+__noreturn void stop_this_cpu(void *dummy)
15234 {
15235 local_irq_disable();
15236 /*
15237@@ -645,16 +665,37 @@ static int __init idle_setup(char *str)
15238 }
15239 early_param("idle", idle_setup);
15240
15241-unsigned long arch_align_stack(unsigned long sp)
15242+#ifdef CONFIG_PAX_RANDKSTACK
15243+void pax_randomize_kstack(struct pt_regs *regs)
15244 {
15245- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15246- sp -= get_random_int() % 8192;
15247- return sp & ~0xf;
15248-}
15249+ struct thread_struct *thread = &current->thread;
15250+ unsigned long time;
15251
15252-unsigned long arch_randomize_brk(struct mm_struct *mm)
15253-{
15254- unsigned long range_end = mm->brk + 0x02000000;
15255- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15256-}
15257+ if (!randomize_va_space)
15258+ return;
15259+
15260+ if (v8086_mode(regs))
15261+ return;
15262
15263+ rdtscl(time);
15264+
15265+ /* P4 seems to return a 0 LSB, ignore it */
15266+#ifdef CONFIG_MPENTIUM4
15267+ time &= 0x3EUL;
15268+ time <<= 2;
15269+#elif defined(CONFIG_X86_64)
15270+ time &= 0xFUL;
15271+ time <<= 4;
15272+#else
15273+ time &= 0x1FUL;
15274+ time <<= 3;
15275+#endif
15276+
15277+ thread->sp0 ^= time;
15278+ load_sp0(init_tss + smp_processor_id(), thread);
15279+
15280+#ifdef CONFIG_X86_64
15281+ percpu_write(kernel_stack, thread->sp0);
15282+#endif
15283+}
15284+#endif
15285diff -urNp linux-3.1.1/arch/x86/kernel/ptrace.c linux-3.1.1/arch/x86/kernel/ptrace.c
15286--- linux-3.1.1/arch/x86/kernel/ptrace.c 2011-11-11 15:19:27.000000000 -0500
15287+++ linux-3.1.1/arch/x86/kernel/ptrace.c 2011-11-16 18:39:07.000000000 -0500
15288@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *chi
15289 unsigned long addr, unsigned long data)
15290 {
15291 int ret;
15292- unsigned long __user *datap = (unsigned long __user *)data;
15293+ unsigned long __user *datap = (__force unsigned long __user *)data;
15294
15295 switch (request) {
15296 /* read the word at location addr in the USER area. */
15297@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *chi
15298 if ((int) addr < 0)
15299 return -EIO;
15300 ret = do_get_thread_area(child, addr,
15301- (struct user_desc __user *)data);
15302+ (__force struct user_desc __user *) data);
15303 break;
15304
15305 case PTRACE_SET_THREAD_AREA:
15306 if ((int) addr < 0)
15307 return -EIO;
15308 ret = do_set_thread_area(child, addr,
15309- (struct user_desc __user *)data, 0);
15310+ (__force struct user_desc __user *) data, 0);
15311 break;
15312 #endif
15313
15314@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct tas
15315 memset(info, 0, sizeof(*info));
15316 info->si_signo = SIGTRAP;
15317 info->si_code = si_code;
15318- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
15319+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
15320 }
15321
15322 void user_single_step_siginfo(struct task_struct *tsk,
15323diff -urNp linux-3.1.1/arch/x86/kernel/pvclock.c linux-3.1.1/arch/x86/kernel/pvclock.c
15324--- linux-3.1.1/arch/x86/kernel/pvclock.c 2011-11-11 15:19:27.000000000 -0500
15325+++ linux-3.1.1/arch/x86/kernel/pvclock.c 2011-11-16 18:39:07.000000000 -0500
15326@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
15327 return pv_tsc_khz;
15328 }
15329
15330-static atomic64_t last_value = ATOMIC64_INIT(0);
15331+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
15332
15333 void pvclock_resume(void)
15334 {
15335- atomic64_set(&last_value, 0);
15336+ atomic64_set_unchecked(&last_value, 0);
15337 }
15338
15339 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
15340@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
15341 * updating at the same time, and one of them could be slightly behind,
15342 * making the assumption that last_value always go forward fail to hold.
15343 */
15344- last = atomic64_read(&last_value);
15345+ last = atomic64_read_unchecked(&last_value);
15346 do {
15347 if (ret < last)
15348 return last;
15349- last = atomic64_cmpxchg(&last_value, last, ret);
15350+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
15351 } while (unlikely(last != ret));
15352
15353 return ret;
15354diff -urNp linux-3.1.1/arch/x86/kernel/reboot.c linux-3.1.1/arch/x86/kernel/reboot.c
15355--- linux-3.1.1/arch/x86/kernel/reboot.c 2011-11-11 15:19:27.000000000 -0500
15356+++ linux-3.1.1/arch/x86/kernel/reboot.c 2011-11-16 18:39:07.000000000 -0500
15357@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
15358 EXPORT_SYMBOL(pm_power_off);
15359
15360 static const struct desc_ptr no_idt = {};
15361-static int reboot_mode;
15362+static unsigned short reboot_mode;
15363 enum reboot_type reboot_type = BOOT_ACPI;
15364 int reboot_force;
15365
15366@@ -315,13 +315,17 @@ core_initcall(reboot_init);
15367 extern const unsigned char machine_real_restart_asm[];
15368 extern const u64 machine_real_restart_gdt[3];
15369
15370-void machine_real_restart(unsigned int type)
15371+__noreturn void machine_real_restart(unsigned int type)
15372 {
15373 void *restart_va;
15374 unsigned long restart_pa;
15375- void (*restart_lowmem)(unsigned int);
15376+ void (* __noreturn restart_lowmem)(unsigned int);
15377 u64 *lowmem_gdt;
15378
15379+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15380+ struct desc_struct *gdt;
15381+#endif
15382+
15383 local_irq_disable();
15384
15385 /* Write zero to CMOS register number 0x0f, which the BIOS POST
15386@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
15387 boot)". This seems like a fairly standard thing that gets set by
15388 REBOOT.COM programs, and the previous reset routine did this
15389 too. */
15390- *((unsigned short *)0x472) = reboot_mode;
15391+ *(unsigned short *)(__va(0x472)) = reboot_mode;
15392
15393 /* Patch the GDT in the low memory trampoline */
15394 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
15395
15396 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
15397 restart_pa = virt_to_phys(restart_va);
15398- restart_lowmem = (void (*)(unsigned int))restart_pa;
15399+ restart_lowmem = (void *)restart_pa;
15400
15401 /* GDT[0]: GDT self-pointer */
15402 lowmem_gdt[0] =
15403@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
15404 GDT_ENTRY(0x009b, restart_pa, 0xffff);
15405
15406 /* Jump to the identity-mapped low memory code */
15407+
15408+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15409+ gdt = get_cpu_gdt_table(smp_processor_id());
15410+ pax_open_kernel();
15411+#ifdef CONFIG_PAX_MEMORY_UDEREF
15412+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
15413+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
15414+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
15415+#endif
15416+#ifdef CONFIG_PAX_KERNEXEC
15417+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
15418+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
15419+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
15420+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
15421+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
15422+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
15423+#endif
15424+ pax_close_kernel();
15425+#endif
15426+
15427+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15428+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
15429+ unreachable();
15430+#else
15431 restart_lowmem(type);
15432+#endif
15433+
15434 }
15435 #ifdef CONFIG_APM_MODULE
15436 EXPORT_SYMBOL(machine_real_restart);
15437@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
15438 * try to force a triple fault and then cycle between hitting the keyboard
15439 * controller and doing that
15440 */
15441-static void native_machine_emergency_restart(void)
15442+__noreturn static void native_machine_emergency_restart(void)
15443 {
15444 int i;
15445 int attempt = 0;
15446@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
15447 #endif
15448 }
15449
15450-static void __machine_emergency_restart(int emergency)
15451+static __noreturn void __machine_emergency_restart(int emergency)
15452 {
15453 reboot_emergency = emergency;
15454 machine_ops.emergency_restart();
15455 }
15456
15457-static void native_machine_restart(char *__unused)
15458+static __noreturn void native_machine_restart(char *__unused)
15459 {
15460 printk("machine restart\n");
15461
15462@@ -662,7 +692,7 @@ static void native_machine_restart(char
15463 __machine_emergency_restart(0);
15464 }
15465
15466-static void native_machine_halt(void)
15467+static __noreturn void native_machine_halt(void)
15468 {
15469 /* stop other cpus and apics */
15470 machine_shutdown();
15471@@ -673,7 +703,7 @@ static void native_machine_halt(void)
15472 stop_this_cpu(NULL);
15473 }
15474
15475-static void native_machine_power_off(void)
15476+__noreturn static void native_machine_power_off(void)
15477 {
15478 if (pm_power_off) {
15479 if (!reboot_force)
15480@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
15481 }
15482 /* a fallback in case there is no PM info available */
15483 tboot_shutdown(TB_SHUTDOWN_HALT);
15484+ unreachable();
15485 }
15486
15487 struct machine_ops machine_ops = {
15488diff -urNp linux-3.1.1/arch/x86/kernel/setup.c linux-3.1.1/arch/x86/kernel/setup.c
15489--- linux-3.1.1/arch/x86/kernel/setup.c 2011-11-11 15:19:27.000000000 -0500
15490+++ linux-3.1.1/arch/x86/kernel/setup.c 2011-11-16 18:39:07.000000000 -0500
15491@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
15492
15493 switch (data->type) {
15494 case SETUP_E820_EXT:
15495- parse_e820_ext(data);
15496+ parse_e820_ext((struct setup_data __force_kernel *)data);
15497 break;
15498 case SETUP_DTB:
15499 add_dtb(pa_data);
15500@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
15501 * area (640->1Mb) as ram even though it is not.
15502 * take them out.
15503 */
15504- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
15505+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
15506 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
15507 }
15508
15509@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
15510
15511 if (!boot_params.hdr.root_flags)
15512 root_mountflags &= ~MS_RDONLY;
15513- init_mm.start_code = (unsigned long) _text;
15514- init_mm.end_code = (unsigned long) _etext;
15515+ init_mm.start_code = ktla_ktva((unsigned long) _text);
15516+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
15517 init_mm.end_data = (unsigned long) _edata;
15518 init_mm.brk = _brk_end;
15519
15520- code_resource.start = virt_to_phys(_text);
15521- code_resource.end = virt_to_phys(_etext)-1;
15522- data_resource.start = virt_to_phys(_etext);
15523+ code_resource.start = virt_to_phys(ktla_ktva(_text));
15524+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15525+ data_resource.start = virt_to_phys(_sdata);
15526 data_resource.end = virt_to_phys(_edata)-1;
15527 bss_resource.start = virt_to_phys(&__bss_start);
15528 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15529diff -urNp linux-3.1.1/arch/x86/kernel/setup_percpu.c linux-3.1.1/arch/x86/kernel/setup_percpu.c
15530--- linux-3.1.1/arch/x86/kernel/setup_percpu.c 2011-11-11 15:19:27.000000000 -0500
15531+++ linux-3.1.1/arch/x86/kernel/setup_percpu.c 2011-11-16 18:39:07.000000000 -0500
15532@@ -21,19 +21,17 @@
15533 #include <asm/cpu.h>
15534 #include <asm/stackprotector.h>
15535
15536-DEFINE_PER_CPU(int, cpu_number);
15537+#ifdef CONFIG_SMP
15538+DEFINE_PER_CPU(unsigned int, cpu_number);
15539 EXPORT_PER_CPU_SYMBOL(cpu_number);
15540+#endif
15541
15542-#ifdef CONFIG_X86_64
15543 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
15544-#else
15545-#define BOOT_PERCPU_OFFSET 0
15546-#endif
15547
15548 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
15549 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
15550
15551-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
15552+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
15553 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
15554 };
15555 EXPORT_SYMBOL(__per_cpu_offset);
15556@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
15557 {
15558 #ifdef CONFIG_X86_32
15559 struct desc_struct gdt;
15560+ unsigned long base = per_cpu_offset(cpu);
15561
15562- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
15563- 0x2 | DESCTYPE_S, 0x8);
15564- gdt.s = 1;
15565+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
15566+ 0x83 | DESCTYPE_S, 0xC);
15567 write_gdt_entry(get_cpu_gdt_table(cpu),
15568 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15569 #endif
15570@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
15571 /* alrighty, percpu areas up and running */
15572 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15573 for_each_possible_cpu(cpu) {
15574+#ifdef CONFIG_CC_STACKPROTECTOR
15575+#ifdef CONFIG_X86_32
15576+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
15577+#endif
15578+#endif
15579 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15580 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15581 per_cpu(cpu_number, cpu) = cpu;
15582@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
15583 */
15584 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
15585 #endif
15586+#ifdef CONFIG_CC_STACKPROTECTOR
15587+#ifdef CONFIG_X86_32
15588+ if (!cpu)
15589+ per_cpu(stack_canary.canary, cpu) = canary;
15590+#endif
15591+#endif
15592 /*
15593 * Up to this point, the boot CPU has been using .init.data
15594 * area. Reload any changed state for the boot CPU.
15595diff -urNp linux-3.1.1/arch/x86/kernel/signal.c linux-3.1.1/arch/x86/kernel/signal.c
15596--- linux-3.1.1/arch/x86/kernel/signal.c 2011-11-11 15:19:27.000000000 -0500
15597+++ linux-3.1.1/arch/x86/kernel/signal.c 2011-11-16 19:39:49.000000000 -0500
15598@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
15599 * Align the stack pointer according to the i386 ABI,
15600 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15601 */
15602- sp = ((sp + 4) & -16ul) - 4;
15603+ sp = ((sp - 12) & -16ul) - 4;
15604 #else /* !CONFIG_X86_32 */
15605 sp = round_down(sp, 16) - 8;
15606 #endif
15607@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
15608 * Return an always-bogus address instead so we will die with SIGSEGV.
15609 */
15610 if (onsigstack && !likely(on_sig_stack(sp)))
15611- return (void __user *)-1L;
15612+ return (__force void __user *)-1L;
15613
15614 /* save i387 state */
15615 if (used_math() && save_i387_xstate(*fpstate) < 0)
15616- return (void __user *)-1L;
15617+ return (__force void __user *)-1L;
15618
15619 return (void __user *)sp;
15620 }
15621@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
15622 }
15623
15624 if (current->mm->context.vdso)
15625- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15626+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15627 else
15628- restorer = &frame->retcode;
15629+ restorer = (void __user *)&frame->retcode;
15630 if (ka->sa.sa_flags & SA_RESTORER)
15631 restorer = ka->sa.sa_restorer;
15632
15633@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
15634 * reasons and because gdb uses it as a signature to notice
15635 * signal handler stack frames.
15636 */
15637- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15638+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15639
15640 if (err)
15641 return -EFAULT;
15642@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
15643 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15644
15645 /* Set up to return from userspace. */
15646- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15647+ if (current->mm->context.vdso)
15648+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15649+ else
15650+ restorer = (void __user *)&frame->retcode;
15651 if (ka->sa.sa_flags & SA_RESTORER)
15652 restorer = ka->sa.sa_restorer;
15653 put_user_ex(restorer, &frame->pretcode);
15654@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
15655 * reasons and because gdb uses it as a signature to notice
15656 * signal handler stack frames.
15657 */
15658- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15659+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15660 } put_user_catch(err);
15661
15662 if (err)
15663@@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *re
15664 siginfo_t info;
15665 int signr;
15666
15667+ pax_track_stack();
15668+
15669 /*
15670 * We want the common case to go fast, which is why we may in certain
15671 * cases get here from kernel mode. Just return without doing anything
15672@@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *re
15673 * X86_32: vm86 regs switched out by assembly code before reaching
15674 * here, so testing against kernel CS suffices.
15675 */
15676- if (!user_mode(regs))
15677+ if (!user_mode_novm(regs))
15678 return;
15679
15680 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
15681diff -urNp linux-3.1.1/arch/x86/kernel/smpboot.c linux-3.1.1/arch/x86/kernel/smpboot.c
15682--- linux-3.1.1/arch/x86/kernel/smpboot.c 2011-11-11 15:19:27.000000000 -0500
15683+++ linux-3.1.1/arch/x86/kernel/smpboot.c 2011-11-16 18:39:07.000000000 -0500
15684@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
15685 set_idle_for_cpu(cpu, c_idle.idle);
15686 do_rest:
15687 per_cpu(current_task, cpu) = c_idle.idle;
15688+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
15689 #ifdef CONFIG_X86_32
15690 /* Stack for startup_32 can be just as for start_secondary onwards */
15691 irq_ctx_init(cpu);
15692 #else
15693 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
15694 initial_gs = per_cpu_offset(cpu);
15695- per_cpu(kernel_stack, cpu) =
15696- (unsigned long)task_stack_page(c_idle.idle) -
15697- KERNEL_STACK_OFFSET + THREAD_SIZE;
15698+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
15699 #endif
15700+
15701+ pax_open_kernel();
15702 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15703+ pax_close_kernel();
15704+
15705 initial_code = (unsigned long)start_secondary;
15706 stack_start = c_idle.idle->thread.sp;
15707
15708@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
15709
15710 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
15711
15712+#ifdef CONFIG_PAX_PER_CPU_PGD
15713+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
15714+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15715+ KERNEL_PGD_PTRS);
15716+#endif
15717+
15718 err = do_boot_cpu(apicid, cpu);
15719 if (err) {
15720 pr_debug("do_boot_cpu failed %d\n", err);
15721diff -urNp linux-3.1.1/arch/x86/kernel/step.c linux-3.1.1/arch/x86/kernel/step.c
15722--- linux-3.1.1/arch/x86/kernel/step.c 2011-11-11 15:19:27.000000000 -0500
15723+++ linux-3.1.1/arch/x86/kernel/step.c 2011-11-16 18:39:07.000000000 -0500
15724@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
15725 struct desc_struct *desc;
15726 unsigned long base;
15727
15728- seg &= ~7UL;
15729+ seg >>= 3;
15730
15731 mutex_lock(&child->mm->context.lock);
15732- if (unlikely((seg >> 3) >= child->mm->context.size))
15733+ if (unlikely(seg >= child->mm->context.size))
15734 addr = -1L; /* bogus selector, access would fault */
15735 else {
15736 desc = child->mm->context.ldt + seg;
15737@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15738 addr += base;
15739 }
15740 mutex_unlock(&child->mm->context.lock);
15741- }
15742+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15743+ addr = ktla_ktva(addr);
15744
15745 return addr;
15746 }
15747@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15748 unsigned char opcode[15];
15749 unsigned long addr = convert_ip_to_linear(child, regs);
15750
15751+ if (addr == -EINVAL)
15752+ return 0;
15753+
15754 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15755 for (i = 0; i < copied; i++) {
15756 switch (opcode[i]) {
15757diff -urNp linux-3.1.1/arch/x86/kernel/syscall_table_32.S linux-3.1.1/arch/x86/kernel/syscall_table_32.S
15758--- linux-3.1.1/arch/x86/kernel/syscall_table_32.S 2011-11-11 15:19:27.000000000 -0500
15759+++ linux-3.1.1/arch/x86/kernel/syscall_table_32.S 2011-11-16 18:39:07.000000000 -0500
15760@@ -1,3 +1,4 @@
15761+.section .rodata,"a",@progbits
15762 ENTRY(sys_call_table)
15763 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15764 .long sys_exit
15765diff -urNp linux-3.1.1/arch/x86/kernel/sys_i386_32.c linux-3.1.1/arch/x86/kernel/sys_i386_32.c
15766--- linux-3.1.1/arch/x86/kernel/sys_i386_32.c 2011-11-11 15:19:27.000000000 -0500
15767+++ linux-3.1.1/arch/x86/kernel/sys_i386_32.c 2011-11-16 18:39:07.000000000 -0500
15768@@ -24,17 +24,224 @@
15769
15770 #include <asm/syscalls.h>
15771
15772-/*
15773- * Do a system call from kernel instead of calling sys_execve so we
15774- * end up with proper pt_regs.
15775- */
15776-int kernel_execve(const char *filename,
15777- const char *const argv[],
15778- const char *const envp[])
15779+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15780 {
15781- long __res;
15782- asm volatile ("int $0x80"
15783- : "=a" (__res)
15784- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15785- return __res;
15786+ unsigned long pax_task_size = TASK_SIZE;
15787+
15788+#ifdef CONFIG_PAX_SEGMEXEC
15789+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15790+ pax_task_size = SEGMEXEC_TASK_SIZE;
15791+#endif
15792+
15793+ if (len > pax_task_size || addr > pax_task_size - len)
15794+ return -EINVAL;
15795+
15796+ return 0;
15797+}
15798+
15799+unsigned long
15800+arch_get_unmapped_area(struct file *filp, unsigned long addr,
15801+ unsigned long len, unsigned long pgoff, unsigned long flags)
15802+{
15803+ struct mm_struct *mm = current->mm;
15804+ struct vm_area_struct *vma;
15805+ unsigned long start_addr, pax_task_size = TASK_SIZE;
15806+
15807+#ifdef CONFIG_PAX_SEGMEXEC
15808+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15809+ pax_task_size = SEGMEXEC_TASK_SIZE;
15810+#endif
15811+
15812+ pax_task_size -= PAGE_SIZE;
15813+
15814+ if (len > pax_task_size)
15815+ return -ENOMEM;
15816+
15817+ if (flags & MAP_FIXED)
15818+ return addr;
15819+
15820+#ifdef CONFIG_PAX_RANDMMAP
15821+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15822+#endif
15823+
15824+ if (addr) {
15825+ addr = PAGE_ALIGN(addr);
15826+ if (pax_task_size - len >= addr) {
15827+ vma = find_vma(mm, addr);
15828+ if (check_heap_stack_gap(vma, addr, len))
15829+ return addr;
15830+ }
15831+ }
15832+ if (len > mm->cached_hole_size) {
15833+ start_addr = addr = mm->free_area_cache;
15834+ } else {
15835+ start_addr = addr = mm->mmap_base;
15836+ mm->cached_hole_size = 0;
15837+ }
15838+
15839+#ifdef CONFIG_PAX_PAGEEXEC
15840+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15841+ start_addr = 0x00110000UL;
15842+
15843+#ifdef CONFIG_PAX_RANDMMAP
15844+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15845+ start_addr += mm->delta_mmap & 0x03FFF000UL;
15846+#endif
15847+
15848+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15849+ start_addr = addr = mm->mmap_base;
15850+ else
15851+ addr = start_addr;
15852+ }
15853+#endif
15854+
15855+full_search:
15856+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15857+ /* At this point: (!vma || addr < vma->vm_end). */
15858+ if (pax_task_size - len < addr) {
15859+ /*
15860+ * Start a new search - just in case we missed
15861+ * some holes.
15862+ */
15863+ if (start_addr != mm->mmap_base) {
15864+ start_addr = addr = mm->mmap_base;
15865+ mm->cached_hole_size = 0;
15866+ goto full_search;
15867+ }
15868+ return -ENOMEM;
15869+ }
15870+ if (check_heap_stack_gap(vma, addr, len))
15871+ break;
15872+ if (addr + mm->cached_hole_size < vma->vm_start)
15873+ mm->cached_hole_size = vma->vm_start - addr;
15874+ addr = vma->vm_end;
15875+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
15876+ start_addr = addr = mm->mmap_base;
15877+ mm->cached_hole_size = 0;
15878+ goto full_search;
15879+ }
15880+ }
15881+
15882+ /*
15883+ * Remember the place where we stopped the search:
15884+ */
15885+ mm->free_area_cache = addr + len;
15886+ return addr;
15887+}
15888+
15889+unsigned long
15890+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15891+ const unsigned long len, const unsigned long pgoff,
15892+ const unsigned long flags)
15893+{
15894+ struct vm_area_struct *vma;
15895+ struct mm_struct *mm = current->mm;
15896+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15897+
15898+#ifdef CONFIG_PAX_SEGMEXEC
15899+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15900+ pax_task_size = SEGMEXEC_TASK_SIZE;
15901+#endif
15902+
15903+ pax_task_size -= PAGE_SIZE;
15904+
15905+ /* requested length too big for entire address space */
15906+ if (len > pax_task_size)
15907+ return -ENOMEM;
15908+
15909+ if (flags & MAP_FIXED)
15910+ return addr;
15911+
15912+#ifdef CONFIG_PAX_PAGEEXEC
15913+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15914+ goto bottomup;
15915+#endif
15916+
15917+#ifdef CONFIG_PAX_RANDMMAP
15918+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15919+#endif
15920+
15921+ /* requesting a specific address */
15922+ if (addr) {
15923+ addr = PAGE_ALIGN(addr);
15924+ if (pax_task_size - len >= addr) {
15925+ vma = find_vma(mm, addr);
15926+ if (check_heap_stack_gap(vma, addr, len))
15927+ return addr;
15928+ }
15929+ }
15930+
15931+ /* check if free_area_cache is useful for us */
15932+ if (len <= mm->cached_hole_size) {
15933+ mm->cached_hole_size = 0;
15934+ mm->free_area_cache = mm->mmap_base;
15935+ }
15936+
15937+ /* either no address requested or can't fit in requested address hole */
15938+ addr = mm->free_area_cache;
15939+
15940+ /* make sure it can fit in the remaining address space */
15941+ if (addr > len) {
15942+ vma = find_vma(mm, addr-len);
15943+ if (check_heap_stack_gap(vma, addr - len, len))
15944+ /* remember the address as a hint for next time */
15945+ return (mm->free_area_cache = addr-len);
15946+ }
15947+
15948+ if (mm->mmap_base < len)
15949+ goto bottomup;
15950+
15951+ addr = mm->mmap_base-len;
15952+
15953+ do {
15954+ /*
15955+ * Lookup failure means no vma is above this address,
15956+ * else if new region fits below vma->vm_start,
15957+ * return with success:
15958+ */
15959+ vma = find_vma(mm, addr);
15960+ if (check_heap_stack_gap(vma, addr, len))
15961+ /* remember the address as a hint for next time */
15962+ return (mm->free_area_cache = addr);
15963+
15964+ /* remember the largest hole we saw so far */
15965+ if (addr + mm->cached_hole_size < vma->vm_start)
15966+ mm->cached_hole_size = vma->vm_start - addr;
15967+
15968+ /* try just below the current vma->vm_start */
15969+ addr = skip_heap_stack_gap(vma, len);
15970+ } while (!IS_ERR_VALUE(addr));
15971+
15972+bottomup:
15973+ /*
15974+ * A failed mmap() very likely causes application failure,
15975+ * so fall back to the bottom-up function here. This scenario
15976+ * can happen with large stack limits and large mmap()
15977+ * allocations.
15978+ */
15979+
15980+#ifdef CONFIG_PAX_SEGMEXEC
15981+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15982+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15983+ else
15984+#endif
15985+
15986+ mm->mmap_base = TASK_UNMAPPED_BASE;
15987+
15988+#ifdef CONFIG_PAX_RANDMMAP
15989+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15990+ mm->mmap_base += mm->delta_mmap;
15991+#endif
15992+
15993+ mm->free_area_cache = mm->mmap_base;
15994+ mm->cached_hole_size = ~0UL;
15995+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15996+ /*
15997+ * Restore the topdown base:
15998+ */
15999+ mm->mmap_base = base;
16000+ mm->free_area_cache = base;
16001+ mm->cached_hole_size = ~0UL;
16002+
16003+ return addr;
16004 }
16005diff -urNp linux-3.1.1/arch/x86/kernel/sys_x86_64.c linux-3.1.1/arch/x86/kernel/sys_x86_64.c
16006--- linux-3.1.1/arch/x86/kernel/sys_x86_64.c 2011-11-11 15:19:27.000000000 -0500
16007+++ linux-3.1.1/arch/x86/kernel/sys_x86_64.c 2011-11-16 18:39:07.000000000 -0500
16008@@ -32,8 +32,8 @@ out:
16009 return error;
16010 }
16011
16012-static void find_start_end(unsigned long flags, unsigned long *begin,
16013- unsigned long *end)
16014+static void find_start_end(struct mm_struct *mm, unsigned long flags,
16015+ unsigned long *begin, unsigned long *end)
16016 {
16017 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16018 unsigned long new_begin;
16019@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16020 *begin = new_begin;
16021 }
16022 } else {
16023- *begin = TASK_UNMAPPED_BASE;
16024+ *begin = mm->mmap_base;
16025 *end = TASK_SIZE;
16026 }
16027 }
16028@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16029 if (flags & MAP_FIXED)
16030 return addr;
16031
16032- find_start_end(flags, &begin, &end);
16033+ find_start_end(mm, flags, &begin, &end);
16034
16035 if (len > end)
16036 return -ENOMEM;
16037
16038+#ifdef CONFIG_PAX_RANDMMAP
16039+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16040+#endif
16041+
16042 if (addr) {
16043 addr = PAGE_ALIGN(addr);
16044 vma = find_vma(mm, addr);
16045- if (end - len >= addr &&
16046- (!vma || addr + len <= vma->vm_start))
16047+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16048 return addr;
16049 }
16050 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16051@@ -106,7 +109,7 @@ full_search:
16052 }
16053 return -ENOMEM;
16054 }
16055- if (!vma || addr + len <= vma->vm_start) {
16056+ if (check_heap_stack_gap(vma, addr, len)) {
16057 /*
16058 * Remember the place where we stopped the search:
16059 */
16060@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16061 {
16062 struct vm_area_struct *vma;
16063 struct mm_struct *mm = current->mm;
16064- unsigned long addr = addr0;
16065+ unsigned long base = mm->mmap_base, addr = addr0;
16066
16067 /* requested length too big for entire address space */
16068 if (len > TASK_SIZE)
16069@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16070 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16071 goto bottomup;
16072
16073+#ifdef CONFIG_PAX_RANDMMAP
16074+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16075+#endif
16076+
16077 /* requesting a specific address */
16078 if (addr) {
16079 addr = PAGE_ALIGN(addr);
16080- vma = find_vma(mm, addr);
16081- if (TASK_SIZE - len >= addr &&
16082- (!vma || addr + len <= vma->vm_start))
16083- return addr;
16084+ if (TASK_SIZE - len >= addr) {
16085+ vma = find_vma(mm, addr);
16086+ if (check_heap_stack_gap(vma, addr, len))
16087+ return addr;
16088+ }
16089 }
16090
16091 /* check if free_area_cache is useful for us */
16092@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16093 /* make sure it can fit in the remaining address space */
16094 if (addr > len) {
16095 vma = find_vma(mm, addr-len);
16096- if (!vma || addr <= vma->vm_start)
16097+ if (check_heap_stack_gap(vma, addr - len, len))
16098 /* remember the address as a hint for next time */
16099 return mm->free_area_cache = addr-len;
16100 }
16101@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16102 * return with success:
16103 */
16104 vma = find_vma(mm, addr);
16105- if (!vma || addr+len <= vma->vm_start)
16106+ if (check_heap_stack_gap(vma, addr, len))
16107 /* remember the address as a hint for next time */
16108 return mm->free_area_cache = addr;
16109
16110@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16111 mm->cached_hole_size = vma->vm_start - addr;
16112
16113 /* try just below the current vma->vm_start */
16114- addr = vma->vm_start-len;
16115- } while (len < vma->vm_start);
16116+ addr = skip_heap_stack_gap(vma, len);
16117+ } while (!IS_ERR_VALUE(addr));
16118
16119 bottomup:
16120 /*
16121@@ -198,13 +206,21 @@ bottomup:
16122 * can happen with large stack limits and large mmap()
16123 * allocations.
16124 */
16125+ mm->mmap_base = TASK_UNMAPPED_BASE;
16126+
16127+#ifdef CONFIG_PAX_RANDMMAP
16128+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16129+ mm->mmap_base += mm->delta_mmap;
16130+#endif
16131+
16132+ mm->free_area_cache = mm->mmap_base;
16133 mm->cached_hole_size = ~0UL;
16134- mm->free_area_cache = TASK_UNMAPPED_BASE;
16135 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16136 /*
16137 * Restore the topdown base:
16138 */
16139- mm->free_area_cache = mm->mmap_base;
16140+ mm->mmap_base = base;
16141+ mm->free_area_cache = base;
16142 mm->cached_hole_size = ~0UL;
16143
16144 return addr;
16145diff -urNp linux-3.1.1/arch/x86/kernel/tboot.c linux-3.1.1/arch/x86/kernel/tboot.c
16146--- linux-3.1.1/arch/x86/kernel/tboot.c 2011-11-11 15:19:27.000000000 -0500
16147+++ linux-3.1.1/arch/x86/kernel/tboot.c 2011-11-16 18:39:07.000000000 -0500
16148@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
16149
16150 void tboot_shutdown(u32 shutdown_type)
16151 {
16152- void (*shutdown)(void);
16153+ void (* __noreturn shutdown)(void);
16154
16155 if (!tboot_enabled())
16156 return;
16157@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
16158
16159 switch_to_tboot_pt();
16160
16161- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16162+ shutdown = (void *)tboot->shutdown_entry;
16163 shutdown();
16164
16165 /* should not reach here */
16166@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16167 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16168 }
16169
16170-static atomic_t ap_wfs_count;
16171+static atomic_unchecked_t ap_wfs_count;
16172
16173 static int tboot_wait_for_aps(int num_aps)
16174 {
16175@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
16176 {
16177 switch (action) {
16178 case CPU_DYING:
16179- atomic_inc(&ap_wfs_count);
16180+ atomic_inc_unchecked(&ap_wfs_count);
16181 if (num_online_cpus() == 1)
16182- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16183+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16184 return NOTIFY_BAD;
16185 break;
16186 }
16187@@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
16188
16189 tboot_create_trampoline();
16190
16191- atomic_set(&ap_wfs_count, 0);
16192+ atomic_set_unchecked(&ap_wfs_count, 0);
16193 register_hotcpu_notifier(&tboot_cpu_notifier);
16194 return 0;
16195 }
16196diff -urNp linux-3.1.1/arch/x86/kernel/time.c linux-3.1.1/arch/x86/kernel/time.c
16197--- linux-3.1.1/arch/x86/kernel/time.c 2011-11-11 15:19:27.000000000 -0500
16198+++ linux-3.1.1/arch/x86/kernel/time.c 2011-11-16 18:39:07.000000000 -0500
16199@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
16200 {
16201 unsigned long pc = instruction_pointer(regs);
16202
16203- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16204+ if (!user_mode(regs) && in_lock_functions(pc)) {
16205 #ifdef CONFIG_FRAME_POINTER
16206- return *(unsigned long *)(regs->bp + sizeof(long));
16207+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16208 #else
16209 unsigned long *sp =
16210 (unsigned long *)kernel_stack_pointer(regs);
16211@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16212 * or above a saved flags. Eflags has bits 22-31 zero,
16213 * kernel addresses don't.
16214 */
16215+
16216+#ifdef CONFIG_PAX_KERNEXEC
16217+ return ktla_ktva(sp[0]);
16218+#else
16219 if (sp[0] >> 22)
16220 return sp[0];
16221 if (sp[1] >> 22)
16222 return sp[1];
16223 #endif
16224+
16225+#endif
16226 }
16227 return pc;
16228 }
16229diff -urNp linux-3.1.1/arch/x86/kernel/tls.c linux-3.1.1/arch/x86/kernel/tls.c
16230--- linux-3.1.1/arch/x86/kernel/tls.c 2011-11-11 15:19:27.000000000 -0500
16231+++ linux-3.1.1/arch/x86/kernel/tls.c 2011-11-16 18:39:07.000000000 -0500
16232@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16233 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16234 return -EINVAL;
16235
16236+#ifdef CONFIG_PAX_SEGMEXEC
16237+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16238+ return -EINVAL;
16239+#endif
16240+
16241 set_tls_desc(p, idx, &info, 1);
16242
16243 return 0;
16244diff -urNp linux-3.1.1/arch/x86/kernel/trampoline_32.S linux-3.1.1/arch/x86/kernel/trampoline_32.S
16245--- linux-3.1.1/arch/x86/kernel/trampoline_32.S 2011-11-11 15:19:27.000000000 -0500
16246+++ linux-3.1.1/arch/x86/kernel/trampoline_32.S 2011-11-16 18:39:07.000000000 -0500
16247@@ -32,6 +32,12 @@
16248 #include <asm/segment.h>
16249 #include <asm/page_types.h>
16250
16251+#ifdef CONFIG_PAX_KERNEXEC
16252+#define ta(X) (X)
16253+#else
16254+#define ta(X) ((X) - __PAGE_OFFSET)
16255+#endif
16256+
16257 #ifdef CONFIG_SMP
16258
16259 .section ".x86_trampoline","a"
16260@@ -62,7 +68,7 @@ r_base = .
16261 inc %ax # protected mode (PE) bit
16262 lmsw %ax # into protected mode
16263 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16264- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16265+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
16266
16267 # These need to be in the same 64K segment as the above;
16268 # hence we don't use the boot_gdt_descr defined in head.S
16269diff -urNp linux-3.1.1/arch/x86/kernel/trampoline_64.S linux-3.1.1/arch/x86/kernel/trampoline_64.S
16270--- linux-3.1.1/arch/x86/kernel/trampoline_64.S 2011-11-11 15:19:27.000000000 -0500
16271+++ linux-3.1.1/arch/x86/kernel/trampoline_64.S 2011-11-16 18:39:07.000000000 -0500
16272@@ -90,7 +90,7 @@ startup_32:
16273 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16274 movl %eax, %ds
16275
16276- movl $X86_CR4_PAE, %eax
16277+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16278 movl %eax, %cr4 # Enable PAE mode
16279
16280 # Setup trampoline 4 level pagetables
16281@@ -138,7 +138,7 @@ tidt:
16282 # so the kernel can live anywhere
16283 .balign 4
16284 tgdt:
16285- .short tgdt_end - tgdt # gdt limit
16286+ .short tgdt_end - tgdt - 1 # gdt limit
16287 .long tgdt - r_base
16288 .short 0
16289 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16290diff -urNp linux-3.1.1/arch/x86/kernel/traps.c linux-3.1.1/arch/x86/kernel/traps.c
16291--- linux-3.1.1/arch/x86/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
16292+++ linux-3.1.1/arch/x86/kernel/traps.c 2011-11-16 18:39:07.000000000 -0500
16293@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
16294
16295 /* Do we ignore FPU interrupts ? */
16296 char ignore_fpu_irq;
16297-
16298-/*
16299- * The IDT has to be page-aligned to simplify the Pentium
16300- * F0 0F bug workaround.
16301- */
16302-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16303 #endif
16304
16305 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16306@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
16307 }
16308
16309 static void __kprobes
16310-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16311+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16312 long error_code, siginfo_t *info)
16313 {
16314 struct task_struct *tsk = current;
16315
16316 #ifdef CONFIG_X86_32
16317- if (regs->flags & X86_VM_MASK) {
16318+ if (v8086_mode(regs)) {
16319 /*
16320 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16321 * On nmi (interrupt 2), do_trap should not be called.
16322@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
16323 }
16324 #endif
16325
16326- if (!user_mode(regs))
16327+ if (!user_mode_novm(regs))
16328 goto kernel_trap;
16329
16330 #ifdef CONFIG_X86_32
16331@@ -157,7 +151,7 @@ trap_signal:
16332 printk_ratelimit()) {
16333 printk(KERN_INFO
16334 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16335- tsk->comm, tsk->pid, str,
16336+ tsk->comm, task_pid_nr(tsk), str,
16337 regs->ip, regs->sp, error_code);
16338 print_vma_addr(" in ", regs->ip);
16339 printk("\n");
16340@@ -174,8 +168,20 @@ kernel_trap:
16341 if (!fixup_exception(regs)) {
16342 tsk->thread.error_code = error_code;
16343 tsk->thread.trap_no = trapnr;
16344+
16345+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16346+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16347+ str = "PAX: suspicious stack segment fault";
16348+#endif
16349+
16350 die(str, regs, error_code);
16351 }
16352+
16353+#ifdef CONFIG_PAX_REFCOUNT
16354+ if (trapnr == 4)
16355+ pax_report_refcount_overflow(regs);
16356+#endif
16357+
16358 return;
16359
16360 #ifdef CONFIG_X86_32
16361@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
16362 conditional_sti(regs);
16363
16364 #ifdef CONFIG_X86_32
16365- if (regs->flags & X86_VM_MASK)
16366+ if (v8086_mode(regs))
16367 goto gp_in_vm86;
16368 #endif
16369
16370 tsk = current;
16371- if (!user_mode(regs))
16372+ if (!user_mode_novm(regs))
16373 goto gp_in_kernel;
16374
16375+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16376+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16377+ struct mm_struct *mm = tsk->mm;
16378+ unsigned long limit;
16379+
16380+ down_write(&mm->mmap_sem);
16381+ limit = mm->context.user_cs_limit;
16382+ if (limit < TASK_SIZE) {
16383+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16384+ up_write(&mm->mmap_sem);
16385+ return;
16386+ }
16387+ up_write(&mm->mmap_sem);
16388+ }
16389+#endif
16390+
16391 tsk->thread.error_code = error_code;
16392 tsk->thread.trap_no = 13;
16393
16394@@ -304,6 +326,13 @@ gp_in_kernel:
16395 if (notify_die(DIE_GPF, "general protection fault", regs,
16396 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16397 return;
16398+
16399+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16400+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16401+ die("PAX: suspicious general protection fault", regs, error_code);
16402+ else
16403+#endif
16404+
16405 die("general protection fault", regs, error_code);
16406 }
16407
16408@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
16409 dotraplinkage notrace __kprobes void
16410 do_nmi(struct pt_regs *regs, long error_code)
16411 {
16412+
16413+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16414+ if (!user_mode(regs)) {
16415+ unsigned long cs = regs->cs & 0xFFFF;
16416+ unsigned long ip = ktva_ktla(regs->ip);
16417+
16418+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16419+ regs->ip = ip;
16420+ }
16421+#endif
16422+
16423 nmi_enter();
16424
16425 inc_irq_stat(__nmi_count);
16426@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
16427 /* It's safe to allow irq's after DR6 has been saved */
16428 preempt_conditional_sti(regs);
16429
16430- if (regs->flags & X86_VM_MASK) {
16431+ if (v8086_mode(regs)) {
16432 handle_vm86_trap((struct kernel_vm86_regs *) regs,
16433 error_code, 1);
16434 preempt_conditional_cli(regs);
16435@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
16436 * We already checked v86 mode above, so we can check for kernel mode
16437 * by just checking the CPL of CS.
16438 */
16439- if ((dr6 & DR_STEP) && !user_mode(regs)) {
16440+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
16441 tsk->thread.debugreg6 &= ~DR_STEP;
16442 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
16443 regs->flags &= ~X86_EFLAGS_TF;
16444@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
16445 return;
16446 conditional_sti(regs);
16447
16448- if (!user_mode_vm(regs))
16449+ if (!user_mode(regs))
16450 {
16451 if (!fixup_exception(regs)) {
16452 task->thread.error_code = error_code;
16453@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
16454 void __math_state_restore(void)
16455 {
16456 struct thread_info *thread = current_thread_info();
16457- struct task_struct *tsk = thread->task;
16458+ struct task_struct *tsk = current;
16459
16460 /*
16461 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16462@@ -750,8 +790,7 @@ void __math_state_restore(void)
16463 */
16464 asmlinkage void math_state_restore(void)
16465 {
16466- struct thread_info *thread = current_thread_info();
16467- struct task_struct *tsk = thread->task;
16468+ struct task_struct *tsk = current;
16469
16470 if (!tsk_used_math(tsk)) {
16471 local_irq_enable();
16472diff -urNp linux-3.1.1/arch/x86/kernel/verify_cpu.S linux-3.1.1/arch/x86/kernel/verify_cpu.S
16473--- linux-3.1.1/arch/x86/kernel/verify_cpu.S 2011-11-11 15:19:27.000000000 -0500
16474+++ linux-3.1.1/arch/x86/kernel/verify_cpu.S 2011-11-16 18:40:08.000000000 -0500
16475@@ -20,6 +20,7 @@
16476 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
16477 * arch/x86/kernel/trampoline_64.S: secondary processor verification
16478 * arch/x86/kernel/head_32.S: processor startup
16479+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
16480 *
16481 * verify_cpu, returns the status of longmode and SSE in register %eax.
16482 * 0: Success 1: Failure
16483diff -urNp linux-3.1.1/arch/x86/kernel/vm86_32.c linux-3.1.1/arch/x86/kernel/vm86_32.c
16484--- linux-3.1.1/arch/x86/kernel/vm86_32.c 2011-11-11 15:19:27.000000000 -0500
16485+++ linux-3.1.1/arch/x86/kernel/vm86_32.c 2011-11-16 18:40:08.000000000 -0500
16486@@ -41,6 +41,7 @@
16487 #include <linux/ptrace.h>
16488 #include <linux/audit.h>
16489 #include <linux/stddef.h>
16490+#include <linux/grsecurity.h>
16491
16492 #include <asm/uaccess.h>
16493 #include <asm/io.h>
16494@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16495 do_exit(SIGSEGV);
16496 }
16497
16498- tss = &per_cpu(init_tss, get_cpu());
16499+ tss = init_tss + get_cpu();
16500 current->thread.sp0 = current->thread.saved_sp0;
16501 current->thread.sysenter_cs = __KERNEL_CS;
16502 load_sp0(tss, &current->thread);
16503@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
16504 struct task_struct *tsk;
16505 int tmp, ret = -EPERM;
16506
16507+#ifdef CONFIG_GRKERNSEC_VM86
16508+ if (!capable(CAP_SYS_RAWIO)) {
16509+ gr_handle_vm86();
16510+ goto out;
16511+ }
16512+#endif
16513+
16514 tsk = current;
16515 if (tsk->thread.saved_sp0)
16516 goto out;
16517@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
16518 int tmp, ret;
16519 struct vm86plus_struct __user *v86;
16520
16521+#ifdef CONFIG_GRKERNSEC_VM86
16522+ if (!capable(CAP_SYS_RAWIO)) {
16523+ gr_handle_vm86();
16524+ ret = -EPERM;
16525+ goto out;
16526+ }
16527+#endif
16528+
16529 tsk = current;
16530 switch (cmd) {
16531 case VM86_REQUEST_IRQ:
16532@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16533 tsk->thread.saved_fs = info->regs32->fs;
16534 tsk->thread.saved_gs = get_user_gs(info->regs32);
16535
16536- tss = &per_cpu(init_tss, get_cpu());
16537+ tss = init_tss + get_cpu();
16538 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16539 if (cpu_has_sep)
16540 tsk->thread.sysenter_cs = 0;
16541@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
16542 goto cannot_handle;
16543 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
16544 goto cannot_handle;
16545- intr_ptr = (unsigned long __user *) (i << 2);
16546+ intr_ptr = (__force unsigned long __user *) (i << 2);
16547 if (get_user(segoffs, intr_ptr))
16548 goto cannot_handle;
16549 if ((segoffs >> 16) == BIOSSEG)
16550diff -urNp linux-3.1.1/arch/x86/kernel/vmlinux.lds.S linux-3.1.1/arch/x86/kernel/vmlinux.lds.S
16551--- linux-3.1.1/arch/x86/kernel/vmlinux.lds.S 2011-11-11 15:19:27.000000000 -0500
16552+++ linux-3.1.1/arch/x86/kernel/vmlinux.lds.S 2011-11-16 18:39:07.000000000 -0500
16553@@ -26,6 +26,13 @@
16554 #include <asm/page_types.h>
16555 #include <asm/cache.h>
16556 #include <asm/boot.h>
16557+#include <asm/segment.h>
16558+
16559+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16560+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
16561+#else
16562+#define __KERNEL_TEXT_OFFSET 0
16563+#endif
16564
16565 #undef i386 /* in case the preprocessor is a 32bit one */
16566
16567@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
16568
16569 PHDRS {
16570 text PT_LOAD FLAGS(5); /* R_E */
16571+#ifdef CONFIG_X86_32
16572+ module PT_LOAD FLAGS(5); /* R_E */
16573+#endif
16574+#ifdef CONFIG_XEN
16575+ rodata PT_LOAD FLAGS(5); /* R_E */
16576+#else
16577+ rodata PT_LOAD FLAGS(4); /* R__ */
16578+#endif
16579 data PT_LOAD FLAGS(6); /* RW_ */
16580-#ifdef CONFIG_X86_64
16581+ init.begin PT_LOAD FLAGS(6); /* RW_ */
16582 #ifdef CONFIG_SMP
16583 percpu PT_LOAD FLAGS(6); /* RW_ */
16584 #endif
16585+ text.init PT_LOAD FLAGS(5); /* R_E */
16586+ text.exit PT_LOAD FLAGS(5); /* R_E */
16587 init PT_LOAD FLAGS(7); /* RWE */
16588-#endif
16589 note PT_NOTE FLAGS(0); /* ___ */
16590 }
16591
16592 SECTIONS
16593 {
16594 #ifdef CONFIG_X86_32
16595- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
16596- phys_startup_32 = startup_32 - LOAD_OFFSET;
16597+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
16598 #else
16599- . = __START_KERNEL;
16600- phys_startup_64 = startup_64 - LOAD_OFFSET;
16601+ . = __START_KERNEL;
16602 #endif
16603
16604 /* Text and read-only data */
16605- .text : AT(ADDR(.text) - LOAD_OFFSET) {
16606- _text = .;
16607+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16608 /* bootstrapping code */
16609+#ifdef CONFIG_X86_32
16610+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16611+#else
16612+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16613+#endif
16614+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16615+ _text = .;
16616 HEAD_TEXT
16617 #ifdef CONFIG_X86_32
16618 . = ALIGN(PAGE_SIZE);
16619@@ -108,13 +128,47 @@ SECTIONS
16620 IRQENTRY_TEXT
16621 *(.fixup)
16622 *(.gnu.warning)
16623- /* End of text section */
16624- _etext = .;
16625 } :text = 0x9090
16626
16627- NOTES :text :note
16628+ . += __KERNEL_TEXT_OFFSET;
16629+
16630+#ifdef CONFIG_X86_32
16631+ . = ALIGN(PAGE_SIZE);
16632+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
16633+
16634+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
16635+ MODULES_EXEC_VADDR = .;
16636+ BYTE(0)
16637+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
16638+ . = ALIGN(HPAGE_SIZE);
16639+ MODULES_EXEC_END = . - 1;
16640+#endif
16641+
16642+ } :module
16643+#endif
16644+
16645+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
16646+ /* End of text section */
16647+ _etext = . - __KERNEL_TEXT_OFFSET;
16648+ }
16649+
16650+#ifdef CONFIG_X86_32
16651+ . = ALIGN(PAGE_SIZE);
16652+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
16653+ *(.idt)
16654+ . = ALIGN(PAGE_SIZE);
16655+ *(.empty_zero_page)
16656+ *(.initial_pg_fixmap)
16657+ *(.initial_pg_pmd)
16658+ *(.initial_page_table)
16659+ *(.swapper_pg_dir)
16660+ } :rodata
16661+#endif
16662+
16663+ . = ALIGN(PAGE_SIZE);
16664+ NOTES :rodata :note
16665
16666- EXCEPTION_TABLE(16) :text = 0x9090
16667+ EXCEPTION_TABLE(16) :rodata
16668
16669 #if defined(CONFIG_DEBUG_RODATA)
16670 /* .text should occupy whole number of pages */
16671@@ -126,16 +180,20 @@ SECTIONS
16672
16673 /* Data */
16674 .data : AT(ADDR(.data) - LOAD_OFFSET) {
16675+
16676+#ifdef CONFIG_PAX_KERNEXEC
16677+ . = ALIGN(HPAGE_SIZE);
16678+#else
16679+ . = ALIGN(PAGE_SIZE);
16680+#endif
16681+
16682 /* Start of data section */
16683 _sdata = .;
16684
16685 /* init_task */
16686 INIT_TASK_DATA(THREAD_SIZE)
16687
16688-#ifdef CONFIG_X86_32
16689- /* 32 bit has nosave before _edata */
16690 NOSAVE_DATA
16691-#endif
16692
16693 PAGE_ALIGNED_DATA(PAGE_SIZE)
16694
16695@@ -176,12 +234,19 @@ SECTIONS
16696 #endif /* CONFIG_X86_64 */
16697
16698 /* Init code and data - will be freed after init */
16699- . = ALIGN(PAGE_SIZE);
16700 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
16701+ BYTE(0)
16702+
16703+#ifdef CONFIG_PAX_KERNEXEC
16704+ . = ALIGN(HPAGE_SIZE);
16705+#else
16706+ . = ALIGN(PAGE_SIZE);
16707+#endif
16708+
16709 __init_begin = .; /* paired with __init_end */
16710- }
16711+ } :init.begin
16712
16713-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
16714+#ifdef CONFIG_SMP
16715 /*
16716 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16717 * output PHDR, so the next output section - .init.text - should
16718@@ -190,12 +255,27 @@ SECTIONS
16719 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16720 #endif
16721
16722- INIT_TEXT_SECTION(PAGE_SIZE)
16723-#ifdef CONFIG_X86_64
16724- :init
16725-#endif
16726+ . = ALIGN(PAGE_SIZE);
16727+ init_begin = .;
16728+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16729+ VMLINUX_SYMBOL(_sinittext) = .;
16730+ INIT_TEXT
16731+ VMLINUX_SYMBOL(_einittext) = .;
16732+ . = ALIGN(PAGE_SIZE);
16733+ } :text.init
16734
16735- INIT_DATA_SECTION(16)
16736+ /*
16737+ * .exit.text is discard at runtime, not link time, to deal with
16738+ * references from .altinstructions and .eh_frame
16739+ */
16740+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16741+ EXIT_TEXT
16742+ . = ALIGN(16);
16743+ } :text.exit
16744+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16745+
16746+ . = ALIGN(PAGE_SIZE);
16747+ INIT_DATA_SECTION(16) :init
16748
16749 /*
16750 * Code and data for a variety of lowlevel trampolines, to be
16751@@ -269,19 +349,12 @@ SECTIONS
16752 }
16753
16754 . = ALIGN(8);
16755- /*
16756- * .exit.text is discard at runtime, not link time, to deal with
16757- * references from .altinstructions and .eh_frame
16758- */
16759- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16760- EXIT_TEXT
16761- }
16762
16763 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16764 EXIT_DATA
16765 }
16766
16767-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16768+#ifndef CONFIG_SMP
16769 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16770 #endif
16771
16772@@ -300,16 +373,10 @@ SECTIONS
16773 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16774 __smp_locks = .;
16775 *(.smp_locks)
16776- . = ALIGN(PAGE_SIZE);
16777 __smp_locks_end = .;
16778+ . = ALIGN(PAGE_SIZE);
16779 }
16780
16781-#ifdef CONFIG_X86_64
16782- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16783- NOSAVE_DATA
16784- }
16785-#endif
16786-
16787 /* BSS */
16788 . = ALIGN(PAGE_SIZE);
16789 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16790@@ -325,6 +392,7 @@ SECTIONS
16791 __brk_base = .;
16792 . += 64 * 1024; /* 64k alignment slop space */
16793 *(.brk_reservation) /* areas brk users have reserved */
16794+ . = ALIGN(HPAGE_SIZE);
16795 __brk_limit = .;
16796 }
16797
16798@@ -351,13 +419,12 @@ SECTIONS
16799 * for the boot processor.
16800 */
16801 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16802-INIT_PER_CPU(gdt_page);
16803 INIT_PER_CPU(irq_stack_union);
16804
16805 /*
16806 * Build-time check on the image size:
16807 */
16808-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16809+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16810 "kernel image bigger than KERNEL_IMAGE_SIZE");
16811
16812 #ifdef CONFIG_SMP
16813diff -urNp linux-3.1.1/arch/x86/kernel/vsyscall_64.c linux-3.1.1/arch/x86/kernel/vsyscall_64.c
16814--- linux-3.1.1/arch/x86/kernel/vsyscall_64.c 2011-11-11 15:19:27.000000000 -0500
16815+++ linux-3.1.1/arch/x86/kernel/vsyscall_64.c 2011-11-16 18:39:07.000000000 -0500
16816@@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, v
16817 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16818 };
16819
16820-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
16821+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
16822
16823 static int __init vsyscall_setup(char *str)
16824 {
16825 if (str) {
16826 if (!strcmp("emulate", str))
16827 vsyscall_mode = EMULATE;
16828- else if (!strcmp("native", str))
16829- vsyscall_mode = NATIVE;
16830 else if (!strcmp("none", str))
16831 vsyscall_mode = NONE;
16832 else
16833@@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *re
16834
16835 tsk = current;
16836 if (seccomp_mode(&tsk->seccomp))
16837- do_exit(SIGKILL);
16838+ do_group_exit(SIGKILL);
16839
16840 switch (vsyscall_nr) {
16841 case 0:
16842@@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *re
16843 return true;
16844
16845 sigsegv:
16846- force_sig(SIGSEGV, current);
16847- return true;
16848+ do_group_exit(SIGKILL);
16849 }
16850
16851 /*
16852@@ -273,10 +270,7 @@ void __init map_vsyscall(void)
16853 extern char __vvar_page;
16854 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
16855
16856- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
16857- vsyscall_mode == NATIVE
16858- ? PAGE_KERNEL_VSYSCALL
16859- : PAGE_KERNEL_VVAR);
16860+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
16861 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
16862 (unsigned long)VSYSCALL_START);
16863
16864diff -urNp linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c
16865--- linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c 2011-11-11 15:19:27.000000000 -0500
16866+++ linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c 2011-11-16 18:39:07.000000000 -0500
16867@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16868 EXPORT_SYMBOL(copy_user_generic_string);
16869 EXPORT_SYMBOL(copy_user_generic_unrolled);
16870 EXPORT_SYMBOL(__copy_user_nocache);
16871-EXPORT_SYMBOL(_copy_from_user);
16872-EXPORT_SYMBOL(_copy_to_user);
16873
16874 EXPORT_SYMBOL(copy_page);
16875 EXPORT_SYMBOL(clear_page);
16876diff -urNp linux-3.1.1/arch/x86/kernel/xsave.c linux-3.1.1/arch/x86/kernel/xsave.c
16877--- linux-3.1.1/arch/x86/kernel/xsave.c 2011-11-11 15:19:27.000000000 -0500
16878+++ linux-3.1.1/arch/x86/kernel/xsave.c 2011-11-16 18:39:07.000000000 -0500
16879@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16880 fx_sw_user->xstate_size > fx_sw_user->extended_size)
16881 return -EINVAL;
16882
16883- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16884+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16885 fx_sw_user->extended_size -
16886 FP_XSTATE_MAGIC2_SIZE));
16887 if (err)
16888@@ -267,7 +267,7 @@ fx_only:
16889 * the other extended state.
16890 */
16891 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16892- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16893+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16894 }
16895
16896 /*
16897@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16898 if (use_xsave())
16899 err = restore_user_xstate(buf);
16900 else
16901- err = fxrstor_checking((__force struct i387_fxsave_struct *)
16902+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16903 buf);
16904 if (unlikely(err)) {
16905 /*
16906diff -urNp linux-3.1.1/arch/x86/kvm/emulate.c linux-3.1.1/arch/x86/kvm/emulate.c
16907--- linux-3.1.1/arch/x86/kvm/emulate.c 2011-11-11 15:19:27.000000000 -0500
16908+++ linux-3.1.1/arch/x86/kvm/emulate.c 2011-11-16 18:39:07.000000000 -0500
16909@@ -96,7 +96,7 @@
16910 #define Src2ImmByte (2<<29)
16911 #define Src2One (3<<29)
16912 #define Src2Imm (4<<29)
16913-#define Src2Mask (7<<29)
16914+#define Src2Mask (7U<<29)
16915
16916 #define X2(x...) x, x
16917 #define X3(x...) X2(x), x
16918@@ -207,6 +207,7 @@ struct gprefix {
16919
16920 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16921 do { \
16922+ unsigned long _tmp; \
16923 __asm__ __volatile__ ( \
16924 _PRE_EFLAGS("0", "4", "2") \
16925 _op _suffix " %"_x"3,%1; " \
16926@@ -220,8 +221,6 @@ struct gprefix {
16927 /* Raw emulation: instruction has two explicit operands. */
16928 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16929 do { \
16930- unsigned long _tmp; \
16931- \
16932 switch ((_dst).bytes) { \
16933 case 2: \
16934 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16935@@ -237,7 +236,6 @@ struct gprefix {
16936
16937 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16938 do { \
16939- unsigned long _tmp; \
16940 switch ((_dst).bytes) { \
16941 case 1: \
16942 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16943diff -urNp linux-3.1.1/arch/x86/kvm/lapic.c linux-3.1.1/arch/x86/kvm/lapic.c
16944--- linux-3.1.1/arch/x86/kvm/lapic.c 2011-11-11 15:19:27.000000000 -0500
16945+++ linux-3.1.1/arch/x86/kvm/lapic.c 2011-11-16 18:39:07.000000000 -0500
16946@@ -53,7 +53,7 @@
16947 #define APIC_BUS_CYCLE_NS 1
16948
16949 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16950-#define apic_debug(fmt, arg...)
16951+#define apic_debug(fmt, arg...) do {} while (0)
16952
16953 #define APIC_LVT_NUM 6
16954 /* 14 is the version for Xeon and Pentium 8.4.8*/
16955diff -urNp linux-3.1.1/arch/x86/kvm/mmu.c linux-3.1.1/arch/x86/kvm/mmu.c
16956--- linux-3.1.1/arch/x86/kvm/mmu.c 2011-11-11 15:19:27.000000000 -0500
16957+++ linux-3.1.1/arch/x86/kvm/mmu.c 2011-11-16 18:39:07.000000000 -0500
16958@@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16959
16960 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16961
16962- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16963+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16964
16965 /*
16966 * Assume that the pte write on a page table of the same type
16967@@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16968 }
16969
16970 spin_lock(&vcpu->kvm->mmu_lock);
16971- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16972+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16973 gentry = 0;
16974 kvm_mmu_free_some_pages(vcpu);
16975 ++vcpu->kvm->stat.mmu_pte_write;
16976diff -urNp linux-3.1.1/arch/x86/kvm/paging_tmpl.h linux-3.1.1/arch/x86/kvm/paging_tmpl.h
16977--- linux-3.1.1/arch/x86/kvm/paging_tmpl.h 2011-11-11 15:19:27.000000000 -0500
16978+++ linux-3.1.1/arch/x86/kvm/paging_tmpl.h 2011-11-16 19:40:44.000000000 -0500
16979@@ -197,7 +197,7 @@ retry_walk:
16980 if (unlikely(kvm_is_error_hva(host_addr)))
16981 goto error;
16982
16983- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
16984+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
16985 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
16986 goto error;
16987
16988@@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_
16989 unsigned long mmu_seq;
16990 bool map_writable;
16991
16992+ pax_track_stack();
16993+
16994 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16995
16996 if (unlikely(error_code & PFERR_RSVD_MASK))
16997@@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16998 if (need_flush)
16999 kvm_flush_remote_tlbs(vcpu->kvm);
17000
17001- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
17002+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
17003
17004 spin_unlock(&vcpu->kvm->mmu_lock);
17005
17006diff -urNp linux-3.1.1/arch/x86/kvm/svm.c linux-3.1.1/arch/x86/kvm/svm.c
17007--- linux-3.1.1/arch/x86/kvm/svm.c 2011-11-11 15:19:27.000000000 -0500
17008+++ linux-3.1.1/arch/x86/kvm/svm.c 2011-11-16 18:39:07.000000000 -0500
17009@@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *
17010 int cpu = raw_smp_processor_id();
17011
17012 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
17013+
17014+ pax_open_kernel();
17015 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
17016+ pax_close_kernel();
17017+
17018 load_TR_desc();
17019 }
17020
17021@@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu
17022 #endif
17023 #endif
17024
17025+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17026+ __set_fs(current_thread_info()->addr_limit);
17027+#endif
17028+
17029 reload_tss(vcpu);
17030
17031 local_irq_disable();
17032diff -urNp linux-3.1.1/arch/x86/kvm/vmx.c linux-3.1.1/arch/x86/kvm/vmx.c
17033--- linux-3.1.1/arch/x86/kvm/vmx.c 2011-11-11 15:19:27.000000000 -0500
17034+++ linux-3.1.1/arch/x86/kvm/vmx.c 2011-11-16 18:39:07.000000000 -0500
17035@@ -1251,7 +1251,11 @@ static void reload_tss(void)
17036 struct desc_struct *descs;
17037
17038 descs = (void *)gdt->address;
17039+
17040+ pax_open_kernel();
17041 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17042+ pax_close_kernel();
17043+
17044 load_TR_desc();
17045 }
17046
17047@@ -2520,8 +2524,11 @@ static __init int hardware_setup(void)
17048 if (!cpu_has_vmx_flexpriority())
17049 flexpriority_enabled = 0;
17050
17051- if (!cpu_has_vmx_tpr_shadow())
17052- kvm_x86_ops->update_cr8_intercept = NULL;
17053+ if (!cpu_has_vmx_tpr_shadow()) {
17054+ pax_open_kernel();
17055+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17056+ pax_close_kernel();
17057+ }
17058
17059 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17060 kvm_disable_largepages();
17061@@ -3535,7 +3542,7 @@ static void vmx_set_constant_host_state(
17062 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
17063
17064 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
17065- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
17066+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
17067
17068 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
17069 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
17070@@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struc
17071 "jmp .Lkvm_vmx_return \n\t"
17072 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17073 ".Lkvm_vmx_return: "
17074+
17075+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17076+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17077+ ".Lkvm_vmx_return2: "
17078+#endif
17079+
17080 /* Save guest registers, load host registers, keep flags */
17081 "mov %0, %c[wordsize](%%"R"sp) \n\t"
17082 "pop %0 \n\t"
17083@@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struc
17084 #endif
17085 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
17086 [wordsize]"i"(sizeof(ulong))
17087+
17088+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17089+ ,[cs]"i"(__KERNEL_CS)
17090+#endif
17091+
17092 : "cc", "memory"
17093 , R"ax", R"bx", R"di", R"si"
17094 #ifdef CONFIG_X86_64
17095@@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struc
17096 }
17097 }
17098
17099- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17100+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17101+
17102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17103+ loadsegment(fs, __KERNEL_PERCPU);
17104+#endif
17105+
17106+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17107+ __set_fs(current_thread_info()->addr_limit);
17108+#endif
17109+
17110 vmx->loaded_vmcs->launched = 1;
17111
17112 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
17113diff -urNp linux-3.1.1/arch/x86/kvm/x86.c linux-3.1.1/arch/x86/kvm/x86.c
17114--- linux-3.1.1/arch/x86/kvm/x86.c 2011-11-11 15:19:27.000000000 -0500
17115+++ linux-3.1.1/arch/x86/kvm/x86.c 2011-11-16 18:39:07.000000000 -0500
17116@@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcp
17117 {
17118 struct kvm *kvm = vcpu->kvm;
17119 int lm = is_long_mode(vcpu);
17120- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17121- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17122+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17123+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17124 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
17125 : kvm->arch.xen_hvm_config.blob_size_32;
17126 u32 page_num = data & ~PAGE_MASK;
17127@@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *fil
17128 if (n < msr_list.nmsrs)
17129 goto out;
17130 r = -EFAULT;
17131+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
17132+ goto out;
17133 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
17134 num_msrs_to_save * sizeof(u32)))
17135 goto out;
17136@@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17137 struct kvm_cpuid2 *cpuid,
17138 struct kvm_cpuid_entry2 __user *entries)
17139 {
17140- int r;
17141+ int r, i;
17142
17143 r = -E2BIG;
17144 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17145 goto out;
17146 r = -EFAULT;
17147- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17148- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17149+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17150 goto out;
17151+ for (i = 0; i < cpuid->nent; ++i) {
17152+ struct kvm_cpuid_entry2 cpuid_entry;
17153+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17154+ goto out;
17155+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
17156+ }
17157 vcpu->arch.cpuid_nent = cpuid->nent;
17158 kvm_apic_set_version(vcpu);
17159 kvm_x86_ops->cpuid_update(vcpu);
17160@@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17161 struct kvm_cpuid2 *cpuid,
17162 struct kvm_cpuid_entry2 __user *entries)
17163 {
17164- int r;
17165+ int r, i;
17166
17167 r = -E2BIG;
17168 if (cpuid->nent < vcpu->arch.cpuid_nent)
17169 goto out;
17170 r = -EFAULT;
17171- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17172- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17173+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17174 goto out;
17175+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17176+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17177+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17178+ goto out;
17179+ }
17180 return 0;
17181
17182 out:
17183@@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17184 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17185 struct kvm_interrupt *irq)
17186 {
17187- if (irq->irq < 0 || irq->irq >= 256)
17188+ if (irq->irq >= 256)
17189 return -EINVAL;
17190 if (irqchip_in_kernel(vcpu->kvm))
17191 return -ENXIO;
17192@@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void)
17193 kvm_mmu_set_mmio_spte_mask(mask);
17194 }
17195
17196-int kvm_arch_init(void *opaque)
17197+int kvm_arch_init(const void *opaque)
17198 {
17199 int r;
17200 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17201diff -urNp linux-3.1.1/arch/x86/lguest/boot.c linux-3.1.1/arch/x86/lguest/boot.c
17202--- linux-3.1.1/arch/x86/lguest/boot.c 2011-11-11 15:19:27.000000000 -0500
17203+++ linux-3.1.1/arch/x86/lguest/boot.c 2011-11-16 18:39:07.000000000 -0500
17204@@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vt
17205 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
17206 * Launcher to reboot us.
17207 */
17208-static void lguest_restart(char *reason)
17209+static __noreturn void lguest_restart(char *reason)
17210 {
17211 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
17212+ BUG();
17213 }
17214
17215 /*G:050
17216diff -urNp linux-3.1.1/arch/x86/lib/atomic64_32.c linux-3.1.1/arch/x86/lib/atomic64_32.c
17217--- linux-3.1.1/arch/x86/lib/atomic64_32.c 2011-11-11 15:19:27.000000000 -0500
17218+++ linux-3.1.1/arch/x86/lib/atomic64_32.c 2011-11-16 18:39:07.000000000 -0500
17219@@ -8,18 +8,30 @@
17220
17221 long long atomic64_read_cx8(long long, const atomic64_t *v);
17222 EXPORT_SYMBOL(atomic64_read_cx8);
17223+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17224+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
17225 long long atomic64_set_cx8(long long, const atomic64_t *v);
17226 EXPORT_SYMBOL(atomic64_set_cx8);
17227+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17228+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
17229 long long atomic64_xchg_cx8(long long, unsigned high);
17230 EXPORT_SYMBOL(atomic64_xchg_cx8);
17231 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
17232 EXPORT_SYMBOL(atomic64_add_return_cx8);
17233+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17234+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
17235 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
17236 EXPORT_SYMBOL(atomic64_sub_return_cx8);
17237+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17238+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
17239 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
17240 EXPORT_SYMBOL(atomic64_inc_return_cx8);
17241+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17242+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
17243 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
17244 EXPORT_SYMBOL(atomic64_dec_return_cx8);
17245+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17246+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
17247 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
17248 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
17249 int atomic64_inc_not_zero_cx8(atomic64_t *v);
17250@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
17251 #ifndef CONFIG_X86_CMPXCHG64
17252 long long atomic64_read_386(long long, const atomic64_t *v);
17253 EXPORT_SYMBOL(atomic64_read_386);
17254+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
17255+EXPORT_SYMBOL(atomic64_read_unchecked_386);
17256 long long atomic64_set_386(long long, const atomic64_t *v);
17257 EXPORT_SYMBOL(atomic64_set_386);
17258+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
17259+EXPORT_SYMBOL(atomic64_set_unchecked_386);
17260 long long atomic64_xchg_386(long long, unsigned high);
17261 EXPORT_SYMBOL(atomic64_xchg_386);
17262 long long atomic64_add_return_386(long long a, atomic64_t *v);
17263 EXPORT_SYMBOL(atomic64_add_return_386);
17264+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17265+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
17266 long long atomic64_sub_return_386(long long a, atomic64_t *v);
17267 EXPORT_SYMBOL(atomic64_sub_return_386);
17268+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17269+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
17270 long long atomic64_inc_return_386(long long a, atomic64_t *v);
17271 EXPORT_SYMBOL(atomic64_inc_return_386);
17272+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17273+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
17274 long long atomic64_dec_return_386(long long a, atomic64_t *v);
17275 EXPORT_SYMBOL(atomic64_dec_return_386);
17276+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17277+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
17278 long long atomic64_add_386(long long a, atomic64_t *v);
17279 EXPORT_SYMBOL(atomic64_add_386);
17280+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
17281+EXPORT_SYMBOL(atomic64_add_unchecked_386);
17282 long long atomic64_sub_386(long long a, atomic64_t *v);
17283 EXPORT_SYMBOL(atomic64_sub_386);
17284+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
17285+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
17286 long long atomic64_inc_386(long long a, atomic64_t *v);
17287 EXPORT_SYMBOL(atomic64_inc_386);
17288+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
17289+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
17290 long long atomic64_dec_386(long long a, atomic64_t *v);
17291 EXPORT_SYMBOL(atomic64_dec_386);
17292+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
17293+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
17294 long long atomic64_dec_if_positive_386(atomic64_t *v);
17295 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
17296 int atomic64_inc_not_zero_386(atomic64_t *v);
17297diff -urNp linux-3.1.1/arch/x86/lib/atomic64_386_32.S linux-3.1.1/arch/x86/lib/atomic64_386_32.S
17298--- linux-3.1.1/arch/x86/lib/atomic64_386_32.S 2011-11-11 15:19:27.000000000 -0500
17299+++ linux-3.1.1/arch/x86/lib/atomic64_386_32.S 2011-11-16 18:39:07.000000000 -0500
17300@@ -48,6 +48,10 @@ BEGIN(read)
17301 movl (v), %eax
17302 movl 4(v), %edx
17303 RET_ENDP
17304+BEGIN(read_unchecked)
17305+ movl (v), %eax
17306+ movl 4(v), %edx
17307+RET_ENDP
17308 #undef v
17309
17310 #define v %esi
17311@@ -55,6 +59,10 @@ BEGIN(set)
17312 movl %ebx, (v)
17313 movl %ecx, 4(v)
17314 RET_ENDP
17315+BEGIN(set_unchecked)
17316+ movl %ebx, (v)
17317+ movl %ecx, 4(v)
17318+RET_ENDP
17319 #undef v
17320
17321 #define v %esi
17322@@ -70,6 +78,20 @@ RET_ENDP
17323 BEGIN(add)
17324 addl %eax, (v)
17325 adcl %edx, 4(v)
17326+
17327+#ifdef CONFIG_PAX_REFCOUNT
17328+ jno 0f
17329+ subl %eax, (v)
17330+ sbbl %edx, 4(v)
17331+ int $4
17332+0:
17333+ _ASM_EXTABLE(0b, 0b)
17334+#endif
17335+
17336+RET_ENDP
17337+BEGIN(add_unchecked)
17338+ addl %eax, (v)
17339+ adcl %edx, 4(v)
17340 RET_ENDP
17341 #undef v
17342
17343@@ -77,6 +99,24 @@ RET_ENDP
17344 BEGIN(add_return)
17345 addl (v), %eax
17346 adcl 4(v), %edx
17347+
17348+#ifdef CONFIG_PAX_REFCOUNT
17349+ into
17350+1234:
17351+ _ASM_EXTABLE(1234b, 2f)
17352+#endif
17353+
17354+ movl %eax, (v)
17355+ movl %edx, 4(v)
17356+
17357+#ifdef CONFIG_PAX_REFCOUNT
17358+2:
17359+#endif
17360+
17361+RET_ENDP
17362+BEGIN(add_return_unchecked)
17363+ addl (v), %eax
17364+ adcl 4(v), %edx
17365 movl %eax, (v)
17366 movl %edx, 4(v)
17367 RET_ENDP
17368@@ -86,6 +126,20 @@ RET_ENDP
17369 BEGIN(sub)
17370 subl %eax, (v)
17371 sbbl %edx, 4(v)
17372+
17373+#ifdef CONFIG_PAX_REFCOUNT
17374+ jno 0f
17375+ addl %eax, (v)
17376+ adcl %edx, 4(v)
17377+ int $4
17378+0:
17379+ _ASM_EXTABLE(0b, 0b)
17380+#endif
17381+
17382+RET_ENDP
17383+BEGIN(sub_unchecked)
17384+ subl %eax, (v)
17385+ sbbl %edx, 4(v)
17386 RET_ENDP
17387 #undef v
17388
17389@@ -96,6 +150,27 @@ BEGIN(sub_return)
17390 sbbl $0, %edx
17391 addl (v), %eax
17392 adcl 4(v), %edx
17393+
17394+#ifdef CONFIG_PAX_REFCOUNT
17395+ into
17396+1234:
17397+ _ASM_EXTABLE(1234b, 2f)
17398+#endif
17399+
17400+ movl %eax, (v)
17401+ movl %edx, 4(v)
17402+
17403+#ifdef CONFIG_PAX_REFCOUNT
17404+2:
17405+#endif
17406+
17407+RET_ENDP
17408+BEGIN(sub_return_unchecked)
17409+ negl %edx
17410+ negl %eax
17411+ sbbl $0, %edx
17412+ addl (v), %eax
17413+ adcl 4(v), %edx
17414 movl %eax, (v)
17415 movl %edx, 4(v)
17416 RET_ENDP
17417@@ -105,6 +180,20 @@ RET_ENDP
17418 BEGIN(inc)
17419 addl $1, (v)
17420 adcl $0, 4(v)
17421+
17422+#ifdef CONFIG_PAX_REFCOUNT
17423+ jno 0f
17424+ subl $1, (v)
17425+ sbbl $0, 4(v)
17426+ int $4
17427+0:
17428+ _ASM_EXTABLE(0b, 0b)
17429+#endif
17430+
17431+RET_ENDP
17432+BEGIN(inc_unchecked)
17433+ addl $1, (v)
17434+ adcl $0, 4(v)
17435 RET_ENDP
17436 #undef v
17437
17438@@ -114,6 +203,26 @@ BEGIN(inc_return)
17439 movl 4(v), %edx
17440 addl $1, %eax
17441 adcl $0, %edx
17442+
17443+#ifdef CONFIG_PAX_REFCOUNT
17444+ into
17445+1234:
17446+ _ASM_EXTABLE(1234b, 2f)
17447+#endif
17448+
17449+ movl %eax, (v)
17450+ movl %edx, 4(v)
17451+
17452+#ifdef CONFIG_PAX_REFCOUNT
17453+2:
17454+#endif
17455+
17456+RET_ENDP
17457+BEGIN(inc_return_unchecked)
17458+ movl (v), %eax
17459+ movl 4(v), %edx
17460+ addl $1, %eax
17461+ adcl $0, %edx
17462 movl %eax, (v)
17463 movl %edx, 4(v)
17464 RET_ENDP
17465@@ -123,6 +232,20 @@ RET_ENDP
17466 BEGIN(dec)
17467 subl $1, (v)
17468 sbbl $0, 4(v)
17469+
17470+#ifdef CONFIG_PAX_REFCOUNT
17471+ jno 0f
17472+ addl $1, (v)
17473+ adcl $0, 4(v)
17474+ int $4
17475+0:
17476+ _ASM_EXTABLE(0b, 0b)
17477+#endif
17478+
17479+RET_ENDP
17480+BEGIN(dec_unchecked)
17481+ subl $1, (v)
17482+ sbbl $0, 4(v)
17483 RET_ENDP
17484 #undef v
17485
17486@@ -132,6 +255,26 @@ BEGIN(dec_return)
17487 movl 4(v), %edx
17488 subl $1, %eax
17489 sbbl $0, %edx
17490+
17491+#ifdef CONFIG_PAX_REFCOUNT
17492+ into
17493+1234:
17494+ _ASM_EXTABLE(1234b, 2f)
17495+#endif
17496+
17497+ movl %eax, (v)
17498+ movl %edx, 4(v)
17499+
17500+#ifdef CONFIG_PAX_REFCOUNT
17501+2:
17502+#endif
17503+
17504+RET_ENDP
17505+BEGIN(dec_return_unchecked)
17506+ movl (v), %eax
17507+ movl 4(v), %edx
17508+ subl $1, %eax
17509+ sbbl $0, %edx
17510 movl %eax, (v)
17511 movl %edx, 4(v)
17512 RET_ENDP
17513@@ -143,6 +286,13 @@ BEGIN(add_unless)
17514 adcl %edx, %edi
17515 addl (v), %eax
17516 adcl 4(v), %edx
17517+
17518+#ifdef CONFIG_PAX_REFCOUNT
17519+ into
17520+1234:
17521+ _ASM_EXTABLE(1234b, 2f)
17522+#endif
17523+
17524 cmpl %eax, %esi
17525 je 3f
17526 1:
17527@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
17528 1:
17529 addl $1, %eax
17530 adcl $0, %edx
17531+
17532+#ifdef CONFIG_PAX_REFCOUNT
17533+ into
17534+1234:
17535+ _ASM_EXTABLE(1234b, 2f)
17536+#endif
17537+
17538 movl %eax, (v)
17539 movl %edx, 4(v)
17540 movl $1, %eax
17541@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
17542 movl 4(v), %edx
17543 subl $1, %eax
17544 sbbl $0, %edx
17545+
17546+#ifdef CONFIG_PAX_REFCOUNT
17547+ into
17548+1234:
17549+ _ASM_EXTABLE(1234b, 1f)
17550+#endif
17551+
17552 js 1f
17553 movl %eax, (v)
17554 movl %edx, 4(v)
17555diff -urNp linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S
17556--- linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S 2011-11-11 15:19:27.000000000 -0500
17557+++ linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S 2011-11-16 18:39:07.000000000 -0500
17558@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
17559 CFI_STARTPROC
17560
17561 read64 %ecx
17562+ pax_force_retaddr
17563 ret
17564 CFI_ENDPROC
17565 ENDPROC(atomic64_read_cx8)
17566
17567+ENTRY(atomic64_read_unchecked_cx8)
17568+ CFI_STARTPROC
17569+
17570+ read64 %ecx
17571+ pax_force_retaddr
17572+ ret
17573+ CFI_ENDPROC
17574+ENDPROC(atomic64_read_unchecked_cx8)
17575+
17576 ENTRY(atomic64_set_cx8)
17577 CFI_STARTPROC
17578
17579@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
17580 cmpxchg8b (%esi)
17581 jne 1b
17582
17583+ pax_force_retaddr
17584 ret
17585 CFI_ENDPROC
17586 ENDPROC(atomic64_set_cx8)
17587
17588+ENTRY(atomic64_set_unchecked_cx8)
17589+ CFI_STARTPROC
17590+
17591+1:
17592+/* we don't need LOCK_PREFIX since aligned 64-bit writes
17593+ * are atomic on 586 and newer */
17594+ cmpxchg8b (%esi)
17595+ jne 1b
17596+
17597+ pax_force_retaddr
17598+ ret
17599+ CFI_ENDPROC
17600+ENDPROC(atomic64_set_unchecked_cx8)
17601+
17602 ENTRY(atomic64_xchg_cx8)
17603 CFI_STARTPROC
17604
17605@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
17606 cmpxchg8b (%esi)
17607 jne 1b
17608
17609+ pax_force_retaddr
17610 ret
17611 CFI_ENDPROC
17612 ENDPROC(atomic64_xchg_cx8)
17613
17614-.macro addsub_return func ins insc
17615-ENTRY(atomic64_\func\()_return_cx8)
17616+.macro addsub_return func ins insc unchecked=""
17617+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17618 CFI_STARTPROC
17619 SAVE ebp
17620 SAVE ebx
17621@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
17622 movl %edx, %ecx
17623 \ins\()l %esi, %ebx
17624 \insc\()l %edi, %ecx
17625+
17626+.ifb \unchecked
17627+#ifdef CONFIG_PAX_REFCOUNT
17628+ into
17629+2:
17630+ _ASM_EXTABLE(2b, 3f)
17631+#endif
17632+.endif
17633+
17634 LOCK_PREFIX
17635 cmpxchg8b (%ebp)
17636 jne 1b
17637-
17638-10:
17639 movl %ebx, %eax
17640 movl %ecx, %edx
17641+
17642+.ifb \unchecked
17643+#ifdef CONFIG_PAX_REFCOUNT
17644+3:
17645+#endif
17646+.endif
17647+
17648 RESTORE edi
17649 RESTORE esi
17650 RESTORE ebx
17651 RESTORE ebp
17652+ pax_force_retaddr
17653 ret
17654 CFI_ENDPROC
17655-ENDPROC(atomic64_\func\()_return_cx8)
17656+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17657 .endm
17658
17659 addsub_return add add adc
17660 addsub_return sub sub sbb
17661+addsub_return add add adc _unchecked
17662+addsub_return sub sub sbb _unchecked
17663
17664-.macro incdec_return func ins insc
17665-ENTRY(atomic64_\func\()_return_cx8)
17666+.macro incdec_return func ins insc unchecked
17667+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17668 CFI_STARTPROC
17669 SAVE ebx
17670
17671@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
17672 movl %edx, %ecx
17673 \ins\()l $1, %ebx
17674 \insc\()l $0, %ecx
17675+
17676+.ifb \unchecked
17677+#ifdef CONFIG_PAX_REFCOUNT
17678+ into
17679+2:
17680+ _ASM_EXTABLE(2b, 3f)
17681+#endif
17682+.endif
17683+
17684 LOCK_PREFIX
17685 cmpxchg8b (%esi)
17686 jne 1b
17687
17688-10:
17689 movl %ebx, %eax
17690 movl %ecx, %edx
17691+
17692+.ifb \unchecked
17693+#ifdef CONFIG_PAX_REFCOUNT
17694+3:
17695+#endif
17696+.endif
17697+
17698 RESTORE ebx
17699+ pax_force_retaddr
17700 ret
17701 CFI_ENDPROC
17702-ENDPROC(atomic64_\func\()_return_cx8)
17703+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17704 .endm
17705
17706 incdec_return inc add adc
17707 incdec_return dec sub sbb
17708+incdec_return inc add adc _unchecked
17709+incdec_return dec sub sbb _unchecked
17710
17711 ENTRY(atomic64_dec_if_positive_cx8)
17712 CFI_STARTPROC
17713@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
17714 movl %edx, %ecx
17715 subl $1, %ebx
17716 sbb $0, %ecx
17717+
17718+#ifdef CONFIG_PAX_REFCOUNT
17719+ into
17720+1234:
17721+ _ASM_EXTABLE(1234b, 2f)
17722+#endif
17723+
17724 js 2f
17725 LOCK_PREFIX
17726 cmpxchg8b (%esi)
17727@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
17728 movl %ebx, %eax
17729 movl %ecx, %edx
17730 RESTORE ebx
17731+ pax_force_retaddr
17732 ret
17733 CFI_ENDPROC
17734 ENDPROC(atomic64_dec_if_positive_cx8)
17735@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
17736 movl %edx, %ecx
17737 addl %esi, %ebx
17738 adcl %edi, %ecx
17739+
17740+#ifdef CONFIG_PAX_REFCOUNT
17741+ into
17742+1234:
17743+ _ASM_EXTABLE(1234b, 3f)
17744+#endif
17745+
17746 LOCK_PREFIX
17747 cmpxchg8b (%ebp)
17748 jne 1b
17749@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17750 CFI_ADJUST_CFA_OFFSET -8
17751 RESTORE ebx
17752 RESTORE ebp
17753+ pax_force_retaddr
17754 ret
17755 4:
17756 cmpl %edx, 4(%esp)
17757@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17758 movl %edx, %ecx
17759 addl $1, %ebx
17760 adcl $0, %ecx
17761+
17762+#ifdef CONFIG_PAX_REFCOUNT
17763+ into
17764+1234:
17765+ _ASM_EXTABLE(1234b, 3f)
17766+#endif
17767+
17768 LOCK_PREFIX
17769 cmpxchg8b (%esi)
17770 jne 1b
17771@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17772 movl $1, %eax
17773 3:
17774 RESTORE ebx
17775+ pax_force_retaddr
17776 ret
17777 4:
17778 testl %edx, %edx
17779diff -urNp linux-3.1.1/arch/x86/lib/checksum_32.S linux-3.1.1/arch/x86/lib/checksum_32.S
17780--- linux-3.1.1/arch/x86/lib/checksum_32.S 2011-11-11 15:19:27.000000000 -0500
17781+++ linux-3.1.1/arch/x86/lib/checksum_32.S 2011-11-16 18:39:07.000000000 -0500
17782@@ -28,7 +28,8 @@
17783 #include <linux/linkage.h>
17784 #include <asm/dwarf2.h>
17785 #include <asm/errno.h>
17786-
17787+#include <asm/segment.h>
17788+
17789 /*
17790 * computes a partial checksum, e.g. for TCP/UDP fragments
17791 */
17792@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17793
17794 #define ARGBASE 16
17795 #define FP 12
17796-
17797-ENTRY(csum_partial_copy_generic)
17798+
17799+ENTRY(csum_partial_copy_generic_to_user)
17800 CFI_STARTPROC
17801+
17802+#ifdef CONFIG_PAX_MEMORY_UDEREF
17803+ pushl_cfi %gs
17804+ popl_cfi %es
17805+ jmp csum_partial_copy_generic
17806+#endif
17807+
17808+ENTRY(csum_partial_copy_generic_from_user)
17809+
17810+#ifdef CONFIG_PAX_MEMORY_UDEREF
17811+ pushl_cfi %gs
17812+ popl_cfi %ds
17813+#endif
17814+
17815+ENTRY(csum_partial_copy_generic)
17816 subl $4,%esp
17817 CFI_ADJUST_CFA_OFFSET 4
17818 pushl_cfi %edi
17819@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17820 jmp 4f
17821 SRC(1: movw (%esi), %bx )
17822 addl $2, %esi
17823-DST( movw %bx, (%edi) )
17824+DST( movw %bx, %es:(%edi) )
17825 addl $2, %edi
17826 addw %bx, %ax
17827 adcl $0, %eax
17828@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17829 SRC(1: movl (%esi), %ebx )
17830 SRC( movl 4(%esi), %edx )
17831 adcl %ebx, %eax
17832-DST( movl %ebx, (%edi) )
17833+DST( movl %ebx, %es:(%edi) )
17834 adcl %edx, %eax
17835-DST( movl %edx, 4(%edi) )
17836+DST( movl %edx, %es:4(%edi) )
17837
17838 SRC( movl 8(%esi), %ebx )
17839 SRC( movl 12(%esi), %edx )
17840 adcl %ebx, %eax
17841-DST( movl %ebx, 8(%edi) )
17842+DST( movl %ebx, %es:8(%edi) )
17843 adcl %edx, %eax
17844-DST( movl %edx, 12(%edi) )
17845+DST( movl %edx, %es:12(%edi) )
17846
17847 SRC( movl 16(%esi), %ebx )
17848 SRC( movl 20(%esi), %edx )
17849 adcl %ebx, %eax
17850-DST( movl %ebx, 16(%edi) )
17851+DST( movl %ebx, %es:16(%edi) )
17852 adcl %edx, %eax
17853-DST( movl %edx, 20(%edi) )
17854+DST( movl %edx, %es:20(%edi) )
17855
17856 SRC( movl 24(%esi), %ebx )
17857 SRC( movl 28(%esi), %edx )
17858 adcl %ebx, %eax
17859-DST( movl %ebx, 24(%edi) )
17860+DST( movl %ebx, %es:24(%edi) )
17861 adcl %edx, %eax
17862-DST( movl %edx, 28(%edi) )
17863+DST( movl %edx, %es:28(%edi) )
17864
17865 lea 32(%esi), %esi
17866 lea 32(%edi), %edi
17867@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17868 shrl $2, %edx # This clears CF
17869 SRC(3: movl (%esi), %ebx )
17870 adcl %ebx, %eax
17871-DST( movl %ebx, (%edi) )
17872+DST( movl %ebx, %es:(%edi) )
17873 lea 4(%esi), %esi
17874 lea 4(%edi), %edi
17875 dec %edx
17876@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17877 jb 5f
17878 SRC( movw (%esi), %cx )
17879 leal 2(%esi), %esi
17880-DST( movw %cx, (%edi) )
17881+DST( movw %cx, %es:(%edi) )
17882 leal 2(%edi), %edi
17883 je 6f
17884 shll $16,%ecx
17885 SRC(5: movb (%esi), %cl )
17886-DST( movb %cl, (%edi) )
17887+DST( movb %cl, %es:(%edi) )
17888 6: addl %ecx, %eax
17889 adcl $0, %eax
17890 7:
17891@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17892
17893 6001:
17894 movl ARGBASE+20(%esp), %ebx # src_err_ptr
17895- movl $-EFAULT, (%ebx)
17896+ movl $-EFAULT, %ss:(%ebx)
17897
17898 # zero the complete destination - computing the rest
17899 # is too much work
17900@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17901
17902 6002:
17903 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17904- movl $-EFAULT,(%ebx)
17905+ movl $-EFAULT,%ss:(%ebx)
17906 jmp 5000b
17907
17908 .previous
17909
17910+ pushl_cfi %ss
17911+ popl_cfi %ds
17912+ pushl_cfi %ss
17913+ popl_cfi %es
17914 popl_cfi %ebx
17915 CFI_RESTORE ebx
17916 popl_cfi %esi
17917@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17918 popl_cfi %ecx # equivalent to addl $4,%esp
17919 ret
17920 CFI_ENDPROC
17921-ENDPROC(csum_partial_copy_generic)
17922+ENDPROC(csum_partial_copy_generic_to_user)
17923
17924 #else
17925
17926 /* Version for PentiumII/PPro */
17927
17928 #define ROUND1(x) \
17929+ nop; nop; nop; \
17930 SRC(movl x(%esi), %ebx ) ; \
17931 addl %ebx, %eax ; \
17932- DST(movl %ebx, x(%edi) ) ;
17933+ DST(movl %ebx, %es:x(%edi)) ;
17934
17935 #define ROUND(x) \
17936+ nop; nop; nop; \
17937 SRC(movl x(%esi), %ebx ) ; \
17938 adcl %ebx, %eax ; \
17939- DST(movl %ebx, x(%edi) ) ;
17940+ DST(movl %ebx, %es:x(%edi)) ;
17941
17942 #define ARGBASE 12
17943-
17944-ENTRY(csum_partial_copy_generic)
17945+
17946+ENTRY(csum_partial_copy_generic_to_user)
17947 CFI_STARTPROC
17948+
17949+#ifdef CONFIG_PAX_MEMORY_UDEREF
17950+ pushl_cfi %gs
17951+ popl_cfi %es
17952+ jmp csum_partial_copy_generic
17953+#endif
17954+
17955+ENTRY(csum_partial_copy_generic_from_user)
17956+
17957+#ifdef CONFIG_PAX_MEMORY_UDEREF
17958+ pushl_cfi %gs
17959+ popl_cfi %ds
17960+#endif
17961+
17962+ENTRY(csum_partial_copy_generic)
17963 pushl_cfi %ebx
17964 CFI_REL_OFFSET ebx, 0
17965 pushl_cfi %edi
17966@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17967 subl %ebx, %edi
17968 lea -1(%esi),%edx
17969 andl $-32,%edx
17970- lea 3f(%ebx,%ebx), %ebx
17971+ lea 3f(%ebx,%ebx,2), %ebx
17972 testl %esi, %esi
17973 jmp *%ebx
17974 1: addl $64,%esi
17975@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17976 jb 5f
17977 SRC( movw (%esi), %dx )
17978 leal 2(%esi), %esi
17979-DST( movw %dx, (%edi) )
17980+DST( movw %dx, %es:(%edi) )
17981 leal 2(%edi), %edi
17982 je 6f
17983 shll $16,%edx
17984 5:
17985 SRC( movb (%esi), %dl )
17986-DST( movb %dl, (%edi) )
17987+DST( movb %dl, %es:(%edi) )
17988 6: addl %edx, %eax
17989 adcl $0, %eax
17990 7:
17991 .section .fixup, "ax"
17992 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17993- movl $-EFAULT, (%ebx)
17994+ movl $-EFAULT, %ss:(%ebx)
17995 # zero the complete destination (computing the rest is too much work)
17996 movl ARGBASE+8(%esp),%edi # dst
17997 movl ARGBASE+12(%esp),%ecx # len
17998@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17999 rep; stosb
18000 jmp 7b
18001 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18002- movl $-EFAULT, (%ebx)
18003+ movl $-EFAULT, %ss:(%ebx)
18004 jmp 7b
18005 .previous
18006
18007+#ifdef CONFIG_PAX_MEMORY_UDEREF
18008+ pushl_cfi %ss
18009+ popl_cfi %ds
18010+ pushl_cfi %ss
18011+ popl_cfi %es
18012+#endif
18013+
18014 popl_cfi %esi
18015 CFI_RESTORE esi
18016 popl_cfi %edi
18017@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
18018 CFI_RESTORE ebx
18019 ret
18020 CFI_ENDPROC
18021-ENDPROC(csum_partial_copy_generic)
18022+ENDPROC(csum_partial_copy_generic_to_user)
18023
18024 #undef ROUND
18025 #undef ROUND1
18026diff -urNp linux-3.1.1/arch/x86/lib/clear_page_64.S linux-3.1.1/arch/x86/lib/clear_page_64.S
18027--- linux-3.1.1/arch/x86/lib/clear_page_64.S 2011-11-11 15:19:27.000000000 -0500
18028+++ linux-3.1.1/arch/x86/lib/clear_page_64.S 2011-11-16 18:39:07.000000000 -0500
18029@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
18030 movl $4096/8,%ecx
18031 xorl %eax,%eax
18032 rep stosq
18033+ pax_force_retaddr
18034 ret
18035 CFI_ENDPROC
18036 ENDPROC(clear_page_c)
18037@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
18038 movl $4096,%ecx
18039 xorl %eax,%eax
18040 rep stosb
18041+ pax_force_retaddr
18042 ret
18043 CFI_ENDPROC
18044 ENDPROC(clear_page_c_e)
18045@@ -43,6 +45,7 @@ ENTRY(clear_page)
18046 leaq 64(%rdi),%rdi
18047 jnz .Lloop
18048 nop
18049+ pax_force_retaddr
18050 ret
18051 CFI_ENDPROC
18052 .Lclear_page_end:
18053@@ -58,7 +61,7 @@ ENDPROC(clear_page)
18054
18055 #include <asm/cpufeature.h>
18056
18057- .section .altinstr_replacement,"ax"
18058+ .section .altinstr_replacement,"a"
18059 1: .byte 0xeb /* jmp <disp8> */
18060 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18061 2: .byte 0xeb /* jmp <disp8> */
18062diff -urNp linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S
18063--- linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S 2011-11-11 15:19:27.000000000 -0500
18064+++ linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S 2011-11-16 18:39:07.000000000 -0500
18065@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
18066
18067 popf
18068 mov $1, %al
18069+ pax_force_retaddr
18070 ret
18071
18072 not_same:
18073 popf
18074 xor %al,%al
18075+ pax_force_retaddr
18076 ret
18077
18078 CFI_ENDPROC
18079diff -urNp linux-3.1.1/arch/x86/lib/copy_page_64.S linux-3.1.1/arch/x86/lib/copy_page_64.S
18080--- linux-3.1.1/arch/x86/lib/copy_page_64.S 2011-11-11 15:19:27.000000000 -0500
18081+++ linux-3.1.1/arch/x86/lib/copy_page_64.S 2011-11-16 18:39:07.000000000 -0500
18082@@ -9,6 +9,7 @@ copy_page_c:
18083 CFI_STARTPROC
18084 movl $4096/8,%ecx
18085 rep movsq
18086+ pax_force_retaddr
18087 ret
18088 CFI_ENDPROC
18089 ENDPROC(copy_page_c)
18090@@ -95,6 +96,7 @@ ENTRY(copy_page)
18091 CFI_RESTORE r13
18092 addq $3*8,%rsp
18093 CFI_ADJUST_CFA_OFFSET -3*8
18094+ pax_force_retaddr
18095 ret
18096 .Lcopy_page_end:
18097 CFI_ENDPROC
18098@@ -105,7 +107,7 @@ ENDPROC(copy_page)
18099
18100 #include <asm/cpufeature.h>
18101
18102- .section .altinstr_replacement,"ax"
18103+ .section .altinstr_replacement,"a"
18104 1: .byte 0xeb /* jmp <disp8> */
18105 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18106 2:
18107diff -urNp linux-3.1.1/arch/x86/lib/copy_user_64.S linux-3.1.1/arch/x86/lib/copy_user_64.S
18108--- linux-3.1.1/arch/x86/lib/copy_user_64.S 2011-11-11 15:19:27.000000000 -0500
18109+++ linux-3.1.1/arch/x86/lib/copy_user_64.S 2011-11-16 18:39:07.000000000 -0500
18110@@ -16,6 +16,7 @@
18111 #include <asm/thread_info.h>
18112 #include <asm/cpufeature.h>
18113 #include <asm/alternative-asm.h>
18114+#include <asm/pgtable.h>
18115
18116 /*
18117 * By placing feature2 after feature1 in altinstructions section, we logically
18118@@ -29,7 +30,7 @@
18119 .byte 0xe9 /* 32bit jump */
18120 .long \orig-1f /* by default jump to orig */
18121 1:
18122- .section .altinstr_replacement,"ax"
18123+ .section .altinstr_replacement,"a"
18124 2: .byte 0xe9 /* near jump with 32bit immediate */
18125 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
18126 3: .byte 0xe9 /* near jump with 32bit immediate */
18127@@ -71,47 +72,20 @@
18128 #endif
18129 .endm
18130
18131-/* Standard copy_to_user with segment limit checking */
18132-ENTRY(_copy_to_user)
18133- CFI_STARTPROC
18134- GET_THREAD_INFO(%rax)
18135- movq %rdi,%rcx
18136- addq %rdx,%rcx
18137- jc bad_to_user
18138- cmpq TI_addr_limit(%rax),%rcx
18139- ja bad_to_user
18140- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18141- copy_user_generic_unrolled,copy_user_generic_string, \
18142- copy_user_enhanced_fast_string
18143- CFI_ENDPROC
18144-ENDPROC(_copy_to_user)
18145-
18146-/* Standard copy_from_user with segment limit checking */
18147-ENTRY(_copy_from_user)
18148- CFI_STARTPROC
18149- GET_THREAD_INFO(%rax)
18150- movq %rsi,%rcx
18151- addq %rdx,%rcx
18152- jc bad_from_user
18153- cmpq TI_addr_limit(%rax),%rcx
18154- ja bad_from_user
18155- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18156- copy_user_generic_unrolled,copy_user_generic_string, \
18157- copy_user_enhanced_fast_string
18158- CFI_ENDPROC
18159-ENDPROC(_copy_from_user)
18160-
18161 .section .fixup,"ax"
18162 /* must zero dest */
18163 ENTRY(bad_from_user)
18164 bad_from_user:
18165 CFI_STARTPROC
18166+ testl %edx,%edx
18167+ js bad_to_user
18168 movl %edx,%ecx
18169 xorl %eax,%eax
18170 rep
18171 stosb
18172 bad_to_user:
18173 movl %edx,%eax
18174+ pax_force_retaddr
18175 ret
18176 CFI_ENDPROC
18177 ENDPROC(bad_from_user)
18178@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
18179 decl %ecx
18180 jnz 21b
18181 23: xor %eax,%eax
18182+ pax_force_retaddr
18183 ret
18184
18185 .section .fixup,"ax"
18186@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
18187 3: rep
18188 movsb
18189 4: xorl %eax,%eax
18190+ pax_force_retaddr
18191 ret
18192
18193 .section .fixup,"ax"
18194@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
18195 1: rep
18196 movsb
18197 2: xorl %eax,%eax
18198+ pax_force_retaddr
18199 ret
18200
18201 .section .fixup,"ax"
18202diff -urNp linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S
18203--- linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S 2011-11-11 15:19:27.000000000 -0500
18204+++ linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S 2011-11-16 18:39:07.000000000 -0500
18205@@ -8,12 +8,14 @@
18206
18207 #include <linux/linkage.h>
18208 #include <asm/dwarf2.h>
18209+#include <asm/alternative-asm.h>
18210
18211 #define FIX_ALIGNMENT 1
18212
18213 #include <asm/current.h>
18214 #include <asm/asm-offsets.h>
18215 #include <asm/thread_info.h>
18216+#include <asm/pgtable.h>
18217
18218 .macro ALIGN_DESTINATION
18219 #ifdef FIX_ALIGNMENT
18220@@ -50,6 +52,15 @@
18221 */
18222 ENTRY(__copy_user_nocache)
18223 CFI_STARTPROC
18224+
18225+#ifdef CONFIG_PAX_MEMORY_UDEREF
18226+ mov $PAX_USER_SHADOW_BASE,%rcx
18227+ cmp %rcx,%rsi
18228+ jae 1f
18229+ add %rcx,%rsi
18230+1:
18231+#endif
18232+
18233 cmpl $8,%edx
18234 jb 20f /* less then 8 bytes, go to byte copy loop */
18235 ALIGN_DESTINATION
18236@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
18237 jnz 21b
18238 23: xorl %eax,%eax
18239 sfence
18240+ pax_force_retaddr
18241 ret
18242
18243 .section .fixup,"ax"
18244diff -urNp linux-3.1.1/arch/x86/lib/csum-copy_64.S linux-3.1.1/arch/x86/lib/csum-copy_64.S
18245--- linux-3.1.1/arch/x86/lib/csum-copy_64.S 2011-11-11 15:19:27.000000000 -0500
18246+++ linux-3.1.1/arch/x86/lib/csum-copy_64.S 2011-11-16 18:39:07.000000000 -0500
18247@@ -8,6 +8,7 @@
18248 #include <linux/linkage.h>
18249 #include <asm/dwarf2.h>
18250 #include <asm/errno.h>
18251+#include <asm/alternative-asm.h>
18252
18253 /*
18254 * Checksum copy with exception handling.
18255@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
18256 CFI_RESTORE rbp
18257 addq $7*8, %rsp
18258 CFI_ADJUST_CFA_OFFSET -7*8
18259+ pax_force_retaddr
18260 ret
18261 CFI_RESTORE_STATE
18262
18263diff -urNp linux-3.1.1/arch/x86/lib/csum-wrappers_64.c linux-3.1.1/arch/x86/lib/csum-wrappers_64.c
18264--- linux-3.1.1/arch/x86/lib/csum-wrappers_64.c 2011-11-11 15:19:27.000000000 -0500
18265+++ linux-3.1.1/arch/x86/lib/csum-wrappers_64.c 2011-11-16 18:39:07.000000000 -0500
18266@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
18267 len -= 2;
18268 }
18269 }
18270- isum = csum_partial_copy_generic((__force const void *)src,
18271+
18272+#ifdef CONFIG_PAX_MEMORY_UDEREF
18273+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18274+ src += PAX_USER_SHADOW_BASE;
18275+#endif
18276+
18277+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
18278 dst, len, isum, errp, NULL);
18279 if (unlikely(*errp))
18280 goto out_err;
18281@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
18282 }
18283
18284 *errp = 0;
18285- return csum_partial_copy_generic(src, (void __force *)dst,
18286+
18287+#ifdef CONFIG_PAX_MEMORY_UDEREF
18288+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18289+ dst += PAX_USER_SHADOW_BASE;
18290+#endif
18291+
18292+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
18293 len, isum, NULL, errp);
18294 }
18295 EXPORT_SYMBOL(csum_partial_copy_to_user);
18296diff -urNp linux-3.1.1/arch/x86/lib/getuser.S linux-3.1.1/arch/x86/lib/getuser.S
18297--- linux-3.1.1/arch/x86/lib/getuser.S 2011-11-11 15:19:27.000000000 -0500
18298+++ linux-3.1.1/arch/x86/lib/getuser.S 2011-11-16 18:39:07.000000000 -0500
18299@@ -33,15 +33,38 @@
18300 #include <asm/asm-offsets.h>
18301 #include <asm/thread_info.h>
18302 #include <asm/asm.h>
18303+#include <asm/segment.h>
18304+#include <asm/pgtable.h>
18305+#include <asm/alternative-asm.h>
18306+
18307+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18308+#define __copyuser_seg gs;
18309+#else
18310+#define __copyuser_seg
18311+#endif
18312
18313 .text
18314 ENTRY(__get_user_1)
18315 CFI_STARTPROC
18316+
18317+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18318 GET_THREAD_INFO(%_ASM_DX)
18319 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18320 jae bad_get_user
18321-1: movzb (%_ASM_AX),%edx
18322+
18323+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18324+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18325+ cmp %_ASM_DX,%_ASM_AX
18326+ jae 1234f
18327+ add %_ASM_DX,%_ASM_AX
18328+1234:
18329+#endif
18330+
18331+#endif
18332+
18333+1: __copyuser_seg movzb (%_ASM_AX),%edx
18334 xor %eax,%eax
18335+ pax_force_retaddr
18336 ret
18337 CFI_ENDPROC
18338 ENDPROC(__get_user_1)
18339@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
18340 ENTRY(__get_user_2)
18341 CFI_STARTPROC
18342 add $1,%_ASM_AX
18343+
18344+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18345 jc bad_get_user
18346 GET_THREAD_INFO(%_ASM_DX)
18347 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18348 jae bad_get_user
18349-2: movzwl -1(%_ASM_AX),%edx
18350+
18351+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18352+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18353+ cmp %_ASM_DX,%_ASM_AX
18354+ jae 1234f
18355+ add %_ASM_DX,%_ASM_AX
18356+1234:
18357+#endif
18358+
18359+#endif
18360+
18361+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18362 xor %eax,%eax
18363+ pax_force_retaddr
18364 ret
18365 CFI_ENDPROC
18366 ENDPROC(__get_user_2)
18367@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
18368 ENTRY(__get_user_4)
18369 CFI_STARTPROC
18370 add $3,%_ASM_AX
18371+
18372+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18373 jc bad_get_user
18374 GET_THREAD_INFO(%_ASM_DX)
18375 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18376 jae bad_get_user
18377-3: mov -3(%_ASM_AX),%edx
18378+
18379+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18380+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18381+ cmp %_ASM_DX,%_ASM_AX
18382+ jae 1234f
18383+ add %_ASM_DX,%_ASM_AX
18384+1234:
18385+#endif
18386+
18387+#endif
18388+
18389+3: __copyuser_seg mov -3(%_ASM_AX),%edx
18390 xor %eax,%eax
18391+ pax_force_retaddr
18392 ret
18393 CFI_ENDPROC
18394 ENDPROC(__get_user_4)
18395@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
18396 GET_THREAD_INFO(%_ASM_DX)
18397 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18398 jae bad_get_user
18399+
18400+#ifdef CONFIG_PAX_MEMORY_UDEREF
18401+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18402+ cmp %_ASM_DX,%_ASM_AX
18403+ jae 1234f
18404+ add %_ASM_DX,%_ASM_AX
18405+1234:
18406+#endif
18407+
18408 4: movq -7(%_ASM_AX),%_ASM_DX
18409 xor %eax,%eax
18410+ pax_force_retaddr
18411 ret
18412 CFI_ENDPROC
18413 ENDPROC(__get_user_8)
18414@@ -91,6 +152,7 @@ bad_get_user:
18415 CFI_STARTPROC
18416 xor %edx,%edx
18417 mov $(-EFAULT),%_ASM_AX
18418+ pax_force_retaddr
18419 ret
18420 CFI_ENDPROC
18421 END(bad_get_user)
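
getuser.S introduces __copyuser_seg, which expands to a "gs;" instruction prefix on i386 with UDEREF and to nothing otherwise; as I read the patch, userland is then reached only through the %gs segment, whose base and limit confine the access, which is why the explicit addr_limit comparison is compiled out on that configuration. The same macro is pasted into the inline-asm templates in usercopy_32.c further below via C string-literal concatenation. A tiny sketch of that pasting mechanism (the instruction string is a made-up example, not taken from the patch):

#include <stdio.h>

/* Stand-in for the patch's __copyuser_seg: "gs; " when i386 UDEREF is
 * enabled, empty otherwise.  Flip the definition to see both expansions. */
#define COPYUSER_SEG "gs; "

int main(void)
{
        /* Adjacent string literals concatenate at compile time, so the
         * segment prefix lands directly in front of the memory access
         * inside the asm template string. */
        const char *with_prefix = "1: " COPYUSER_SEG "movzb (%eax),%edx\n";
        fputs(with_prefix, stdout);
        return 0;
}
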
18422diff -urNp linux-3.1.1/arch/x86/lib/insn.c linux-3.1.1/arch/x86/lib/insn.c
18423--- linux-3.1.1/arch/x86/lib/insn.c 2011-11-11 15:19:27.000000000 -0500
18424+++ linux-3.1.1/arch/x86/lib/insn.c 2011-11-16 18:39:07.000000000 -0500
18425@@ -21,6 +21,11 @@
18426 #include <linux/string.h>
18427 #include <asm/inat.h>
18428 #include <asm/insn.h>
18429+#ifdef __KERNEL__
18430+#include <asm/pgtable_types.h>
18431+#else
18432+#define ktla_ktva(addr) addr
18433+#endif
18434
18435 #define get_next(t, insn) \
18436 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
18437@@ -40,8 +45,8 @@
18438 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
18439 {
18440 memset(insn, 0, sizeof(*insn));
18441- insn->kaddr = kaddr;
18442- insn->next_byte = kaddr;
18443+ insn->kaddr = ktla_ktva(kaddr);
18444+ insn->next_byte = ktla_ktva(kaddr);
18445 insn->x86_64 = x86_64 ? 1 : 0;
18446 insn->opnd_bytes = 4;
18447 if (x86_64)
18448diff -urNp linux-3.1.1/arch/x86/lib/iomap_copy_64.S linux-3.1.1/arch/x86/lib/iomap_copy_64.S
18449--- linux-3.1.1/arch/x86/lib/iomap_copy_64.S 2011-11-11 15:19:27.000000000 -0500
18450+++ linux-3.1.1/arch/x86/lib/iomap_copy_64.S 2011-11-16 18:39:07.000000000 -0500
18451@@ -17,6 +17,7 @@
18452
18453 #include <linux/linkage.h>
18454 #include <asm/dwarf2.h>
18455+#include <asm/alternative-asm.h>
18456
18457 /*
18458 * override generic version in lib/iomap_copy.c
18459@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
18460 CFI_STARTPROC
18461 movl %edx,%ecx
18462 rep movsd
18463+ pax_force_retaddr
18464 ret
18465 CFI_ENDPROC
18466 ENDPROC(__iowrite32_copy)
18467diff -urNp linux-3.1.1/arch/x86/lib/memcpy_64.S linux-3.1.1/arch/x86/lib/memcpy_64.S
18468--- linux-3.1.1/arch/x86/lib/memcpy_64.S 2011-11-11 15:19:27.000000000 -0500
18469+++ linux-3.1.1/arch/x86/lib/memcpy_64.S 2011-11-16 18:39:07.000000000 -0500
18470@@ -34,6 +34,7 @@
18471 rep movsq
18472 movl %edx, %ecx
18473 rep movsb
18474+ pax_force_retaddr
18475 ret
18476 .Lmemcpy_e:
18477 .previous
18478@@ -51,6 +52,7 @@
18479
18480 movl %edx, %ecx
18481 rep movsb
18482+ pax_force_retaddr
18483 ret
18484 .Lmemcpy_e_e:
18485 .previous
18486@@ -141,6 +143,7 @@ ENTRY(memcpy)
18487 movq %r9, 1*8(%rdi)
18488 movq %r10, -2*8(%rdi, %rdx)
18489 movq %r11, -1*8(%rdi, %rdx)
18490+ pax_force_retaddr
18491 retq
18492 .p2align 4
18493 .Lless_16bytes:
18494@@ -153,6 +156,7 @@ ENTRY(memcpy)
18495 movq -1*8(%rsi, %rdx), %r9
18496 movq %r8, 0*8(%rdi)
18497 movq %r9, -1*8(%rdi, %rdx)
18498+ pax_force_retaddr
18499 retq
18500 .p2align 4
18501 .Lless_8bytes:
18502@@ -166,6 +170,7 @@ ENTRY(memcpy)
18503 movl -4(%rsi, %rdx), %r8d
18504 movl %ecx, (%rdi)
18505 movl %r8d, -4(%rdi, %rdx)
18506+ pax_force_retaddr
18507 retq
18508 .p2align 4
18509 .Lless_3bytes:
18510@@ -183,6 +188,7 @@ ENTRY(memcpy)
18511 jnz .Lloop_1
18512
18513 .Lend:
18514+ pax_force_retaddr
18515 retq
18516 CFI_ENDPROC
18517 ENDPROC(memcpy)
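
Every ret in these .S files gains a pax_force_retaddr in front of it. The macro body lives in the patched <asm/alternative-asm.h>, outside this excerpt; my working assumption is that under KERNEXEC it sets the top bit of the saved return address on the stack, so a return address that has been overwritten with a userland pointer becomes non-canonical and the ret faults instead of transferring control to user memory, while genuine kernel return addresses (top bit already set) pass through unchanged. The arithmetic, as a hedged standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Assumed effect of pax_force_retaddr: OR bit 63 into the saved return
 * address (the real macro is applied via the alternatives mechanism). */
static uint64_t force_retaddr(uint64_t ra)
{
        return ra | (1ULL << 63);
}

int main(void)
{
        uint64_t kernel_ra = 0xffffffff81000000ULL;  /* typical kernel text */
        uint64_t user_ra   = 0x0000000000400000ULL;  /* typical userland text */

        /* Kernel return addresses are unchanged... */
        printf("kernel: %#llx -> %#llx\n",
               (unsigned long long)kernel_ra,
               (unsigned long long)force_retaddr(kernel_ra));
        /* ...while a ret2usr target turns into a non-canonical address. */
        printf("user:   %#llx -> %#llx\n",
               (unsigned long long)user_ra,
               (unsigned long long)force_retaddr(user_ra));
        return 0;
}
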
18518diff -urNp linux-3.1.1/arch/x86/lib/memmove_64.S linux-3.1.1/arch/x86/lib/memmove_64.S
18519--- linux-3.1.1/arch/x86/lib/memmove_64.S 2011-11-11 15:19:27.000000000 -0500
18520+++ linux-3.1.1/arch/x86/lib/memmove_64.S 2011-11-16 18:39:07.000000000 -0500
18521@@ -202,6 +202,7 @@ ENTRY(memmove)
18522 movb (%rsi), %r11b
18523 movb %r11b, (%rdi)
18524 13:
18525+ pax_force_retaddr
18526 retq
18527 CFI_ENDPROC
18528
18529@@ -210,6 +211,7 @@ ENTRY(memmove)
18530 /* Forward moving data. */
18531 movq %rdx, %rcx
18532 rep movsb
18533+ pax_force_retaddr
18534 retq
18535 .Lmemmove_end_forward_efs:
18536 .previous
18537diff -urNp linux-3.1.1/arch/x86/lib/memset_64.S linux-3.1.1/arch/x86/lib/memset_64.S
18538--- linux-3.1.1/arch/x86/lib/memset_64.S 2011-11-11 15:19:27.000000000 -0500
18539+++ linux-3.1.1/arch/x86/lib/memset_64.S 2011-11-16 18:39:07.000000000 -0500
18540@@ -31,6 +31,7 @@
18541 movl %r8d,%ecx
18542 rep stosb
18543 movq %r9,%rax
18544+ pax_force_retaddr
18545 ret
18546 .Lmemset_e:
18547 .previous
18548@@ -53,6 +54,7 @@
18549 movl %edx,%ecx
18550 rep stosb
18551 movq %r9,%rax
18552+ pax_force_retaddr
18553 ret
18554 .Lmemset_e_e:
18555 .previous
18556@@ -121,6 +123,7 @@ ENTRY(__memset)
18557
18558 .Lende:
18559 movq %r10,%rax
18560+ pax_force_retaddr
18561 ret
18562
18563 CFI_RESTORE_STATE
18564diff -urNp linux-3.1.1/arch/x86/lib/mmx_32.c linux-3.1.1/arch/x86/lib/mmx_32.c
18565--- linux-3.1.1/arch/x86/lib/mmx_32.c 2011-11-11 15:19:27.000000000 -0500
18566+++ linux-3.1.1/arch/x86/lib/mmx_32.c 2011-11-16 18:39:07.000000000 -0500
18567@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18568 {
18569 void *p;
18570 int i;
18571+ unsigned long cr0;
18572
18573 if (unlikely(in_interrupt()))
18574 return __memcpy(to, from, len);
18575@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18576 kernel_fpu_begin();
18577
18578 __asm__ __volatile__ (
18579- "1: prefetch (%0)\n" /* This set is 28 bytes */
18580- " prefetch 64(%0)\n"
18581- " prefetch 128(%0)\n"
18582- " prefetch 192(%0)\n"
18583- " prefetch 256(%0)\n"
18584+ "1: prefetch (%1)\n" /* This set is 28 bytes */
18585+ " prefetch 64(%1)\n"
18586+ " prefetch 128(%1)\n"
18587+ " prefetch 192(%1)\n"
18588+ " prefetch 256(%1)\n"
18589 "2: \n"
18590 ".section .fixup, \"ax\"\n"
18591- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18592+ "3: \n"
18593+
18594+#ifdef CONFIG_PAX_KERNEXEC
18595+ " movl %%cr0, %0\n"
18596+ " movl %0, %%eax\n"
18597+ " andl $0xFFFEFFFF, %%eax\n"
18598+ " movl %%eax, %%cr0\n"
18599+#endif
18600+
18601+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18602+
18603+#ifdef CONFIG_PAX_KERNEXEC
18604+ " movl %0, %%cr0\n"
18605+#endif
18606+
18607 " jmp 2b\n"
18608 ".previous\n"
18609 _ASM_EXTABLE(1b, 3b)
18610- : : "r" (from));
18611+ : "=&r" (cr0) : "r" (from) : "ax");
18612
18613 for ( ; i > 5; i--) {
18614 __asm__ __volatile__ (
18615- "1: prefetch 320(%0)\n"
18616- "2: movq (%0), %%mm0\n"
18617- " movq 8(%0), %%mm1\n"
18618- " movq 16(%0), %%mm2\n"
18619- " movq 24(%0), %%mm3\n"
18620- " movq %%mm0, (%1)\n"
18621- " movq %%mm1, 8(%1)\n"
18622- " movq %%mm2, 16(%1)\n"
18623- " movq %%mm3, 24(%1)\n"
18624- " movq 32(%0), %%mm0\n"
18625- " movq 40(%0), %%mm1\n"
18626- " movq 48(%0), %%mm2\n"
18627- " movq 56(%0), %%mm3\n"
18628- " movq %%mm0, 32(%1)\n"
18629- " movq %%mm1, 40(%1)\n"
18630- " movq %%mm2, 48(%1)\n"
18631- " movq %%mm3, 56(%1)\n"
18632+ "1: prefetch 320(%1)\n"
18633+ "2: movq (%1), %%mm0\n"
18634+ " movq 8(%1), %%mm1\n"
18635+ " movq 16(%1), %%mm2\n"
18636+ " movq 24(%1), %%mm3\n"
18637+ " movq %%mm0, (%2)\n"
18638+ " movq %%mm1, 8(%2)\n"
18639+ " movq %%mm2, 16(%2)\n"
18640+ " movq %%mm3, 24(%2)\n"
18641+ " movq 32(%1), %%mm0\n"
18642+ " movq 40(%1), %%mm1\n"
18643+ " movq 48(%1), %%mm2\n"
18644+ " movq 56(%1), %%mm3\n"
18645+ " movq %%mm0, 32(%2)\n"
18646+ " movq %%mm1, 40(%2)\n"
18647+ " movq %%mm2, 48(%2)\n"
18648+ " movq %%mm3, 56(%2)\n"
18649 ".section .fixup, \"ax\"\n"
18650- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18651+ "3:\n"
18652+
18653+#ifdef CONFIG_PAX_KERNEXEC
18654+ " movl %%cr0, %0\n"
18655+ " movl %0, %%eax\n"
18656+ " andl $0xFFFEFFFF, %%eax\n"
18657+ " movl %%eax, %%cr0\n"
18658+#endif
18659+
18660+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18661+
18662+#ifdef CONFIG_PAX_KERNEXEC
18663+ " movl %0, %%cr0\n"
18664+#endif
18665+
18666 " jmp 2b\n"
18667 ".previous\n"
18668 _ASM_EXTABLE(1b, 3b)
18669- : : "r" (from), "r" (to) : "memory");
18670+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18671
18672 from += 64;
18673 to += 64;
18674@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18675 static void fast_copy_page(void *to, void *from)
18676 {
18677 int i;
18678+ unsigned long cr0;
18679
18680 kernel_fpu_begin();
18681
18682@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18683 * but that is for later. -AV
18684 */
18685 __asm__ __volatile__(
18686- "1: prefetch (%0)\n"
18687- " prefetch 64(%0)\n"
18688- " prefetch 128(%0)\n"
18689- " prefetch 192(%0)\n"
18690- " prefetch 256(%0)\n"
18691+ "1: prefetch (%1)\n"
18692+ " prefetch 64(%1)\n"
18693+ " prefetch 128(%1)\n"
18694+ " prefetch 192(%1)\n"
18695+ " prefetch 256(%1)\n"
18696 "2: \n"
18697 ".section .fixup, \"ax\"\n"
18698- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18699+ "3: \n"
18700+
18701+#ifdef CONFIG_PAX_KERNEXEC
18702+ " movl %%cr0, %0\n"
18703+ " movl %0, %%eax\n"
18704+ " andl $0xFFFEFFFF, %%eax\n"
18705+ " movl %%eax, %%cr0\n"
18706+#endif
18707+
18708+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18709+
18710+#ifdef CONFIG_PAX_KERNEXEC
18711+ " movl %0, %%cr0\n"
18712+#endif
18713+
18714 " jmp 2b\n"
18715 ".previous\n"
18716- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18717+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18718
18719 for (i = 0; i < (4096-320)/64; i++) {
18720 __asm__ __volatile__ (
18721- "1: prefetch 320(%0)\n"
18722- "2: movq (%0), %%mm0\n"
18723- " movntq %%mm0, (%1)\n"
18724- " movq 8(%0), %%mm1\n"
18725- " movntq %%mm1, 8(%1)\n"
18726- " movq 16(%0), %%mm2\n"
18727- " movntq %%mm2, 16(%1)\n"
18728- " movq 24(%0), %%mm3\n"
18729- " movntq %%mm3, 24(%1)\n"
18730- " movq 32(%0), %%mm4\n"
18731- " movntq %%mm4, 32(%1)\n"
18732- " movq 40(%0), %%mm5\n"
18733- " movntq %%mm5, 40(%1)\n"
18734- " movq 48(%0), %%mm6\n"
18735- " movntq %%mm6, 48(%1)\n"
18736- " movq 56(%0), %%mm7\n"
18737- " movntq %%mm7, 56(%1)\n"
18738+ "1: prefetch 320(%1)\n"
18739+ "2: movq (%1), %%mm0\n"
18740+ " movntq %%mm0, (%2)\n"
18741+ " movq 8(%1), %%mm1\n"
18742+ " movntq %%mm1, 8(%2)\n"
18743+ " movq 16(%1), %%mm2\n"
18744+ " movntq %%mm2, 16(%2)\n"
18745+ " movq 24(%1), %%mm3\n"
18746+ " movntq %%mm3, 24(%2)\n"
18747+ " movq 32(%1), %%mm4\n"
18748+ " movntq %%mm4, 32(%2)\n"
18749+ " movq 40(%1), %%mm5\n"
18750+ " movntq %%mm5, 40(%2)\n"
18751+ " movq 48(%1), %%mm6\n"
18752+ " movntq %%mm6, 48(%2)\n"
18753+ " movq 56(%1), %%mm7\n"
18754+ " movntq %%mm7, 56(%2)\n"
18755 ".section .fixup, \"ax\"\n"
18756- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18757+ "3:\n"
18758+
18759+#ifdef CONFIG_PAX_KERNEXEC
18760+ " movl %%cr0, %0\n"
18761+ " movl %0, %%eax\n"
18762+ " andl $0xFFFEFFFF, %%eax\n"
18763+ " movl %%eax, %%cr0\n"
18764+#endif
18765+
18766+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18767+
18768+#ifdef CONFIG_PAX_KERNEXEC
18769+ " movl %0, %%cr0\n"
18770+#endif
18771+
18772 " jmp 2b\n"
18773 ".previous\n"
18774- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18775+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18776
18777 from += 64;
18778 to += 64;
18779@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18780 static void fast_copy_page(void *to, void *from)
18781 {
18782 int i;
18783+ unsigned long cr0;
18784
18785 kernel_fpu_begin();
18786
18787 __asm__ __volatile__ (
18788- "1: prefetch (%0)\n"
18789- " prefetch 64(%0)\n"
18790- " prefetch 128(%0)\n"
18791- " prefetch 192(%0)\n"
18792- " prefetch 256(%0)\n"
18793+ "1: prefetch (%1)\n"
18794+ " prefetch 64(%1)\n"
18795+ " prefetch 128(%1)\n"
18796+ " prefetch 192(%1)\n"
18797+ " prefetch 256(%1)\n"
18798 "2: \n"
18799 ".section .fixup, \"ax\"\n"
18800- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18801+ "3: \n"
18802+
18803+#ifdef CONFIG_PAX_KERNEXEC
18804+ " movl %%cr0, %0\n"
18805+ " movl %0, %%eax\n"
18806+ " andl $0xFFFEFFFF, %%eax\n"
18807+ " movl %%eax, %%cr0\n"
18808+#endif
18809+
18810+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18811+
18812+#ifdef CONFIG_PAX_KERNEXEC
18813+ " movl %0, %%cr0\n"
18814+#endif
18815+
18816 " jmp 2b\n"
18817 ".previous\n"
18818- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18819+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18820
18821 for (i = 0; i < 4096/64; i++) {
18822 __asm__ __volatile__ (
18823- "1: prefetch 320(%0)\n"
18824- "2: movq (%0), %%mm0\n"
18825- " movq 8(%0), %%mm1\n"
18826- " movq 16(%0), %%mm2\n"
18827- " movq 24(%0), %%mm3\n"
18828- " movq %%mm0, (%1)\n"
18829- " movq %%mm1, 8(%1)\n"
18830- " movq %%mm2, 16(%1)\n"
18831- " movq %%mm3, 24(%1)\n"
18832- " movq 32(%0), %%mm0\n"
18833- " movq 40(%0), %%mm1\n"
18834- " movq 48(%0), %%mm2\n"
18835- " movq 56(%0), %%mm3\n"
18836- " movq %%mm0, 32(%1)\n"
18837- " movq %%mm1, 40(%1)\n"
18838- " movq %%mm2, 48(%1)\n"
18839- " movq %%mm3, 56(%1)\n"
18840+ "1: prefetch 320(%1)\n"
18841+ "2: movq (%1), %%mm0\n"
18842+ " movq 8(%1), %%mm1\n"
18843+ " movq 16(%1), %%mm2\n"
18844+ " movq 24(%1), %%mm3\n"
18845+ " movq %%mm0, (%2)\n"
18846+ " movq %%mm1, 8(%2)\n"
18847+ " movq %%mm2, 16(%2)\n"
18848+ " movq %%mm3, 24(%2)\n"
18849+ " movq 32(%1), %%mm0\n"
18850+ " movq 40(%1), %%mm1\n"
18851+ " movq 48(%1), %%mm2\n"
18852+ " movq 56(%1), %%mm3\n"
18853+ " movq %%mm0, 32(%2)\n"
18854+ " movq %%mm1, 40(%2)\n"
18855+ " movq %%mm2, 48(%2)\n"
18856+ " movq %%mm3, 56(%2)\n"
18857 ".section .fixup, \"ax\"\n"
18858- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18859+ "3:\n"
18860+
18861+#ifdef CONFIG_PAX_KERNEXEC
18862+ " movl %%cr0, %0\n"
18863+ " movl %0, %%eax\n"
18864+ " andl $0xFFFEFFFF, %%eax\n"
18865+ " movl %%eax, %%cr0\n"
18866+#endif
18867+
18868+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18869+
18870+#ifdef CONFIG_PAX_KERNEXEC
18871+ " movl %0, %%cr0\n"
18872+#endif
18873+
18874 " jmp 2b\n"
18875 ".previous\n"
18876 _ASM_EXTABLE(1b, 3b)
18877- : : "r" (from), "r" (to) : "memory");
18878+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18879
18880 from += 64;
18881 to += 64;
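
The fixup sections in mmx_32.c patch the running code itself: on a faulting prefetch they store a two-byte short jump over label 1 (0x1AEB in little-endian memory order is EB 1A, a jmp rel8 that skips the 26-byte prefetch block, matching the "jmp on 26 bytes" comment; 0x05EB skips 5 bytes). With KERNEXEC the kernel text is write-protected, so the patch brackets that store with a clear and restore of CR0.WP: the "andl $0xFFFEFFFF" masks off bit 16 of CR0, and the old value is preserved in the new cr0 output operand. A small decoding check of the two constants (host-side only, nothing kernel-specific; the sample CR0 value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* The 16-bit immediates the fixup writes over label 1, viewed as
         * the two bytes of a short jump (little-endian). */
        uint16_t fix26 = 0x1AEB, fix5 = 0x05EB;
        printf("0x%04X -> opcode 0x%02X (jmp rel8), displacement %u bytes\n",
               (unsigned)fix26, (unsigned)(fix26 & 0xFF), (unsigned)(fix26 >> 8));
        printf("0x%04X -> opcode 0x%02X (jmp rel8), displacement %u bytes\n",
               (unsigned)fix5, (unsigned)(fix5 & 0xFF), (unsigned)(fix5 >> 8));

        /* The KERNEXEC bracket clears CR0.WP (bit 16) so the store to the
         * write-protected text page is allowed, then restores the old CR0. */
        uint32_t cr0 = 0x80050033;                 /* illustrative CR0 value */
        uint32_t unprotected = cr0 & 0xFFFEFFFF;   /* the patch's AND mask   */
        printf("CR0 %#x -> %#x (WP bit %u -> %u)\n",
               (unsigned)cr0, (unsigned)unprotected,
               (unsigned)((cr0 >> 16) & 1), (unsigned)((unprotected >> 16) & 1));
        return 0;
}
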
18882diff -urNp linux-3.1.1/arch/x86/lib/msr-reg.S linux-3.1.1/arch/x86/lib/msr-reg.S
18883--- linux-3.1.1/arch/x86/lib/msr-reg.S 2011-11-11 15:19:27.000000000 -0500
18884+++ linux-3.1.1/arch/x86/lib/msr-reg.S 2011-11-16 18:39:07.000000000 -0500
18885@@ -3,6 +3,7 @@
18886 #include <asm/dwarf2.h>
18887 #include <asm/asm.h>
18888 #include <asm/msr.h>
18889+#include <asm/alternative-asm.h>
18890
18891 #ifdef CONFIG_X86_64
18892 /*
18893@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
18894 movl %edi, 28(%r10)
18895 popq_cfi %rbp
18896 popq_cfi %rbx
18897+ pax_force_retaddr
18898 ret
18899 3:
18900 CFI_RESTORE_STATE
18901diff -urNp linux-3.1.1/arch/x86/lib/putuser.S linux-3.1.1/arch/x86/lib/putuser.S
18902--- linux-3.1.1/arch/x86/lib/putuser.S 2011-11-11 15:19:27.000000000 -0500
18903+++ linux-3.1.1/arch/x86/lib/putuser.S 2011-11-16 18:39:07.000000000 -0500
18904@@ -15,7 +15,9 @@
18905 #include <asm/thread_info.h>
18906 #include <asm/errno.h>
18907 #include <asm/asm.h>
18908-
18909+#include <asm/segment.h>
18910+#include <asm/pgtable.h>
18911+#include <asm/alternative-asm.h>
18912
18913 /*
18914 * __put_user_X
18915@@ -29,52 +31,119 @@
18916 * as they get called from within inline assembly.
18917 */
18918
18919-#define ENTER CFI_STARTPROC ; \
18920- GET_THREAD_INFO(%_ASM_BX)
18921-#define EXIT ret ; \
18922+#define ENTER CFI_STARTPROC
18923+#define EXIT pax_force_retaddr; ret ; \
18924 CFI_ENDPROC
18925
18926+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18927+#define _DEST %_ASM_CX,%_ASM_BX
18928+#else
18929+#define _DEST %_ASM_CX
18930+#endif
18931+
18932+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18933+#define __copyuser_seg gs;
18934+#else
18935+#define __copyuser_seg
18936+#endif
18937+
18938 .text
18939 ENTRY(__put_user_1)
18940 ENTER
18941+
18942+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18943+ GET_THREAD_INFO(%_ASM_BX)
18944 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18945 jae bad_put_user
18946-1: movb %al,(%_ASM_CX)
18947+
18948+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18949+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18950+ cmp %_ASM_BX,%_ASM_CX
18951+ jb 1234f
18952+ xor %ebx,%ebx
18953+1234:
18954+#endif
18955+
18956+#endif
18957+
18958+1: __copyuser_seg movb %al,(_DEST)
18959 xor %eax,%eax
18960 EXIT
18961 ENDPROC(__put_user_1)
18962
18963 ENTRY(__put_user_2)
18964 ENTER
18965+
18966+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18967+ GET_THREAD_INFO(%_ASM_BX)
18968 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18969 sub $1,%_ASM_BX
18970 cmp %_ASM_BX,%_ASM_CX
18971 jae bad_put_user
18972-2: movw %ax,(%_ASM_CX)
18973+
18974+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18975+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18976+ cmp %_ASM_BX,%_ASM_CX
18977+ jb 1234f
18978+ xor %ebx,%ebx
18979+1234:
18980+#endif
18981+
18982+#endif
18983+
18984+2: __copyuser_seg movw %ax,(_DEST)
18985 xor %eax,%eax
18986 EXIT
18987 ENDPROC(__put_user_2)
18988
18989 ENTRY(__put_user_4)
18990 ENTER
18991+
18992+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18993+ GET_THREAD_INFO(%_ASM_BX)
18994 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18995 sub $3,%_ASM_BX
18996 cmp %_ASM_BX,%_ASM_CX
18997 jae bad_put_user
18998-3: movl %eax,(%_ASM_CX)
18999+
19000+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19001+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19002+ cmp %_ASM_BX,%_ASM_CX
19003+ jb 1234f
19004+ xor %ebx,%ebx
19005+1234:
19006+#endif
19007+
19008+#endif
19009+
19010+3: __copyuser_seg movl %eax,(_DEST)
19011 xor %eax,%eax
19012 EXIT
19013 ENDPROC(__put_user_4)
19014
19015 ENTRY(__put_user_8)
19016 ENTER
19017+
19018+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19019+ GET_THREAD_INFO(%_ASM_BX)
19020 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19021 sub $7,%_ASM_BX
19022 cmp %_ASM_BX,%_ASM_CX
19023 jae bad_put_user
19024-4: mov %_ASM_AX,(%_ASM_CX)
19025+
19026+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19027+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19028+ cmp %_ASM_BX,%_ASM_CX
19029+ jb 1234f
19030+ xor %ebx,%ebx
19031+1234:
19032+#endif
19033+
19034+#endif
19035+
19036+4: __copyuser_seg mov %_ASM_AX,(_DEST)
19037 #ifdef CONFIG_X86_32
19038-5: movl %edx,4(%_ASM_CX)
19039+5: __copyuser_seg movl %edx,4(_DEST)
19040 #endif
19041 xor %eax,%eax
19042 EXIT
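
In the non-UDEREF path the __put_user_N stubs keep the classic range check: for an N-byte store they compare the destination against addr_limit minus N-1 (that is what the sub $1/$3/$7 before the cmp computes), so the store is rejected unless every one of the N bytes lies below the limit. A minimal sketch of that bound, with a placeholder limit value:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the __put_user_N check: the address may be used only if the
 * whole n-byte object lies strictly below addr_limit (jae bad_put_user
 * rejects everything else). */
static int put_user_ok(uint64_t addr, uint64_t addr_limit, unsigned n)
{
        return addr < addr_limit - (n - 1);
}

int main(void)
{
        uint64_t limit = 0x00007ffffffff000ULL;            /* illustrative limit */
        printf("%d\n", put_user_ok(limit - 8, limit, 8));  /* fits entirely: 1   */
        printf("%d\n", put_user_ok(limit - 4, limit, 8));  /* straddles limit: 0 */
        return 0;
}
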
19043diff -urNp linux-3.1.1/arch/x86/lib/rwlock.S linux-3.1.1/arch/x86/lib/rwlock.S
19044--- linux-3.1.1/arch/x86/lib/rwlock.S 2011-11-11 15:19:27.000000000 -0500
19045+++ linux-3.1.1/arch/x86/lib/rwlock.S 2011-11-16 18:39:07.000000000 -0500
19046@@ -23,6 +23,7 @@ ENTRY(__write_lock_failed)
19047 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
19048 jnz 0b
19049 ENDFRAME
19050+ pax_force_retaddr
19051 ret
19052 CFI_ENDPROC
19053 END(__write_lock_failed)
19054@@ -39,6 +40,7 @@ ENTRY(__read_lock_failed)
19055 READ_LOCK_SIZE(dec) (%__lock_ptr)
19056 js 0b
19057 ENDFRAME
19058+ pax_force_retaddr
19059 ret
19060 CFI_ENDPROC
19061 END(__read_lock_failed)
19062diff -urNp linux-3.1.1/arch/x86/lib/rwsem.S linux-3.1.1/arch/x86/lib/rwsem.S
19063--- linux-3.1.1/arch/x86/lib/rwsem.S 2011-11-11 15:19:27.000000000 -0500
19064+++ linux-3.1.1/arch/x86/lib/rwsem.S 2011-11-16 18:39:07.000000000 -0500
19065@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
19066 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
19067 CFI_RESTORE __ASM_REG(dx)
19068 restore_common_regs
19069+ pax_force_retaddr
19070 ret
19071 CFI_ENDPROC
19072 ENDPROC(call_rwsem_down_read_failed)
19073@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
19074 movq %rax,%rdi
19075 call rwsem_down_write_failed
19076 restore_common_regs
19077+ pax_force_retaddr
19078 ret
19079 CFI_ENDPROC
19080 ENDPROC(call_rwsem_down_write_failed)
19081@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
19082 movq %rax,%rdi
19083 call rwsem_wake
19084 restore_common_regs
19085-1: ret
19086+1: pax_force_retaddr
19087+ ret
19088 CFI_ENDPROC
19089 ENDPROC(call_rwsem_wake)
19090
19091@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
19092 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
19093 CFI_RESTORE __ASM_REG(dx)
19094 restore_common_regs
19095+ pax_force_retaddr
19096 ret
19097 CFI_ENDPROC
19098 ENDPROC(call_rwsem_downgrade_wake)
19099diff -urNp linux-3.1.1/arch/x86/lib/thunk_64.S linux-3.1.1/arch/x86/lib/thunk_64.S
19100--- linux-3.1.1/arch/x86/lib/thunk_64.S 2011-11-11 15:19:27.000000000 -0500
19101+++ linux-3.1.1/arch/x86/lib/thunk_64.S 2011-11-16 18:39:07.000000000 -0500
19102@@ -8,6 +8,7 @@
19103 #include <linux/linkage.h>
19104 #include <asm/dwarf2.h>
19105 #include <asm/calling.h>
19106+#include <asm/alternative-asm.h>
19107
19108 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
19109 .macro THUNK name, func, put_ret_addr_in_rdi=0
19110@@ -41,5 +42,6 @@
19111 SAVE_ARGS
19112 restore:
19113 RESTORE_ARGS
19114+ pax_force_retaddr
19115 ret
19116 CFI_ENDPROC
19117diff -urNp linux-3.1.1/arch/x86/lib/usercopy_32.c linux-3.1.1/arch/x86/lib/usercopy_32.c
19118--- linux-3.1.1/arch/x86/lib/usercopy_32.c 2011-11-11 15:19:27.000000000 -0500
19119+++ linux-3.1.1/arch/x86/lib/usercopy_32.c 2011-11-16 18:39:07.000000000 -0500
19120@@ -43,7 +43,7 @@ do { \
19121 __asm__ __volatile__( \
19122 " testl %1,%1\n" \
19123 " jz 2f\n" \
19124- "0: lodsb\n" \
19125+ "0: "__copyuser_seg"lodsb\n" \
19126 " stosb\n" \
19127 " testb %%al,%%al\n" \
19128 " jz 1f\n" \
19129@@ -128,10 +128,12 @@ do { \
19130 int __d0; \
19131 might_fault(); \
19132 __asm__ __volatile__( \
19133+ __COPYUSER_SET_ES \
19134 "0: rep; stosl\n" \
19135 " movl %2,%0\n" \
19136 "1: rep; stosb\n" \
19137 "2:\n" \
19138+ __COPYUSER_RESTORE_ES \
19139 ".section .fixup,\"ax\"\n" \
19140 "3: lea 0(%2,%0,4),%0\n" \
19141 " jmp 2b\n" \
19142@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19143 might_fault();
19144
19145 __asm__ __volatile__(
19146+ __COPYUSER_SET_ES
19147 " testl %0, %0\n"
19148 " jz 3f\n"
19149 " andl %0,%%ecx\n"
19150@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19151 " subl %%ecx,%0\n"
19152 " addl %0,%%eax\n"
19153 "1:\n"
19154+ __COPYUSER_RESTORE_ES
19155 ".section .fixup,\"ax\"\n"
19156 "2: xorl %%eax,%%eax\n"
19157 " jmp 1b\n"
19158@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19159
19160 #ifdef CONFIG_X86_INTEL_USERCOPY
19161 static unsigned long
19162-__copy_user_intel(void __user *to, const void *from, unsigned long size)
19163+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19164 {
19165 int d0, d1;
19166 __asm__ __volatile__(
19167@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19168 " .align 2,0x90\n"
19169 "3: movl 0(%4), %%eax\n"
19170 "4: movl 4(%4), %%edx\n"
19171- "5: movl %%eax, 0(%3)\n"
19172- "6: movl %%edx, 4(%3)\n"
19173+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19174+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19175 "7: movl 8(%4), %%eax\n"
19176 "8: movl 12(%4),%%edx\n"
19177- "9: movl %%eax, 8(%3)\n"
19178- "10: movl %%edx, 12(%3)\n"
19179+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19180+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19181 "11: movl 16(%4), %%eax\n"
19182 "12: movl 20(%4), %%edx\n"
19183- "13: movl %%eax, 16(%3)\n"
19184- "14: movl %%edx, 20(%3)\n"
19185+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19186+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19187 "15: movl 24(%4), %%eax\n"
19188 "16: movl 28(%4), %%edx\n"
19189- "17: movl %%eax, 24(%3)\n"
19190- "18: movl %%edx, 28(%3)\n"
19191+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19192+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19193 "19: movl 32(%4), %%eax\n"
19194 "20: movl 36(%4), %%edx\n"
19195- "21: movl %%eax, 32(%3)\n"
19196- "22: movl %%edx, 36(%3)\n"
19197+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19198+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19199 "23: movl 40(%4), %%eax\n"
19200 "24: movl 44(%4), %%edx\n"
19201- "25: movl %%eax, 40(%3)\n"
19202- "26: movl %%edx, 44(%3)\n"
19203+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19204+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19205 "27: movl 48(%4), %%eax\n"
19206 "28: movl 52(%4), %%edx\n"
19207- "29: movl %%eax, 48(%3)\n"
19208- "30: movl %%edx, 52(%3)\n"
19209+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19210+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19211 "31: movl 56(%4), %%eax\n"
19212 "32: movl 60(%4), %%edx\n"
19213- "33: movl %%eax, 56(%3)\n"
19214- "34: movl %%edx, 60(%3)\n"
19215+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19216+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19217 " addl $-64, %0\n"
19218 " addl $64, %4\n"
19219 " addl $64, %3\n"
19220@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19221 " shrl $2, %0\n"
19222 " andl $3, %%eax\n"
19223 " cld\n"
19224+ __COPYUSER_SET_ES
19225 "99: rep; movsl\n"
19226 "36: movl %%eax, %0\n"
19227 "37: rep; movsb\n"
19228 "100:\n"
19229+ __COPYUSER_RESTORE_ES
19230+ ".section .fixup,\"ax\"\n"
19231+ "101: lea 0(%%eax,%0,4),%0\n"
19232+ " jmp 100b\n"
19233+ ".previous\n"
19234+ ".section __ex_table,\"a\"\n"
19235+ " .align 4\n"
19236+ " .long 1b,100b\n"
19237+ " .long 2b,100b\n"
19238+ " .long 3b,100b\n"
19239+ " .long 4b,100b\n"
19240+ " .long 5b,100b\n"
19241+ " .long 6b,100b\n"
19242+ " .long 7b,100b\n"
19243+ " .long 8b,100b\n"
19244+ " .long 9b,100b\n"
19245+ " .long 10b,100b\n"
19246+ " .long 11b,100b\n"
19247+ " .long 12b,100b\n"
19248+ " .long 13b,100b\n"
19249+ " .long 14b,100b\n"
19250+ " .long 15b,100b\n"
19251+ " .long 16b,100b\n"
19252+ " .long 17b,100b\n"
19253+ " .long 18b,100b\n"
19254+ " .long 19b,100b\n"
19255+ " .long 20b,100b\n"
19256+ " .long 21b,100b\n"
19257+ " .long 22b,100b\n"
19258+ " .long 23b,100b\n"
19259+ " .long 24b,100b\n"
19260+ " .long 25b,100b\n"
19261+ " .long 26b,100b\n"
19262+ " .long 27b,100b\n"
19263+ " .long 28b,100b\n"
19264+ " .long 29b,100b\n"
19265+ " .long 30b,100b\n"
19266+ " .long 31b,100b\n"
19267+ " .long 32b,100b\n"
19268+ " .long 33b,100b\n"
19269+ " .long 34b,100b\n"
19270+ " .long 35b,100b\n"
19271+ " .long 36b,100b\n"
19272+ " .long 37b,100b\n"
19273+ " .long 99b,101b\n"
19274+ ".previous"
19275+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
19276+ : "1"(to), "2"(from), "0"(size)
19277+ : "eax", "edx", "memory");
19278+ return size;
19279+}
19280+
19281+static unsigned long
19282+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19283+{
19284+ int d0, d1;
19285+ __asm__ __volatile__(
19286+ " .align 2,0x90\n"
19287+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19288+ " cmpl $67, %0\n"
19289+ " jbe 3f\n"
19290+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19291+ " .align 2,0x90\n"
19292+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19293+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19294+ "5: movl %%eax, 0(%3)\n"
19295+ "6: movl %%edx, 4(%3)\n"
19296+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19297+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19298+ "9: movl %%eax, 8(%3)\n"
19299+ "10: movl %%edx, 12(%3)\n"
19300+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19301+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19302+ "13: movl %%eax, 16(%3)\n"
19303+ "14: movl %%edx, 20(%3)\n"
19304+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19305+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19306+ "17: movl %%eax, 24(%3)\n"
19307+ "18: movl %%edx, 28(%3)\n"
19308+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19309+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19310+ "21: movl %%eax, 32(%3)\n"
19311+ "22: movl %%edx, 36(%3)\n"
19312+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19313+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19314+ "25: movl %%eax, 40(%3)\n"
19315+ "26: movl %%edx, 44(%3)\n"
19316+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19317+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19318+ "29: movl %%eax, 48(%3)\n"
19319+ "30: movl %%edx, 52(%3)\n"
19320+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19321+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19322+ "33: movl %%eax, 56(%3)\n"
19323+ "34: movl %%edx, 60(%3)\n"
19324+ " addl $-64, %0\n"
19325+ " addl $64, %4\n"
19326+ " addl $64, %3\n"
19327+ " cmpl $63, %0\n"
19328+ " ja 1b\n"
19329+ "35: movl %0, %%eax\n"
19330+ " shrl $2, %0\n"
19331+ " andl $3, %%eax\n"
19332+ " cld\n"
19333+ "99: rep; "__copyuser_seg" movsl\n"
19334+ "36: movl %%eax, %0\n"
19335+ "37: rep; "__copyuser_seg" movsb\n"
19336+ "100:\n"
19337 ".section .fixup,\"ax\"\n"
19338 "101: lea 0(%%eax,%0,4),%0\n"
19339 " jmp 100b\n"
19340@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19341 int d0, d1;
19342 __asm__ __volatile__(
19343 " .align 2,0x90\n"
19344- "0: movl 32(%4), %%eax\n"
19345+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19346 " cmpl $67, %0\n"
19347 " jbe 2f\n"
19348- "1: movl 64(%4), %%eax\n"
19349+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19350 " .align 2,0x90\n"
19351- "2: movl 0(%4), %%eax\n"
19352- "21: movl 4(%4), %%edx\n"
19353+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19354+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19355 " movl %%eax, 0(%3)\n"
19356 " movl %%edx, 4(%3)\n"
19357- "3: movl 8(%4), %%eax\n"
19358- "31: movl 12(%4),%%edx\n"
19359+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19360+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19361 " movl %%eax, 8(%3)\n"
19362 " movl %%edx, 12(%3)\n"
19363- "4: movl 16(%4), %%eax\n"
19364- "41: movl 20(%4), %%edx\n"
19365+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19366+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19367 " movl %%eax, 16(%3)\n"
19368 " movl %%edx, 20(%3)\n"
19369- "10: movl 24(%4), %%eax\n"
19370- "51: movl 28(%4), %%edx\n"
19371+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19372+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19373 " movl %%eax, 24(%3)\n"
19374 " movl %%edx, 28(%3)\n"
19375- "11: movl 32(%4), %%eax\n"
19376- "61: movl 36(%4), %%edx\n"
19377+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19378+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19379 " movl %%eax, 32(%3)\n"
19380 " movl %%edx, 36(%3)\n"
19381- "12: movl 40(%4), %%eax\n"
19382- "71: movl 44(%4), %%edx\n"
19383+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19384+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19385 " movl %%eax, 40(%3)\n"
19386 " movl %%edx, 44(%3)\n"
19387- "13: movl 48(%4), %%eax\n"
19388- "81: movl 52(%4), %%edx\n"
19389+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19390+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19391 " movl %%eax, 48(%3)\n"
19392 " movl %%edx, 52(%3)\n"
19393- "14: movl 56(%4), %%eax\n"
19394- "91: movl 60(%4), %%edx\n"
19395+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19396+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19397 " movl %%eax, 56(%3)\n"
19398 " movl %%edx, 60(%3)\n"
19399 " addl $-64, %0\n"
19400@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19401 " shrl $2, %0\n"
19402 " andl $3, %%eax\n"
19403 " cld\n"
19404- "6: rep; movsl\n"
19405+ "6: rep; "__copyuser_seg" movsl\n"
19406 " movl %%eax,%0\n"
19407- "7: rep; movsb\n"
19408+ "7: rep; "__copyuser_seg" movsb\n"
19409 "8:\n"
19410 ".section .fixup,\"ax\"\n"
19411 "9: lea 0(%%eax,%0,4),%0\n"
19412@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19413
19414 __asm__ __volatile__(
19415 " .align 2,0x90\n"
19416- "0: movl 32(%4), %%eax\n"
19417+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19418 " cmpl $67, %0\n"
19419 " jbe 2f\n"
19420- "1: movl 64(%4), %%eax\n"
19421+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19422 " .align 2,0x90\n"
19423- "2: movl 0(%4), %%eax\n"
19424- "21: movl 4(%4), %%edx\n"
19425+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19426+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19427 " movnti %%eax, 0(%3)\n"
19428 " movnti %%edx, 4(%3)\n"
19429- "3: movl 8(%4), %%eax\n"
19430- "31: movl 12(%4),%%edx\n"
19431+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19432+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19433 " movnti %%eax, 8(%3)\n"
19434 " movnti %%edx, 12(%3)\n"
19435- "4: movl 16(%4), %%eax\n"
19436- "41: movl 20(%4), %%edx\n"
19437+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19438+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19439 " movnti %%eax, 16(%3)\n"
19440 " movnti %%edx, 20(%3)\n"
19441- "10: movl 24(%4), %%eax\n"
19442- "51: movl 28(%4), %%edx\n"
19443+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19444+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19445 " movnti %%eax, 24(%3)\n"
19446 " movnti %%edx, 28(%3)\n"
19447- "11: movl 32(%4), %%eax\n"
19448- "61: movl 36(%4), %%edx\n"
19449+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19450+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19451 " movnti %%eax, 32(%3)\n"
19452 " movnti %%edx, 36(%3)\n"
19453- "12: movl 40(%4), %%eax\n"
19454- "71: movl 44(%4), %%edx\n"
19455+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19456+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19457 " movnti %%eax, 40(%3)\n"
19458 " movnti %%edx, 44(%3)\n"
19459- "13: movl 48(%4), %%eax\n"
19460- "81: movl 52(%4), %%edx\n"
19461+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19462+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19463 " movnti %%eax, 48(%3)\n"
19464 " movnti %%edx, 52(%3)\n"
19465- "14: movl 56(%4), %%eax\n"
19466- "91: movl 60(%4), %%edx\n"
19467+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19468+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19469 " movnti %%eax, 56(%3)\n"
19470 " movnti %%edx, 60(%3)\n"
19471 " addl $-64, %0\n"
19472@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19473 " shrl $2, %0\n"
19474 " andl $3, %%eax\n"
19475 " cld\n"
19476- "6: rep; movsl\n"
19477+ "6: rep; "__copyuser_seg" movsl\n"
19478 " movl %%eax,%0\n"
19479- "7: rep; movsb\n"
19480+ "7: rep; "__copyuser_seg" movsb\n"
19481 "8:\n"
19482 ".section .fixup,\"ax\"\n"
19483 "9: lea 0(%%eax,%0,4),%0\n"
19484@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19485
19486 __asm__ __volatile__(
19487 " .align 2,0x90\n"
19488- "0: movl 32(%4), %%eax\n"
19489+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19490 " cmpl $67, %0\n"
19491 " jbe 2f\n"
19492- "1: movl 64(%4), %%eax\n"
19493+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19494 " .align 2,0x90\n"
19495- "2: movl 0(%4), %%eax\n"
19496- "21: movl 4(%4), %%edx\n"
19497+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19498+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19499 " movnti %%eax, 0(%3)\n"
19500 " movnti %%edx, 4(%3)\n"
19501- "3: movl 8(%4), %%eax\n"
19502- "31: movl 12(%4),%%edx\n"
19503+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19504+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19505 " movnti %%eax, 8(%3)\n"
19506 " movnti %%edx, 12(%3)\n"
19507- "4: movl 16(%4), %%eax\n"
19508- "41: movl 20(%4), %%edx\n"
19509+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19510+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19511 " movnti %%eax, 16(%3)\n"
19512 " movnti %%edx, 20(%3)\n"
19513- "10: movl 24(%4), %%eax\n"
19514- "51: movl 28(%4), %%edx\n"
19515+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19516+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19517 " movnti %%eax, 24(%3)\n"
19518 " movnti %%edx, 28(%3)\n"
19519- "11: movl 32(%4), %%eax\n"
19520- "61: movl 36(%4), %%edx\n"
19521+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19522+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19523 " movnti %%eax, 32(%3)\n"
19524 " movnti %%edx, 36(%3)\n"
19525- "12: movl 40(%4), %%eax\n"
19526- "71: movl 44(%4), %%edx\n"
19527+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19528+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19529 " movnti %%eax, 40(%3)\n"
19530 " movnti %%edx, 44(%3)\n"
19531- "13: movl 48(%4), %%eax\n"
19532- "81: movl 52(%4), %%edx\n"
19533+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19534+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19535 " movnti %%eax, 48(%3)\n"
19536 " movnti %%edx, 52(%3)\n"
19537- "14: movl 56(%4), %%eax\n"
19538- "91: movl 60(%4), %%edx\n"
19539+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19540+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19541 " movnti %%eax, 56(%3)\n"
19542 " movnti %%edx, 60(%3)\n"
19543 " addl $-64, %0\n"
19544@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19545 " shrl $2, %0\n"
19546 " andl $3, %%eax\n"
19547 " cld\n"
19548- "6: rep; movsl\n"
19549+ "6: rep; "__copyuser_seg" movsl\n"
19550 " movl %%eax,%0\n"
19551- "7: rep; movsb\n"
19552+ "7: rep; "__copyuser_seg" movsb\n"
19553 "8:\n"
19554 ".section .fixup,\"ax\"\n"
19555 "9: lea 0(%%eax,%0,4),%0\n"
19556@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19557 */
19558 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19559 unsigned long size);
19560-unsigned long __copy_user_intel(void __user *to, const void *from,
19561+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19562+ unsigned long size);
19563+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19564 unsigned long size);
19565 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19566 const void __user *from, unsigned long size);
19567 #endif /* CONFIG_X86_INTEL_USERCOPY */
19568
19569 /* Generic arbitrary sized copy. */
19570-#define __copy_user(to, from, size) \
19571+#define __copy_user(to, from, size, prefix, set, restore) \
19572 do { \
19573 int __d0, __d1, __d2; \
19574 __asm__ __volatile__( \
19575+ set \
19576 " cmp $7,%0\n" \
19577 " jbe 1f\n" \
19578 " movl %1,%0\n" \
19579 " negl %0\n" \
19580 " andl $7,%0\n" \
19581 " subl %0,%3\n" \
19582- "4: rep; movsb\n" \
19583+ "4: rep; "prefix"movsb\n" \
19584 " movl %3,%0\n" \
19585 " shrl $2,%0\n" \
19586 " andl $3,%3\n" \
19587 " .align 2,0x90\n" \
19588- "0: rep; movsl\n" \
19589+ "0: rep; "prefix"movsl\n" \
19590 " movl %3,%0\n" \
19591- "1: rep; movsb\n" \
19592+ "1: rep; "prefix"movsb\n" \
19593 "2:\n" \
19594+ restore \
19595 ".section .fixup,\"ax\"\n" \
19596 "5: addl %3,%0\n" \
19597 " jmp 2b\n" \
19598@@ -682,14 +799,14 @@ do { \
19599 " negl %0\n" \
19600 " andl $7,%0\n" \
19601 " subl %0,%3\n" \
19602- "4: rep; movsb\n" \
19603+ "4: rep; "__copyuser_seg"movsb\n" \
19604 " movl %3,%0\n" \
19605 " shrl $2,%0\n" \
19606 " andl $3,%3\n" \
19607 " .align 2,0x90\n" \
19608- "0: rep; movsl\n" \
19609+ "0: rep; "__copyuser_seg"movsl\n" \
19610 " movl %3,%0\n" \
19611- "1: rep; movsb\n" \
19612+ "1: rep; "__copyuser_seg"movsb\n" \
19613 "2:\n" \
19614 ".section .fixup,\"ax\"\n" \
19615 "5: addl %3,%0\n" \
19616@@ -775,9 +892,9 @@ survive:
19617 }
19618 #endif
19619 if (movsl_is_ok(to, from, n))
19620- __copy_user(to, from, n);
19621+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19622 else
19623- n = __copy_user_intel(to, from, n);
19624+ n = __generic_copy_to_user_intel(to, from, n);
19625 return n;
19626 }
19627 EXPORT_SYMBOL(__copy_to_user_ll);
19628@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19629 unsigned long n)
19630 {
19631 if (movsl_is_ok(to, from, n))
19632- __copy_user(to, from, n);
19633+ __copy_user(to, from, n, __copyuser_seg, "", "");
19634 else
19635- n = __copy_user_intel((void __user *)to,
19636- (const void *)from, n);
19637+ n = __generic_copy_from_user_intel(to, from, n);
19638 return n;
19639 }
19640 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19641@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
19642 if (n > 64 && cpu_has_xmm2)
19643 n = __copy_user_intel_nocache(to, from, n);
19644 else
19645- __copy_user(to, from, n);
19646+ __copy_user(to, from, n, __copyuser_seg, "", "");
19647 #else
19648- __copy_user(to, from, n);
19649+ __copy_user(to, from, n, __copyuser_seg, "", "");
19650 #endif
19651 return n;
19652 }
19653 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19654
19655-/**
19656- * copy_to_user: - Copy a block of data into user space.
19657- * @to: Destination address, in user space.
19658- * @from: Source address, in kernel space.
19659- * @n: Number of bytes to copy.
19660- *
19661- * Context: User context only. This function may sleep.
19662- *
19663- * Copy data from kernel space to user space.
19664- *
19665- * Returns number of bytes that could not be copied.
19666- * On success, this will be zero.
19667- */
19668-unsigned long
19669-copy_to_user(void __user *to, const void *from, unsigned long n)
19670+void copy_from_user_overflow(void)
19671 {
19672- if (access_ok(VERIFY_WRITE, to, n))
19673- n = __copy_to_user(to, from, n);
19674- return n;
19675+ WARN(1, "Buffer overflow detected!\n");
19676 }
19677-EXPORT_SYMBOL(copy_to_user);
19678+EXPORT_SYMBOL(copy_from_user_overflow);
19679
19680-/**
19681- * copy_from_user: - Copy a block of data from user space.
19682- * @to: Destination address, in kernel space.
19683- * @from: Source address, in user space.
19684- * @n: Number of bytes to copy.
19685- *
19686- * Context: User context only. This function may sleep.
19687- *
19688- * Copy data from user space to kernel space.
19689- *
19690- * Returns number of bytes that could not be copied.
19691- * On success, this will be zero.
19692- *
19693- * If some data could not be copied, this function will pad the copied
19694- * data to the requested size using zero bytes.
19695- */
19696-unsigned long
19697-_copy_from_user(void *to, const void __user *from, unsigned long n)
19698+void copy_to_user_overflow(void)
19699 {
19700- if (access_ok(VERIFY_READ, from, n))
19701- n = __copy_from_user(to, from, n);
19702- else
19703- memset(to, 0, n);
19704- return n;
19705+ WARN(1, "Buffer overflow detected!\n");
19706 }
19707-EXPORT_SYMBOL(_copy_from_user);
19708+EXPORT_SYMBOL(copy_to_user_overflow);
19709
19710-void copy_from_user_overflow(void)
19711+#ifdef CONFIG_PAX_MEMORY_UDEREF
19712+void __set_fs(mm_segment_t x)
19713 {
19714- WARN(1, "Buffer overflow detected!\n");
19715+ switch (x.seg) {
19716+ case 0:
19717+ loadsegment(gs, 0);
19718+ break;
19719+ case TASK_SIZE_MAX:
19720+ loadsegment(gs, __USER_DS);
19721+ break;
19722+ case -1UL:
19723+ loadsegment(gs, __KERNEL_DS);
19724+ break;
19725+ default:
19726+ BUG();
19727+ }
19728+ return;
19729 }
19730-EXPORT_SYMBOL(copy_from_user_overflow);
19731+EXPORT_SYMBOL(__set_fs);
19732+
19733+void set_fs(mm_segment_t x)
19734+{
19735+ current_thread_info()->addr_limit = x;
19736+ __set_fs(x);
19737+}
19738+EXPORT_SYMBOL(set_fs);
19739+#endif
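
The new __set_fs()/set_fs() pair at the end of usercopy_32.c ties the classic addr_limit to a segment register: with UDEREF on i386, set_fs(KERNEL_DS) (-1UL) loads __KERNEL_DS into %gs so the gs-prefixed copy routines above can reach kernel addresses, USER_DS (TASK_SIZE_MAX) loads __USER_DS, and a zero limit loads a null selector so any gs-relative access faults. A standalone model of that dispatch; the selector and limit values here are placeholders chosen only to make the sketch self-contained:

#include <stdio.h>

/* Placeholder selector values, standing in for 0 / __USER_DS / __KERNEL_DS. */
enum { NULL_SEL = 0x00, USER_DS_SEL = 0x7b, KERNEL_DS_SEL = 0x68 };

/* Illustrative TASK_SIZE_MAX for a 3G/1G split; the real value depends on
 * the kernel configuration. */
#define TASK_SIZE_MAX 0xc0000000UL

/* Model of __set_fs(): pick the %gs selector matching the requested address
 * limit.  The real function issues loadsegment(gs, ...) and BUG()s on any
 * other value. */
static unsigned pick_gs(unsigned long seg)
{
        switch (seg) {
        case 0:                  return NULL_SEL;      /* no user access allowed */
        case TASK_SIZE_MAX:      return USER_DS_SEL;   /* normal user accesses   */
        case (unsigned long)-1:  return KERNEL_DS_SEL; /* set_fs(KERNEL_DS)      */
        default:                 return NULL_SEL;      /* kernel would BUG() here */
        }
}

int main(void)
{
        printf("USER_DS   -> gs %#x\n", pick_gs(TASK_SIZE_MAX));
        printf("KERNEL_DS -> gs %#x\n", pick_gs((unsigned long)-1));
        return 0;
}
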
19740diff -urNp linux-3.1.1/arch/x86/lib/usercopy_64.c linux-3.1.1/arch/x86/lib/usercopy_64.c
19741--- linux-3.1.1/arch/x86/lib/usercopy_64.c 2011-11-11 15:19:27.000000000 -0500
19742+++ linux-3.1.1/arch/x86/lib/usercopy_64.c 2011-11-16 18:39:07.000000000 -0500
19743@@ -42,6 +42,12 @@ long
19744 __strncpy_from_user(char *dst, const char __user *src, long count)
19745 {
19746 long res;
19747+
19748+#ifdef CONFIG_PAX_MEMORY_UDEREF
19749+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19750+ src += PAX_USER_SHADOW_BASE;
19751+#endif
19752+
19753 __do_strncpy_from_user(dst, src, count, res);
19754 return res;
19755 }
19756@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19757 {
19758 long __d0;
19759 might_fault();
19760+
19761+#ifdef CONFIG_PAX_MEMORY_UDEREF
19762+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19763+ addr += PAX_USER_SHADOW_BASE;
19764+#endif
19765+
19766 /* no memory constraint because it doesn't change any memory gcc knows
19767 about */
19768 asm volatile(
19769@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19770
19771 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19772 {
19773- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19774- return copy_user_generic((__force void *)to, (__force void *)from, len);
19775- }
19776- return len;
19777+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19778+
19779+#ifdef CONFIG_PAX_MEMORY_UDEREF
19780+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19781+ to += PAX_USER_SHADOW_BASE;
19782+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19783+ from += PAX_USER_SHADOW_BASE;
19784+#endif
19785+
19786+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19787+ }
19788+ return len;
19789 }
19790 EXPORT_SYMBOL(copy_in_user);
19791
19792@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19793 * it is not necessary to optimize tail handling.
19794 */
19795 unsigned long
19796-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19797+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19798 {
19799 char c;
19800 unsigned zero_len;
19801diff -urNp linux-3.1.1/arch/x86/Makefile linux-3.1.1/arch/x86/Makefile
19802--- linux-3.1.1/arch/x86/Makefile 2011-11-11 15:19:27.000000000 -0500
19803+++ linux-3.1.1/arch/x86/Makefile 2011-11-17 18:30:30.000000000 -0500
19804@@ -46,6 +46,7 @@ else
19805 UTS_MACHINE := x86_64
19806 CHECKFLAGS += -D__x86_64__ -m64
19807
19808+ biarch := $(call cc-option,-m64)
19809 KBUILD_AFLAGS += -m64
19810 KBUILD_CFLAGS += -m64
19811
19812@@ -195,3 +196,12 @@ define archhelp
19813 echo ' FDARGS="..." arguments for the booted kernel'
19814 echo ' FDINITRD=file initrd for the booted kernel'
19815 endef
19816+
19817+define OLD_LD
19818+
19819+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19820+*** Please upgrade your binutils to 2.18 or newer
19821+endef
19822+
19823+archprepare:
19824+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19825diff -urNp linux-3.1.1/arch/x86/mm/extable.c linux-3.1.1/arch/x86/mm/extable.c
19826--- linux-3.1.1/arch/x86/mm/extable.c 2011-11-11 15:19:27.000000000 -0500
19827+++ linux-3.1.1/arch/x86/mm/extable.c 2011-11-16 18:39:07.000000000 -0500
19828@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19829 const struct exception_table_entry *fixup;
19830
19831 #ifdef CONFIG_PNPBIOS
19832- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19833+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19834 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19835 extern u32 pnp_bios_is_utter_crap;
19836 pnp_bios_is_utter_crap = 1;
19837diff -urNp linux-3.1.1/arch/x86/mm/fault.c linux-3.1.1/arch/x86/mm/fault.c
19838--- linux-3.1.1/arch/x86/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
19839+++ linux-3.1.1/arch/x86/mm/fault.c 2011-11-16 20:43:50.000000000 -0500
19840@@ -13,11 +13,18 @@
19841 #include <linux/perf_event.h> /* perf_sw_event */
19842 #include <linux/hugetlb.h> /* hstate_index_to_shift */
19843 #include <linux/prefetch.h> /* prefetchw */
19844+#include <linux/unistd.h>
19845+#include <linux/compiler.h>
19846
19847 #include <asm/traps.h> /* dotraplinkage, ... */
19848 #include <asm/pgalloc.h> /* pgd_*(), ... */
19849 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19850 #include <asm/vsyscall.h>
19851+#include <asm/tlbflush.h>
19852+
19853+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19854+#include <asm/stacktrace.h>
19855+#endif
19856
19857 /*
19858 * Page fault error code bits:
19859@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_
19860 int ret = 0;
19861
19862 /* kprobe_running() needs smp_processor_id() */
19863- if (kprobes_built_in() && !user_mode_vm(regs)) {
19864+ if (kprobes_built_in() && !user_mode(regs)) {
19865 preempt_disable();
19866 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19867 ret = 1;
19868@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19869 return !instr_lo || (instr_lo>>1) == 1;
19870 case 0x00:
19871 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19872- if (probe_kernel_address(instr, opcode))
19873+ if (user_mode(regs)) {
19874+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19875+ return 0;
19876+ } else if (probe_kernel_address(instr, opcode))
19877 return 0;
19878
19879 *prefetch = (instr_lo == 0xF) &&
19880@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19881 while (instr < max_instr) {
19882 unsigned char opcode;
19883
19884- if (probe_kernel_address(instr, opcode))
19885+ if (user_mode(regs)) {
19886+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19887+ break;
19888+ } else if (probe_kernel_address(instr, opcode))
19889 break;
19890
19891 instr++;
19892@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int s
19893 force_sig_info(si_signo, &info, tsk);
19894 }
19895
19896+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19897+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
19898+#endif
19899+
19900+#ifdef CONFIG_PAX_EMUTRAMP
19901+static int pax_handle_fetch_fault(struct pt_regs *regs);
19902+#endif
19903+
19904+#ifdef CONFIG_PAX_PAGEEXEC
19905+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19906+{
19907+ pgd_t *pgd;
19908+ pud_t *pud;
19909+ pmd_t *pmd;
19910+
19911+ pgd = pgd_offset(mm, address);
19912+ if (!pgd_present(*pgd))
19913+ return NULL;
19914+ pud = pud_offset(pgd, address);
19915+ if (!pud_present(*pud))
19916+ return NULL;
19917+ pmd = pmd_offset(pud, address);
19918+ if (!pmd_present(*pmd))
19919+ return NULL;
19920+ return pmd;
19921+}
19922+#endif
19923+
19924 DEFINE_SPINLOCK(pgd_lock);
19925 LIST_HEAD(pgd_list);
19926
19927@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
19928 for (address = VMALLOC_START & PMD_MASK;
19929 address >= TASK_SIZE && address < FIXADDR_TOP;
19930 address += PMD_SIZE) {
19931+
19932+#ifdef CONFIG_PAX_PER_CPU_PGD
19933+ unsigned long cpu;
19934+#else
19935 struct page *page;
19936+#endif
19937
19938 spin_lock(&pgd_lock);
19939+
19940+#ifdef CONFIG_PAX_PER_CPU_PGD
19941+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19942+ pgd_t *pgd = get_cpu_pgd(cpu);
19943+ pmd_t *ret;
19944+#else
19945 list_for_each_entry(page, &pgd_list, lru) {
19946+ pgd_t *pgd = page_address(page);
19947 spinlock_t *pgt_lock;
19948 pmd_t *ret;
19949
19950@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
19951 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19952
19953 spin_lock(pgt_lock);
19954- ret = vmalloc_sync_one(page_address(page), address);
19955+#endif
19956+
19957+ ret = vmalloc_sync_one(pgd, address);
19958+
19959+#ifndef CONFIG_PAX_PER_CPU_PGD
19960 spin_unlock(pgt_lock);
19961+#endif
19962
19963 if (!ret)
19964 break;
19965@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fa
19966 * an interrupt in the middle of a task switch..
19967 */
19968 pgd_paddr = read_cr3();
19969+
19970+#ifdef CONFIG_PAX_PER_CPU_PGD
19971+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19972+#endif
19973+
19974 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19975 if (!pmd_k)
19976 return -1;
19977@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fa
19978 * happen within a race in page table update. In the later
19979 * case just flush:
19980 */
19981+
19982+#ifdef CONFIG_PAX_PER_CPU_PGD
19983+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19984+ pgd = pgd_offset_cpu(smp_processor_id(), address);
19985+#else
19986 pgd = pgd_offset(current->active_mm, address);
19987+#endif
19988+
19989 pgd_ref = pgd_offset_k(address);
19990 if (pgd_none(*pgd_ref))
19991 return -1;
19992@@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *r
19993 static int is_errata100(struct pt_regs *regs, unsigned long address)
19994 {
19995 #ifdef CONFIG_X86_64
19996- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19997+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19998 return 1;
19999 #endif
20000 return 0;
20001@@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *r
20002 }
20003
20004 static const char nx_warning[] = KERN_CRIT
20005-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20006+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20007
20008 static void
20009 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20010@@ -570,14 +640,25 @@ show_fault_oops(struct pt_regs *regs, un
20011 if (!oops_may_print())
20012 return;
20013
20014- if (error_code & PF_INSTR) {
20015+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
20016 unsigned int level;
20017
20018 pte_t *pte = lookup_address(address, &level);
20019
20020 if (pte && pte_present(*pte) && !pte_exec(*pte))
20021- printk(nx_warning, current_uid());
20022+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20023+ }
20024+
20025+#ifdef CONFIG_PAX_KERNEXEC
20026+ if (init_mm.start_code <= address && address < init_mm.end_code) {
20027+ if (current->signal->curr_ip)
20028+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20029+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20030+ else
20031+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20032+ current->comm, task_pid_nr(current), current_uid(), current_euid());
20033 }
20034+#endif
20035
20036 printk(KERN_ALERT "BUG: unable to handle kernel ");
20037 if (address < PAGE_SIZE)
20038@@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *r
20039 }
20040 #endif
20041
20042+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20043+ if (pax_is_fetch_fault(regs, error_code, address)) {
20044+
20045+#ifdef CONFIG_PAX_EMUTRAMP
20046+ switch (pax_handle_fetch_fault(regs)) {
20047+ case 2:
20048+ return;
20049+ }
20050+#endif
20051+
20052+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20053+ do_group_exit(SIGKILL);
20054+ }
20055+#endif
20056+
20057 if (unlikely(show_unhandled_signals))
20058 show_signal_msg(regs, error_code, address, tsk);
20059
20060@@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned
20061 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
20062 printk(KERN_ERR
20063 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
20064- tsk->comm, tsk->pid, address);
20065+ tsk->comm, task_pid_nr(tsk), address);
20066 code = BUS_MCEERR_AR;
20067 }
20068 #endif
20069@@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned
20070 return 1;
20071 }
20072
20073+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20074+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20075+{
20076+ pte_t *pte;
20077+ pmd_t *pmd;
20078+ spinlock_t *ptl;
20079+ unsigned char pte_mask;
20080+
20081+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20082+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
20083+ return 0;
20084+
20085+ /* PaX: it's our fault, let's handle it if we can */
20086+
20087+ /* PaX: take a look at read faults before acquiring any locks */
20088+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20089+ /* instruction fetch attempt from a protected page in user mode */
20090+ up_read(&mm->mmap_sem);
20091+
20092+#ifdef CONFIG_PAX_EMUTRAMP
20093+ switch (pax_handle_fetch_fault(regs)) {
20094+ case 2:
20095+ return 1;
20096+ }
20097+#endif
20098+
20099+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20100+ do_group_exit(SIGKILL);
20101+ }
20102+
20103+ pmd = pax_get_pmd(mm, address);
20104+ if (unlikely(!pmd))
20105+ return 0;
20106+
20107+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20108+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20109+ pte_unmap_unlock(pte, ptl);
20110+ return 0;
20111+ }
20112+
20113+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20114+ /* write attempt to a protected page in user mode */
20115+ pte_unmap_unlock(pte, ptl);
20116+ return 0;
20117+ }
20118+
20119+#ifdef CONFIG_SMP
20120+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20121+#else
20122+ if (likely(address > get_limit(regs->cs)))
20123+#endif
20124+ {
20125+ set_pte(pte, pte_mkread(*pte));
20126+ __flush_tlb_one(address);
20127+ pte_unmap_unlock(pte, ptl);
20128+ up_read(&mm->mmap_sem);
20129+ return 1;
20130+ }
20131+
20132+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20133+
20134+ /*
20135+ * PaX: fill DTLB with user rights and retry
20136+ */
20137+ __asm__ __volatile__ (
20138+ "orb %2,(%1)\n"
20139+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20140+/*
20141+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20142+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20143+ * page fault when examined during a TLB load attempt. this is true not only
20144+ * for PTEs holding a non-present entry but also present entries that will
20145+ * raise a page fault (such as those set up by PaX, or the copy-on-write
20146+ * mechanism). in effect it means that we do *not* need to flush the TLBs
20147+ * for our target pages since their PTEs are simply not in the TLBs at all.
20148+
20149+ * the best thing in omitting it is that we gain around 15-20% speed in the
20150+ * fast path of the page fault handler and can get rid of tracing since we
20151+ * can no longer flush unintended entries.
20152+ */
20153+ "invlpg (%0)\n"
20154+#endif
20155+ __copyuser_seg"testb $0,(%0)\n"
20156+ "xorb %3,(%1)\n"
20157+ :
20158+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20159+ : "memory", "cc");
20160+ pte_unmap_unlock(pte, ptl);
20161+ up_read(&mm->mmap_sem);
20162+ return 1;
20163+}
20164+#endif
20165+
20166 /*
20167 * Handle a spurious fault caused by a stale TLB entry.
20168 *
20169@@ -956,6 +1145,9 @@ int show_unhandled_signals = 1;
20170 static inline int
20171 access_error(unsigned long error_code, struct vm_area_struct *vma)
20172 {
20173+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20174+ return 1;
20175+
20176 if (error_code & PF_WRITE) {
20177 /* write, present and write, not present: */
20178 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20179@@ -989,19 +1181,33 @@ do_page_fault(struct pt_regs *regs, unsi
20180 {
20181 struct vm_area_struct *vma;
20182 struct task_struct *tsk;
20183- unsigned long address;
20184 struct mm_struct *mm;
20185 int fault;
20186 int write = error_code & PF_WRITE;
20187 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
20188 (write ? FAULT_FLAG_WRITE : 0);
20189
20190+ /* Get the faulting address: */
20191+ unsigned long address = read_cr2();
20192+
20193+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20194+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20195+ if (!search_exception_tables(regs->ip)) {
20196+ bad_area_nosemaphore(regs, error_code, address);
20197+ return;
20198+ }
20199+ if (address < PAX_USER_SHADOW_BASE) {
20200+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20201+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
20202+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20203+ } else
20204+ address -= PAX_USER_SHADOW_BASE;
20205+ }
20206+#endif
20207+
20208 tsk = current;
20209 mm = tsk->mm;
20210
20211- /* Get the faulting address: */
20212- address = read_cr2();
20213-
20214 /*
20215 * Detect and handle instructions that would cause a page fault for
20216 * both a tracked kernel page and a userspace page.
20217@@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsi
20218 * User-mode registers count as a user access even for any
20219 * potential system fault or CPU buglet:
20220 */
20221- if (user_mode_vm(regs)) {
20222+ if (user_mode(regs)) {
20223 local_irq_enable();
20224 error_code |= PF_USER;
20225 } else {
20226@@ -1116,6 +1322,11 @@ retry:
20227 might_sleep();
20228 }
20229
20230+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20231+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20232+ return;
20233+#endif
20234+
20235 vma = find_vma(mm, address);
20236 if (unlikely(!vma)) {
20237 bad_area(regs, error_code, address);
20238@@ -1127,18 +1338,24 @@ retry:
20239 bad_area(regs, error_code, address);
20240 return;
20241 }
20242- if (error_code & PF_USER) {
20243- /*
20244- * Accessing the stack below %sp is always a bug.
20245- * The large cushion allows instructions like enter
20246- * and pusha to work. ("enter $65535, $31" pushes
20247- * 32 pointers and then decrements %sp by 65535.)
20248- */
20249- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20250- bad_area(regs, error_code, address);
20251- return;
20252- }
20253+ /*
20254+ * Accessing the stack below %sp is always a bug.
20255+ * The large cushion allows instructions like enter
20256+ * and pusha to work. ("enter $65535, $31" pushes
20257+ * 32 pointers and then decrements %sp by 65535.)
20258+ */
20259+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20260+ bad_area(regs, error_code, address);
20261+ return;
20262 }
20263+
20264+#ifdef CONFIG_PAX_SEGMEXEC
20265+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20266+ bad_area(regs, error_code, address);
20267+ return;
20268+ }
20269+#endif
20270+
20271 if (unlikely(expand_stack(vma, address))) {
20272 bad_area(regs, error_code, address);
20273 return;
20274@@ -1193,3 +1410,240 @@ good_area:
20275
20276 up_read(&mm->mmap_sem);
20277 }
20278+
20279+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20280+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
20281+{
20282+ struct mm_struct *mm = current->mm;
20283+ unsigned long ip = regs->ip;
20284+
20285+ if (v8086_mode(regs))
20286+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20287+
20288+#ifdef CONFIG_PAX_PAGEEXEC
20289+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
20290+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
20291+ return true;
20292+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
20293+ return true;
20294+ return false;
20295+ }
20296+#endif
20297+
20298+#ifdef CONFIG_PAX_SEGMEXEC
20299+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
20300+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
20301+ return true;
20302+ return false;
20303+ }
20304+#endif
20305+
20306+ return false;
20307+}
20308+#endif
20309+
20310+#ifdef CONFIG_PAX_EMUTRAMP
20311+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20312+{
20313+ int err;
20314+
20315+ do { /* PaX: gcc trampoline emulation #1 */
20316+ unsigned char mov1, mov2;
20317+ unsigned short jmp;
20318+ unsigned int addr1, addr2;
20319+
20320+#ifdef CONFIG_X86_64
20321+ if ((regs->ip + 11) >> 32)
20322+ break;
20323+#endif
20324+
20325+ err = get_user(mov1, (unsigned char __user *)regs->ip);
20326+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20327+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20328+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20329+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20330+
20331+ if (err)
20332+ break;
20333+
20334+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20335+ regs->cx = addr1;
20336+ regs->ax = addr2;
20337+ regs->ip = addr2;
20338+ return 2;
20339+ }
20340+ } while (0);
20341+
20342+ do { /* PaX: gcc trampoline emulation #2 */
20343+ unsigned char mov, jmp;
20344+ unsigned int addr1, addr2;
20345+
20346+#ifdef CONFIG_X86_64
20347+ if ((regs->ip + 9) >> 32)
20348+ break;
20349+#endif
20350+
20351+ err = get_user(mov, (unsigned char __user *)regs->ip);
20352+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20353+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20354+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20355+
20356+ if (err)
20357+ break;
20358+
20359+ if (mov == 0xB9 && jmp == 0xE9) {
20360+ regs->cx = addr1;
20361+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20362+ return 2;
20363+ }
20364+ } while (0);
20365+
20366+ return 1; /* PaX in action */
20367+}
20368+
20369+#ifdef CONFIG_X86_64
20370+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20371+{
20372+ int err;
20373+
20374+ do { /* PaX: gcc trampoline emulation #1 */
20375+ unsigned short mov1, mov2, jmp1;
20376+ unsigned char jmp2;
20377+ unsigned int addr1;
20378+ unsigned long addr2;
20379+
20380+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20381+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20382+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20383+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20384+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20385+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20386+
20387+ if (err)
20388+ break;
20389+
20390+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20391+ regs->r11 = addr1;
20392+ regs->r10 = addr2;
20393+ regs->ip = addr1;
20394+ return 2;
20395+ }
20396+ } while (0);
20397+
20398+ do { /* PaX: gcc trampoline emulation #2 */
20399+ unsigned short mov1, mov2, jmp1;
20400+ unsigned char jmp2;
20401+ unsigned long addr1, addr2;
20402+
20403+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20404+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20405+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20406+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20407+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20408+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20409+
20410+ if (err)
20411+ break;
20412+
20413+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20414+ regs->r11 = addr1;
20415+ regs->r10 = addr2;
20416+ regs->ip = addr1;
20417+ return 2;
20418+ }
20419+ } while (0);
20420+
20421+ return 1; /* PaX in action */
20422+}
20423+#endif
20424+
20425+/*
20426+ * PaX: decide what to do with offenders (regs->ip = fault address)
20427+ *
20428+ * returns 1 when task should be killed
20429+ * 2 when gcc trampoline was detected
20430+ */
20431+static int pax_handle_fetch_fault(struct pt_regs *regs)
20432+{
20433+ if (v8086_mode(regs))
20434+ return 1;
20435+
20436+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20437+ return 1;
20438+
20439+#ifdef CONFIG_X86_32
20440+ return pax_handle_fetch_fault_32(regs);
20441+#else
20442+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20443+ return pax_handle_fetch_fault_32(regs);
20444+ else
20445+ return pax_handle_fetch_fault_64(regs);
20446+#endif
20447+}
20448+#endif
20449+
20450+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20451+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
20452+{
20453+ long i;
20454+
20455+ printk(KERN_ERR "PAX: bytes at PC: ");
20456+ for (i = 0; i < 20; i++) {
20457+ unsigned char c;
20458+ if (get_user(c, (unsigned char __force_user *)pc+i))
20459+ printk(KERN_CONT "?? ");
20460+ else
20461+ printk(KERN_CONT "%02x ", c);
20462+ }
20463+ printk("\n");
20464+
20465+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20466+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
20467+ unsigned long c;
20468+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
20469+#ifdef CONFIG_X86_32
20470+ printk(KERN_CONT "???????? ");
20471+#else
20472+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
20473+ printk(KERN_CONT "???????? ???????? ");
20474+ else
20475+ printk(KERN_CONT "???????????????? ");
20476+#endif
20477+ } else {
20478+#ifdef CONFIG_X86_64
20479+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
20480+ printk(KERN_CONT "%08x ", (unsigned int)c);
20481+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
20482+ } else
20483+#endif
20484+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20485+ }
20486+ }
20487+ printk("\n");
20488+}
20489+#endif
20490+
20491+/**
20492+ * probe_kernel_write(): safely attempt to write to a location
20493+ * @dst: address to write to
20494+ * @src: pointer to the data that shall be written
20495+ * @size: size of the data chunk
20496+ *
20497+ * Safely write to address @dst from the buffer at @src. If a kernel fault
20498+ * happens, handle that and return -EFAULT.
20499+ */
20500+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20501+{
20502+ long ret;
20503+ mm_segment_t old_fs = get_fs();
20504+
20505+ set_fs(KERNEL_DS);
20506+ pagefault_disable();
20507+ pax_open_kernel();
20508+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
20509+ pax_close_kernel();
20510+ pagefault_enable();
20511+ set_fs(old_fs);
20512+
20513+ return ret ? -EFAULT : 0;
20514+}
20515diff -urNp linux-3.1.1/arch/x86/mm/gup.c linux-3.1.1/arch/x86/mm/gup.c
20516--- linux-3.1.1/arch/x86/mm/gup.c 2011-11-11 15:19:27.000000000 -0500
20517+++ linux-3.1.1/arch/x86/mm/gup.c 2011-11-16 18:39:07.000000000 -0500
20518@@ -253,7 +253,7 @@ int __get_user_pages_fast(unsigned long
20519 addr = start;
20520 len = (unsigned long) nr_pages << PAGE_SHIFT;
20521 end = start + len;
20522- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20523+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20524 (void __user *)start, len)))
20525 return 0;
20526
20527diff -urNp linux-3.1.1/arch/x86/mm/highmem_32.c linux-3.1.1/arch/x86/mm/highmem_32.c
20528--- linux-3.1.1/arch/x86/mm/highmem_32.c 2011-11-11 15:19:27.000000000 -0500
20529+++ linux-3.1.1/arch/x86/mm/highmem_32.c 2011-11-16 18:39:07.000000000 -0500
20530@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
20531 idx = type + KM_TYPE_NR*smp_processor_id();
20532 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20533 BUG_ON(!pte_none(*(kmap_pte-idx)));
20534+
20535+ pax_open_kernel();
20536 set_pte(kmap_pte-idx, mk_pte(page, prot));
20537+ pax_close_kernel();
20538
20539 return (void *)vaddr;
20540 }
20541diff -urNp linux-3.1.1/arch/x86/mm/hugetlbpage.c linux-3.1.1/arch/x86/mm/hugetlbpage.c
20542--- linux-3.1.1/arch/x86/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
20543+++ linux-3.1.1/arch/x86/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
20544@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
20545 struct hstate *h = hstate_file(file);
20546 struct mm_struct *mm = current->mm;
20547 struct vm_area_struct *vma;
20548- unsigned long start_addr;
20549+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20550+
20551+#ifdef CONFIG_PAX_SEGMEXEC
20552+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20553+ pax_task_size = SEGMEXEC_TASK_SIZE;
20554+#endif
20555+
20556+ pax_task_size -= PAGE_SIZE;
20557
20558 if (len > mm->cached_hole_size) {
20559- start_addr = mm->free_area_cache;
20560+ start_addr = mm->free_area_cache;
20561 } else {
20562- start_addr = TASK_UNMAPPED_BASE;
20563- mm->cached_hole_size = 0;
20564+ start_addr = mm->mmap_base;
20565+ mm->cached_hole_size = 0;
20566 }
20567
20568 full_search:
20569@@ -280,26 +287,27 @@ full_search:
20570
20571 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20572 /* At this point: (!vma || addr < vma->vm_end). */
20573- if (TASK_SIZE - len < addr) {
20574+ if (pax_task_size - len < addr) {
20575 /*
20576 * Start a new search - just in case we missed
20577 * some holes.
20578 */
20579- if (start_addr != TASK_UNMAPPED_BASE) {
20580- start_addr = TASK_UNMAPPED_BASE;
20581+ if (start_addr != mm->mmap_base) {
20582+ start_addr = mm->mmap_base;
20583 mm->cached_hole_size = 0;
20584 goto full_search;
20585 }
20586 return -ENOMEM;
20587 }
20588- if (!vma || addr + len <= vma->vm_start) {
20589- mm->free_area_cache = addr + len;
20590- return addr;
20591- }
20592+ if (check_heap_stack_gap(vma, addr, len))
20593+ break;
20594 if (addr + mm->cached_hole_size < vma->vm_start)
20595 mm->cached_hole_size = vma->vm_start - addr;
20596 addr = ALIGN(vma->vm_end, huge_page_size(h));
20597 }
20598+
20599+ mm->free_area_cache = addr + len;
20600+ return addr;
20601 }
20602
20603 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20604@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
20605 {
20606 struct hstate *h = hstate_file(file);
20607 struct mm_struct *mm = current->mm;
20608- struct vm_area_struct *vma, *prev_vma;
20609- unsigned long base = mm->mmap_base, addr = addr0;
20610+ struct vm_area_struct *vma;
20611+ unsigned long base = mm->mmap_base, addr;
20612 unsigned long largest_hole = mm->cached_hole_size;
20613- int first_time = 1;
20614
20615 /* don't allow allocations above current base */
20616 if (mm->free_area_cache > base)
20617@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
20618 largest_hole = 0;
20619 mm->free_area_cache = base;
20620 }
20621-try_again:
20622+
20623 /* make sure it can fit in the remaining address space */
20624 if (mm->free_area_cache < len)
20625 goto fail;
20626
20627 /* either no address requested or can't fit in requested address hole */
20628- addr = (mm->free_area_cache - len) & huge_page_mask(h);
20629+ addr = (mm->free_area_cache - len);
20630 do {
20631+ addr &= huge_page_mask(h);
20632+ vma = find_vma(mm, addr);
20633 /*
20634 * Lookup failure means no vma is above this address,
20635 * i.e. return with success:
20636- */
20637- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20638- return addr;
20639-
20640- /*
20641 * new region fits between prev_vma->vm_end and
20642 * vma->vm_start, use it:
20643 */
20644- if (addr + len <= vma->vm_start &&
20645- (!prev_vma || (addr >= prev_vma->vm_end))) {
20646+ if (check_heap_stack_gap(vma, addr, len)) {
20647 /* remember the address as a hint for next time */
20648- mm->cached_hole_size = largest_hole;
20649- return (mm->free_area_cache = addr);
20650- } else {
20651- /* pull free_area_cache down to the first hole */
20652- if (mm->free_area_cache == vma->vm_end) {
20653- mm->free_area_cache = vma->vm_start;
20654- mm->cached_hole_size = largest_hole;
20655- }
20656+ mm->cached_hole_size = largest_hole;
20657+ return (mm->free_area_cache = addr);
20658+ }
20659+ /* pull free_area_cache down to the first hole */
20660+ if (mm->free_area_cache == vma->vm_end) {
20661+ mm->free_area_cache = vma->vm_start;
20662+ mm->cached_hole_size = largest_hole;
20663 }
20664
20665 /* remember the largest hole we saw so far */
20666 if (addr + largest_hole < vma->vm_start)
20667- largest_hole = vma->vm_start - addr;
20668+ largest_hole = vma->vm_start - addr;
20669
20670 /* try just below the current vma->vm_start */
20671- addr = (vma->vm_start - len) & huge_page_mask(h);
20672- } while (len <= vma->vm_start);
20673+ addr = skip_heap_stack_gap(vma, len);
20674+ } while (!IS_ERR_VALUE(addr));
20675
20676 fail:
20677 /*
20678- * if hint left us with no space for the requested
20679- * mapping then try again:
20680- */
20681- if (first_time) {
20682- mm->free_area_cache = base;
20683- largest_hole = 0;
20684- first_time = 0;
20685- goto try_again;
20686- }
20687- /*
20688 * A failed mmap() very likely causes application failure,
20689 * so fall back to the bottom-up function here. This scenario
20690 * can happen with large stack limits and large mmap()
20691 * allocations.
20692 */
20693- mm->free_area_cache = TASK_UNMAPPED_BASE;
20694+
20695+#ifdef CONFIG_PAX_SEGMEXEC
20696+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20697+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20698+ else
20699+#endif
20700+
20701+ mm->mmap_base = TASK_UNMAPPED_BASE;
20702+
20703+#ifdef CONFIG_PAX_RANDMMAP
20704+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20705+ mm->mmap_base += mm->delta_mmap;
20706+#endif
20707+
20708+ mm->free_area_cache = mm->mmap_base;
20709 mm->cached_hole_size = ~0UL;
20710 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20711 len, pgoff, flags);
20712@@ -386,6 +392,7 @@ fail:
20713 /*
20714 * Restore the topdown base:
20715 */
20716+ mm->mmap_base = base;
20717 mm->free_area_cache = base;
20718 mm->cached_hole_size = ~0UL;
20719
20720@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
20721 struct hstate *h = hstate_file(file);
20722 struct mm_struct *mm = current->mm;
20723 struct vm_area_struct *vma;
20724+ unsigned long pax_task_size = TASK_SIZE;
20725
20726 if (len & ~huge_page_mask(h))
20727 return -EINVAL;
20728- if (len > TASK_SIZE)
20729+
20730+#ifdef CONFIG_PAX_SEGMEXEC
20731+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20732+ pax_task_size = SEGMEXEC_TASK_SIZE;
20733+#endif
20734+
20735+ pax_task_size -= PAGE_SIZE;
20736+
20737+ if (len > pax_task_size)
20738 return -ENOMEM;
20739
20740 if (flags & MAP_FIXED) {
20741@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
20742 if (addr) {
20743 addr = ALIGN(addr, huge_page_size(h));
20744 vma = find_vma(mm, addr);
20745- if (TASK_SIZE - len >= addr &&
20746- (!vma || addr + len <= vma->vm_start))
20747+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20748 return addr;
20749 }
20750 if (mm->get_unmapped_area == arch_get_unmapped_area)
20751diff -urNp linux-3.1.1/arch/x86/mm/init_32.c linux-3.1.1/arch/x86/mm/init_32.c
20752--- linux-3.1.1/arch/x86/mm/init_32.c 2011-11-11 15:19:27.000000000 -0500
20753+++ linux-3.1.1/arch/x86/mm/init_32.c 2011-11-16 18:39:07.000000000 -0500
20754@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
20755 }
20756
20757 /*
20758- * Creates a middle page table and puts a pointer to it in the
20759- * given global directory entry. This only returns the gd entry
20760- * in non-PAE compilation mode, since the middle layer is folded.
20761- */
20762-static pmd_t * __init one_md_table_init(pgd_t *pgd)
20763-{
20764- pud_t *pud;
20765- pmd_t *pmd_table;
20766-
20767-#ifdef CONFIG_X86_PAE
20768- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20769- if (after_bootmem)
20770- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20771- else
20772- pmd_table = (pmd_t *)alloc_low_page();
20773- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20774- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20775- pud = pud_offset(pgd, 0);
20776- BUG_ON(pmd_table != pmd_offset(pud, 0));
20777-
20778- return pmd_table;
20779- }
20780-#endif
20781- pud = pud_offset(pgd, 0);
20782- pmd_table = pmd_offset(pud, 0);
20783-
20784- return pmd_table;
20785-}
20786-
20787-/*
20788 * Create a page table and place a pointer to it in a middle page
20789 * directory entry:
20790 */
20791@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
20792 page_table = (pte_t *)alloc_low_page();
20793
20794 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20795+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20796+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20797+#else
20798 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20799+#endif
20800 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20801 }
20802
20803 return pte_offset_kernel(pmd, 0);
20804 }
20805
20806+static pmd_t * __init one_md_table_init(pgd_t *pgd)
20807+{
20808+ pud_t *pud;
20809+ pmd_t *pmd_table;
20810+
20811+ pud = pud_offset(pgd, 0);
20812+ pmd_table = pmd_offset(pud, 0);
20813+
20814+ return pmd_table;
20815+}
20816+
20817 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20818 {
20819 int pgd_idx = pgd_index(vaddr);
20820@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20821 int pgd_idx, pmd_idx;
20822 unsigned long vaddr;
20823 pgd_t *pgd;
20824+ pud_t *pud;
20825 pmd_t *pmd;
20826 pte_t *pte = NULL;
20827
20828@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20829 pgd = pgd_base + pgd_idx;
20830
20831 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20832- pmd = one_md_table_init(pgd);
20833- pmd = pmd + pmd_index(vaddr);
20834+ pud = pud_offset(pgd, vaddr);
20835+ pmd = pmd_offset(pud, vaddr);
20836+
20837+#ifdef CONFIG_X86_PAE
20838+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20839+#endif
20840+
20841 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20842 pmd++, pmd_idx++) {
20843 pte = page_table_kmap_check(one_page_table_init(pmd),
20844@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20845 }
20846 }
20847
20848-static inline int is_kernel_text(unsigned long addr)
20849+static inline int is_kernel_text(unsigned long start, unsigned long end)
20850 {
20851- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20852- return 1;
20853- return 0;
20854+ if ((start > ktla_ktva((unsigned long)_etext) ||
20855+ end <= ktla_ktva((unsigned long)_stext)) &&
20856+ (start > ktla_ktva((unsigned long)_einittext) ||
20857+ end <= ktla_ktva((unsigned long)_sinittext)) &&
20858+
20859+#ifdef CONFIG_ACPI_SLEEP
20860+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20861+#endif
20862+
20863+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20864+ return 0;
20865+ return 1;
20866 }
20867
20868 /*
20869@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20870 unsigned long last_map_addr = end;
20871 unsigned long start_pfn, end_pfn;
20872 pgd_t *pgd_base = swapper_pg_dir;
20873- int pgd_idx, pmd_idx, pte_ofs;
20874+ unsigned int pgd_idx, pmd_idx, pte_ofs;
20875 unsigned long pfn;
20876 pgd_t *pgd;
20877+ pud_t *pud;
20878 pmd_t *pmd;
20879 pte_t *pte;
20880 unsigned pages_2m, pages_4k;
20881@@ -281,8 +282,13 @@ repeat:
20882 pfn = start_pfn;
20883 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20884 pgd = pgd_base + pgd_idx;
20885- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20886- pmd = one_md_table_init(pgd);
20887+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20888+ pud = pud_offset(pgd, 0);
20889+ pmd = pmd_offset(pud, 0);
20890+
20891+#ifdef CONFIG_X86_PAE
20892+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20893+#endif
20894
20895 if (pfn >= end_pfn)
20896 continue;
20897@@ -294,14 +300,13 @@ repeat:
20898 #endif
20899 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20900 pmd++, pmd_idx++) {
20901- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20902+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20903
20904 /*
20905 * Map with big pages if possible, otherwise
20906 * create normal page tables:
20907 */
20908 if (use_pse) {
20909- unsigned int addr2;
20910 pgprot_t prot = PAGE_KERNEL_LARGE;
20911 /*
20912 * first pass will use the same initial
20913@@ -311,11 +316,7 @@ repeat:
20914 __pgprot(PTE_IDENT_ATTR |
20915 _PAGE_PSE);
20916
20917- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20918- PAGE_OFFSET + PAGE_SIZE-1;
20919-
20920- if (is_kernel_text(addr) ||
20921- is_kernel_text(addr2))
20922+ if (is_kernel_text(address, address + PMD_SIZE))
20923 prot = PAGE_KERNEL_LARGE_EXEC;
20924
20925 pages_2m++;
20926@@ -332,7 +333,7 @@ repeat:
20927 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20928 pte += pte_ofs;
20929 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20930- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20931+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20932 pgprot_t prot = PAGE_KERNEL;
20933 /*
20934 * first pass will use the same initial
20935@@ -340,7 +341,7 @@ repeat:
20936 */
20937 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20938
20939- if (is_kernel_text(addr))
20940+ if (is_kernel_text(address, address + PAGE_SIZE))
20941 prot = PAGE_KERNEL_EXEC;
20942
20943 pages_4k++;
20944@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20945
20946 pud = pud_offset(pgd, va);
20947 pmd = pmd_offset(pud, va);
20948- if (!pmd_present(*pmd))
20949+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
20950 break;
20951
20952 pte = pte_offset_kernel(pmd, va);
20953@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20954
20955 static void __init pagetable_init(void)
20956 {
20957- pgd_t *pgd_base = swapper_pg_dir;
20958-
20959- permanent_kmaps_init(pgd_base);
20960+ permanent_kmaps_init(swapper_pg_dir);
20961 }
20962
20963-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20964+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20965 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20966
20967 /* user-defined highmem size */
20968@@ -757,6 +756,12 @@ void __init mem_init(void)
20969
20970 pci_iommu_alloc();
20971
20972+#ifdef CONFIG_PAX_PER_CPU_PGD
20973+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20974+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20975+ KERNEL_PGD_PTRS);
20976+#endif
20977+
20978 #ifdef CONFIG_FLATMEM
20979 BUG_ON(!mem_map);
20980 #endif
20981@@ -774,7 +779,7 @@ void __init mem_init(void)
20982 set_highmem_pages_init();
20983
20984 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20985- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20986+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20987 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20988
20989 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20990@@ -815,10 +820,10 @@ void __init mem_init(void)
20991 ((unsigned long)&__init_end -
20992 (unsigned long)&__init_begin) >> 10,
20993
20994- (unsigned long)&_etext, (unsigned long)&_edata,
20995- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20996+ (unsigned long)&_sdata, (unsigned long)&_edata,
20997+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20998
20999- (unsigned long)&_text, (unsigned long)&_etext,
21000+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21001 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21002
21003 /*
21004@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
21005 if (!kernel_set_to_readonly)
21006 return;
21007
21008+ start = ktla_ktva(start);
21009 pr_debug("Set kernel text: %lx - %lx for read write\n",
21010 start, start+size);
21011
21012@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
21013 if (!kernel_set_to_readonly)
21014 return;
21015
21016+ start = ktla_ktva(start);
21017 pr_debug("Set kernel text: %lx - %lx for read only\n",
21018 start, start+size);
21019
21020@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
21021 unsigned long start = PFN_ALIGN(_text);
21022 unsigned long size = PFN_ALIGN(_etext) - start;
21023
21024+ start = ktla_ktva(start);
21025 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21026 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21027 size >> 10);
21028diff -urNp linux-3.1.1/arch/x86/mm/init_64.c linux-3.1.1/arch/x86/mm/init_64.c
21029--- linux-3.1.1/arch/x86/mm/init_64.c 2011-11-11 15:19:27.000000000 -0500
21030+++ linux-3.1.1/arch/x86/mm/init_64.c 2011-11-16 18:39:07.000000000 -0500
21031@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
21032 * around without checking the pgd every time.
21033 */
21034
21035-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
21036+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
21037 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21038
21039 int force_personality32;
21040@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
21041
21042 for (address = start; address <= end; address += PGDIR_SIZE) {
21043 const pgd_t *pgd_ref = pgd_offset_k(address);
21044+
21045+#ifdef CONFIG_PAX_PER_CPU_PGD
21046+ unsigned long cpu;
21047+#else
21048 struct page *page;
21049+#endif
21050
21051 if (pgd_none(*pgd_ref))
21052 continue;
21053
21054 spin_lock(&pgd_lock);
21055+
21056+#ifdef CONFIG_PAX_PER_CPU_PGD
21057+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21058+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
21059+#else
21060 list_for_each_entry(page, &pgd_list, lru) {
21061 pgd_t *pgd;
21062 spinlock_t *pgt_lock;
21063@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
21064 /* the pgt_lock only for Xen */
21065 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21066 spin_lock(pgt_lock);
21067+#endif
21068
21069 if (pgd_none(*pgd))
21070 set_pgd(pgd, *pgd_ref);
21071@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
21072 BUG_ON(pgd_page_vaddr(*pgd)
21073 != pgd_page_vaddr(*pgd_ref));
21074
21075+#ifndef CONFIG_PAX_PER_CPU_PGD
21076 spin_unlock(pgt_lock);
21077+#endif
21078+
21079 }
21080 spin_unlock(&pgd_lock);
21081 }
21082@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21083 pmd = fill_pmd(pud, vaddr);
21084 pte = fill_pte(pmd, vaddr);
21085
21086+ pax_open_kernel();
21087 set_pte(pte, new_pte);
21088+ pax_close_kernel();
21089
21090 /*
21091 * It's enough to flush this one mapping.
21092@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
21093 pgd = pgd_offset_k((unsigned long)__va(phys));
21094 if (pgd_none(*pgd)) {
21095 pud = (pud_t *) spp_getpage();
21096- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21097- _PAGE_USER));
21098+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21099 }
21100 pud = pud_offset(pgd, (unsigned long)__va(phys));
21101 if (pud_none(*pud)) {
21102 pmd = (pmd_t *) spp_getpage();
21103- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21104- _PAGE_USER));
21105+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21106 }
21107 pmd = pmd_offset(pud, phys);
21108 BUG_ON(!pmd_none(*pmd));
21109@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
21110 if (pfn >= pgt_buf_top)
21111 panic("alloc_low_page: ran out of memory");
21112
21113- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21114+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21115 clear_page(adr);
21116 *phys = pfn * PAGE_SIZE;
21117 return adr;
21118@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
21119
21120 phys = __pa(virt);
21121 left = phys & (PAGE_SIZE - 1);
21122- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21123+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21124 adr = (void *)(((unsigned long)adr) | left);
21125
21126 return adr;
21127@@ -693,6 +707,12 @@ void __init mem_init(void)
21128
21129 pci_iommu_alloc();
21130
21131+#ifdef CONFIG_PAX_PER_CPU_PGD
21132+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21133+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21134+ KERNEL_PGD_PTRS);
21135+#endif
21136+
21137 /* clear_bss() already clear the empty_zero_page */
21138
21139 reservedpages = 0;
21140@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
21141 static struct vm_area_struct gate_vma = {
21142 .vm_start = VSYSCALL_START,
21143 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21144- .vm_page_prot = PAGE_READONLY_EXEC,
21145- .vm_flags = VM_READ | VM_EXEC
21146+ .vm_page_prot = PAGE_READONLY,
21147+ .vm_flags = VM_READ
21148 };
21149
21150 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
21151@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
21152
21153 const char *arch_vma_name(struct vm_area_struct *vma)
21154 {
21155- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21156+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21157 return "[vdso]";
21158 if (vma == &gate_vma)
21159 return "[vsyscall]";
21160diff -urNp linux-3.1.1/arch/x86/mm/init.c linux-3.1.1/arch/x86/mm/init.c
21161--- linux-3.1.1/arch/x86/mm/init.c 2011-11-11 15:19:27.000000000 -0500
21162+++ linux-3.1.1/arch/x86/mm/init.c 2011-11-17 18:31:28.000000000 -0500
21163@@ -31,7 +31,7 @@ int direct_gbpages
21164 static void __init find_early_table_space(unsigned long end, int use_pse,
21165 int use_gbpages)
21166 {
21167- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
21168+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
21169 phys_addr_t base;
21170
21171 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
21172@@ -312,8 +312,29 @@ unsigned long __init_refok init_memory_m
21173 */
21174 int devmem_is_allowed(unsigned long pagenr)
21175 {
21176- if (pagenr <= 256)
21177+#ifdef CONFIG_GRKERNSEC_KMEM
21178+ /* allow BDA */
21179+ if (!pagenr)
21180+ return 1;
21181+ /* allow EBDA */
21182+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21183+ return 1;
21184+#else
21185+ if (!pagenr)
21186+ return 1;
21187+#ifdef CONFIG_VM86
21188+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
21189+ return 1;
21190+#endif
21191+#endif
21192+
21193+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21194 return 1;
21195+#ifdef CONFIG_GRKERNSEC_KMEM
21196+ /* throw out everything else below 1MB */
21197+ if (pagenr <= 256)
21198+ return 0;
21199+#endif
21200 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21201 return 0;
21202 if (!page_is_ram(pagenr))
21203@@ -372,6 +393,86 @@ void free_init_pages(char *what, unsigne
21204
21205 void free_initmem(void)
21206 {
21207+
21208+#ifdef CONFIG_PAX_KERNEXEC
21209+#ifdef CONFIG_X86_32
21210+ /* PaX: limit KERNEL_CS to actual size */
21211+ unsigned long addr, limit;
21212+ struct desc_struct d;
21213+ int cpu;
21214+
21215+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21216+ limit = (limit - 1UL) >> PAGE_SHIFT;
21217+
21218+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21219+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21220+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21221+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21222+ }
21223+
21224+ /* PaX: make KERNEL_CS read-only */
21225+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21226+ if (!paravirt_enabled())
21227+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21228+/*
21229+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21230+ pgd = pgd_offset_k(addr);
21231+ pud = pud_offset(pgd, addr);
21232+ pmd = pmd_offset(pud, addr);
21233+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21234+ }
21235+*/
21236+#ifdef CONFIG_X86_PAE
21237+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21238+/*
21239+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21240+ pgd = pgd_offset_k(addr);
21241+ pud = pud_offset(pgd, addr);
21242+ pmd = pmd_offset(pud, addr);
21243+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21244+ }
21245+*/
21246+#endif
21247+
21248+#ifdef CONFIG_MODULES
21249+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21250+#endif
21251+
21252+#else
21253+ pgd_t *pgd;
21254+ pud_t *pud;
21255+ pmd_t *pmd;
21256+ unsigned long addr, end;
21257+
21258+ /* PaX: make kernel code/rodata read-only, rest non-executable */
21259+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21260+ pgd = pgd_offset_k(addr);
21261+ pud = pud_offset(pgd, addr);
21262+ pmd = pmd_offset(pud, addr);
21263+ if (!pmd_present(*pmd))
21264+ continue;
21265+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21266+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21267+ else
21268+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21269+ }
21270+
21271+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21272+ end = addr + KERNEL_IMAGE_SIZE;
21273+ for (; addr < end; addr += PMD_SIZE) {
21274+ pgd = pgd_offset_k(addr);
21275+ pud = pud_offset(pgd, addr);
21276+ pmd = pmd_offset(pud, addr);
21277+ if (!pmd_present(*pmd))
21278+ continue;
21279+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21280+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21281+ }
21282+#endif
21283+
21284+ flush_tlb_all();
21285+#endif
21286+
21287 free_init_pages("unused kernel memory",
21288 (unsigned long)(&__init_begin),
21289 (unsigned long)(&__init_end));
21290diff -urNp linux-3.1.1/arch/x86/mm/iomap_32.c linux-3.1.1/arch/x86/mm/iomap_32.c
21291--- linux-3.1.1/arch/x86/mm/iomap_32.c 2011-11-11 15:19:27.000000000 -0500
21292+++ linux-3.1.1/arch/x86/mm/iomap_32.c 2011-11-16 18:39:07.000000000 -0500
21293@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21294 type = kmap_atomic_idx_push();
21295 idx = type + KM_TYPE_NR * smp_processor_id();
21296 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21297+
21298+ pax_open_kernel();
21299 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21300+ pax_close_kernel();
21301+
21302 arch_flush_lazy_mmu_mode();
21303
21304 return (void *)vaddr;
21305diff -urNp linux-3.1.1/arch/x86/mm/ioremap.c linux-3.1.1/arch/x86/mm/ioremap.c
21306--- linux-3.1.1/arch/x86/mm/ioremap.c 2011-11-11 15:19:27.000000000 -0500
21307+++ linux-3.1.1/arch/x86/mm/ioremap.c 2011-11-16 18:39:07.000000000 -0500
21308@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
21309 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
21310 int is_ram = page_is_ram(pfn);
21311
21312- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21313+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21314 return NULL;
21315 WARN_ON_ONCE(is_ram);
21316 }
21317@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
21318 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21319
21320 static __initdata int after_paging_init;
21321-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21322+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21323
21324 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21325 {
21326@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
21327 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21328
21329 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21330- memset(bm_pte, 0, sizeof(bm_pte));
21331- pmd_populate_kernel(&init_mm, pmd, bm_pte);
21332+ pmd_populate_user(&init_mm, pmd, bm_pte);
21333
21334 /*
21335 * The boot-ioremap range spans multiple pmds, for which
21336diff -urNp linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c
21337--- linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-11 15:19:27.000000000 -0500
21338+++ linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-16 18:39:07.000000000 -0500
21339@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21340 * memory (e.g. tracked pages)? For now, we need this to avoid
21341 * invoking kmemcheck for PnP BIOS calls.
21342 */
21343- if (regs->flags & X86_VM_MASK)
21344+ if (v8086_mode(regs))
21345 return false;
21346- if (regs->cs != __KERNEL_CS)
21347+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21348 return false;
21349
21350 pte = kmemcheck_pte_lookup(address);
21351diff -urNp linux-3.1.1/arch/x86/mm/mmap.c linux-3.1.1/arch/x86/mm/mmap.c
21352--- linux-3.1.1/arch/x86/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
21353+++ linux-3.1.1/arch/x86/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
21354@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21355 * Leave an at least ~128 MB hole with possible stack randomization.
21356 */
21357 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21358-#define MAX_GAP (TASK_SIZE/6*5)
21359+#define MAX_GAP (pax_task_size/6*5)
21360
21361 /*
21362 * True on X86_32 or when emulating IA32 on X86_64
21363@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21364 return rnd << PAGE_SHIFT;
21365 }
21366
21367-static unsigned long mmap_base(void)
21368+static unsigned long mmap_base(struct mm_struct *mm)
21369 {
21370 unsigned long gap = rlimit(RLIMIT_STACK);
21371+ unsigned long pax_task_size = TASK_SIZE;
21372+
21373+#ifdef CONFIG_PAX_SEGMEXEC
21374+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21375+ pax_task_size = SEGMEXEC_TASK_SIZE;
21376+#endif
21377
21378 if (gap < MIN_GAP)
21379 gap = MIN_GAP;
21380 else if (gap > MAX_GAP)
21381 gap = MAX_GAP;
21382
21383- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21384+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21385 }
21386
21387 /*
21388 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21389 * does, but not when emulating X86_32
21390 */
21391-static unsigned long mmap_legacy_base(void)
21392+static unsigned long mmap_legacy_base(struct mm_struct *mm)
21393 {
21394- if (mmap_is_ia32())
21395+ if (mmap_is_ia32()) {
21396+
21397+#ifdef CONFIG_PAX_SEGMEXEC
21398+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21399+ return SEGMEXEC_TASK_UNMAPPED_BASE;
21400+ else
21401+#endif
21402+
21403 return TASK_UNMAPPED_BASE;
21404- else
21405+ } else
21406 return TASK_UNMAPPED_BASE + mmap_rnd();
21407 }
21408
21409@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21410 void arch_pick_mmap_layout(struct mm_struct *mm)
21411 {
21412 if (mmap_is_legacy()) {
21413- mm->mmap_base = mmap_legacy_base();
21414+ mm->mmap_base = mmap_legacy_base(mm);
21415+
21416+#ifdef CONFIG_PAX_RANDMMAP
21417+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21418+ mm->mmap_base += mm->delta_mmap;
21419+#endif
21420+
21421 mm->get_unmapped_area = arch_get_unmapped_area;
21422 mm->unmap_area = arch_unmap_area;
21423 } else {
21424- mm->mmap_base = mmap_base();
21425+ mm->mmap_base = mmap_base(mm);
21426+
21427+#ifdef CONFIG_PAX_RANDMMAP
21428+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21429+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21430+#endif
21431+
21432 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21433 mm->unmap_area = arch_unmap_area_topdown;
21434 }
21435diff -urNp linux-3.1.1/arch/x86/mm/mmio-mod.c linux-3.1.1/arch/x86/mm/mmio-mod.c
21436--- linux-3.1.1/arch/x86/mm/mmio-mod.c 2011-11-11 15:19:27.000000000 -0500
21437+++ linux-3.1.1/arch/x86/mm/mmio-mod.c 2011-11-16 18:39:07.000000000 -0500
21438@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
21439 break;
21440 default:
21441 {
21442- unsigned char *ip = (unsigned char *)instptr;
21443+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
21444 my_trace->opcode = MMIO_UNKNOWN_OP;
21445 my_trace->width = 0;
21446 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
21447@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
21448 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21449 void __iomem *addr)
21450 {
21451- static atomic_t next_id;
21452+ static atomic_unchecked_t next_id;
21453 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21454 /* These are page-unaligned. */
21455 struct mmiotrace_map map = {
21456@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
21457 .private = trace
21458 },
21459 .phys = offset,
21460- .id = atomic_inc_return(&next_id)
21461+ .id = atomic_inc_return_unchecked(&next_id)
21462 };
21463 map.map_id = trace->id;
21464
21465diff -urNp linux-3.1.1/arch/x86/mm/pageattr.c linux-3.1.1/arch/x86/mm/pageattr.c
21466--- linux-3.1.1/arch/x86/mm/pageattr.c 2011-11-11 15:19:27.000000000 -0500
21467+++ linux-3.1.1/arch/x86/mm/pageattr.c 2011-11-16 18:39:07.000000000 -0500
21468@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
21469 */
21470 #ifdef CONFIG_PCI_BIOS
21471 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21472- pgprot_val(forbidden) |= _PAGE_NX;
21473+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21474 #endif
21475
21476 /*
21477@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
21478 * Does not cover __inittext since that is gone later on. On
21479 * 64bit we do not enforce !NX on the low mapping
21480 */
21481- if (within(address, (unsigned long)_text, (unsigned long)_etext))
21482- pgprot_val(forbidden) |= _PAGE_NX;
21483+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21484+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21485
21486+#ifdef CONFIG_DEBUG_RODATA
21487 /*
21488 * The .rodata section needs to be read-only. Using the pfn
21489 * catches all aliases.
21490@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
21491 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21492 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21493 pgprot_val(forbidden) |= _PAGE_RW;
21494+#endif
21495
21496 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
21497 /*
21498@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
21499 }
21500 #endif
21501
21502+#ifdef CONFIG_PAX_KERNEXEC
21503+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21504+ pgprot_val(forbidden) |= _PAGE_RW;
21505+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21506+ }
21507+#endif
21508+
21509 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21510
21511 return prot;
21512@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21513 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21514 {
21515 /* change init_mm */
21516+ pax_open_kernel();
21517 set_pte_atomic(kpte, pte);
21518+
21519 #ifdef CONFIG_X86_32
21520 if (!SHARED_KERNEL_PMD) {
21521+
21522+#ifdef CONFIG_PAX_PER_CPU_PGD
21523+ unsigned long cpu;
21524+#else
21525 struct page *page;
21526+#endif
21527
21528+#ifdef CONFIG_PAX_PER_CPU_PGD
21529+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21530+ pgd_t *pgd = get_cpu_pgd(cpu);
21531+#else
21532 list_for_each_entry(page, &pgd_list, lru) {
21533- pgd_t *pgd;
21534+ pgd_t *pgd = (pgd_t *)page_address(page);
21535+#endif
21536+
21537 pud_t *pud;
21538 pmd_t *pmd;
21539
21540- pgd = (pgd_t *)page_address(page) + pgd_index(address);
21541+ pgd += pgd_index(address);
21542 pud = pud_offset(pgd, address);
21543 pmd = pmd_offset(pud, address);
21544 set_pte_atomic((pte_t *)pmd, pte);
21545 }
21546 }
21547 #endif
21548+ pax_close_kernel();
21549 }
21550
21551 static int
21552diff -urNp linux-3.1.1/arch/x86/mm/pageattr-test.c linux-3.1.1/arch/x86/mm/pageattr-test.c
21553--- linux-3.1.1/arch/x86/mm/pageattr-test.c 2011-11-11 15:19:27.000000000 -0500
21554+++ linux-3.1.1/arch/x86/mm/pageattr-test.c 2011-11-16 18:39:07.000000000 -0500
21555@@ -36,7 +36,7 @@ enum {
21556
21557 static int pte_testbit(pte_t pte)
21558 {
21559- return pte_flags(pte) & _PAGE_UNUSED1;
21560+ return pte_flags(pte) & _PAGE_CPA_TEST;
21561 }
21562
21563 struct split_state {
21564diff -urNp linux-3.1.1/arch/x86/mm/pat.c linux-3.1.1/arch/x86/mm/pat.c
21565--- linux-3.1.1/arch/x86/mm/pat.c 2011-11-11 15:19:27.000000000 -0500
21566+++ linux-3.1.1/arch/x86/mm/pat.c 2011-11-16 18:39:07.000000000 -0500
21567@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
21568
21569 if (!entry) {
21570 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21571- current->comm, current->pid, start, end);
21572+ current->comm, task_pid_nr(current), start, end);
21573 return -EINVAL;
21574 }
21575
21576@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
21577 while (cursor < to) {
21578 if (!devmem_is_allowed(pfn)) {
21579 printk(KERN_INFO
21580- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21581- current->comm, from, to);
21582+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21583+ current->comm, from, to, cursor);
21584 return 0;
21585 }
21586 cursor += PAGE_SIZE;
21587@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
21588 printk(KERN_INFO
21589 "%s:%d ioremap_change_attr failed %s "
21590 "for %Lx-%Lx\n",
21591- current->comm, current->pid,
21592+ current->comm, task_pid_nr(current),
21593 cattr_name(flags),
21594 base, (unsigned long long)(base + size));
21595 return -EINVAL;
21596@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
21597 if (want_flags != flags) {
21598 printk(KERN_WARNING
21599 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
21600- current->comm, current->pid,
21601+ current->comm, task_pid_nr(current),
21602 cattr_name(want_flags),
21603 (unsigned long long)paddr,
21604 (unsigned long long)(paddr + size),
21605@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
21606 free_memtype(paddr, paddr + size);
21607 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21608 " for %Lx-%Lx, got %s\n",
21609- current->comm, current->pid,
21610+ current->comm, task_pid_nr(current),
21611 cattr_name(want_flags),
21612 (unsigned long long)paddr,
21613 (unsigned long long)(paddr + size),
21614diff -urNp linux-3.1.1/arch/x86/mm/pf_in.c linux-3.1.1/arch/x86/mm/pf_in.c
21615--- linux-3.1.1/arch/x86/mm/pf_in.c 2011-11-11 15:19:27.000000000 -0500
21616+++ linux-3.1.1/arch/x86/mm/pf_in.c 2011-11-16 18:39:07.000000000 -0500
21617@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
21618 int i;
21619 enum reason_type rv = OTHERS;
21620
21621- p = (unsigned char *)ins_addr;
21622+ p = (unsigned char *)ktla_ktva(ins_addr);
21623 p += skip_prefix(p, &prf);
21624 p += get_opcode(p, &opcode);
21625
21626@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
21627 struct prefix_bits prf;
21628 int i;
21629
21630- p = (unsigned char *)ins_addr;
21631+ p = (unsigned char *)ktla_ktva(ins_addr);
21632 p += skip_prefix(p, &prf);
21633 p += get_opcode(p, &opcode);
21634
21635@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
21636 struct prefix_bits prf;
21637 int i;
21638
21639- p = (unsigned char *)ins_addr;
21640+ p = (unsigned char *)ktla_ktva(ins_addr);
21641 p += skip_prefix(p, &prf);
21642 p += get_opcode(p, &opcode);
21643
21644@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
21645 struct prefix_bits prf;
21646 int i;
21647
21648- p = (unsigned char *)ins_addr;
21649+ p = (unsigned char *)ktla_ktva(ins_addr);
21650 p += skip_prefix(p, &prf);
21651 p += get_opcode(p, &opcode);
21652 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
21653@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
21654 struct prefix_bits prf;
21655 int i;
21656
21657- p = (unsigned char *)ins_addr;
21658+ p = (unsigned char *)ktla_ktva(ins_addr);
21659 p += skip_prefix(p, &prf);
21660 p += get_opcode(p, &opcode);
21661 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
21662diff -urNp linux-3.1.1/arch/x86/mm/pgtable_32.c linux-3.1.1/arch/x86/mm/pgtable_32.c
21663--- linux-3.1.1/arch/x86/mm/pgtable_32.c 2011-11-11 15:19:27.000000000 -0500
21664+++ linux-3.1.1/arch/x86/mm/pgtable_32.c 2011-11-16 18:39:07.000000000 -0500
21665@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
21666 return;
21667 }
21668 pte = pte_offset_kernel(pmd, vaddr);
21669+
21670+ pax_open_kernel();
21671 if (pte_val(pteval))
21672 set_pte_at(&init_mm, vaddr, pte, pteval);
21673 else
21674 pte_clear(&init_mm, vaddr, pte);
21675+ pax_close_kernel();
21676
21677 /*
21678 * It's enough to flush this one mapping.
21679diff -urNp linux-3.1.1/arch/x86/mm/pgtable.c linux-3.1.1/arch/x86/mm/pgtable.c
21680--- linux-3.1.1/arch/x86/mm/pgtable.c 2011-11-11 15:19:27.000000000 -0500
21681+++ linux-3.1.1/arch/x86/mm/pgtable.c 2011-11-16 18:39:07.000000000 -0500
21682@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
21683 list_del(&page->lru);
21684 }
21685
21686-#define UNSHARED_PTRS_PER_PGD \
21687- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21688+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21689+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21690
21691+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21692+{
21693+ while (count--)
21694+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21695+}
21696+#endif
21697+
21698+#ifdef CONFIG_PAX_PER_CPU_PGD
21699+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21700+{
21701+ while (count--)
21702+
21703+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21704+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21705+#else
21706+ *dst++ = *src++;
21707+#endif
21708
21709+}
21710+#endif
21711+
21712+#ifdef CONFIG_X86_64
21713+#define pxd_t pud_t
21714+#define pyd_t pgd_t
21715+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21716+#define pxd_free(mm, pud) pud_free((mm), (pud))
21717+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21718+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21719+#define PYD_SIZE PGDIR_SIZE
21720+#else
21721+#define pxd_t pmd_t
21722+#define pyd_t pud_t
21723+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21724+#define pxd_free(mm, pud) pmd_free((mm), (pud))
21725+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21726+#define pyd_offset(mm ,address) pud_offset((mm), (address))
21727+#define PYD_SIZE PUD_SIZE
21728+#endif
21729+
21730+#ifdef CONFIG_PAX_PER_CPU_PGD
21731+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
21732+static inline void pgd_dtor(pgd_t *pgd) {}
21733+#else
21734 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
21735 {
21736 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
21737@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
21738 pgd_list_del(pgd);
21739 spin_unlock(&pgd_lock);
21740 }
21741+#endif
21742
21743 /*
21744 * List of all pgd's needed for non-PAE so it can invalidate entries
21745@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
21746 * -- wli
21747 */
21748
21749-#ifdef CONFIG_X86_PAE
21750+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21751 /*
21752 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21753 * updating the top-level pagetable entries to guarantee the
21754@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
21755 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21756 * and initialize the kernel pmds here.
21757 */
21758-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21759+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21760
21761 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21762 {
21763@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
21764 */
21765 flush_tlb_mm(mm);
21766 }
21767+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21768+#define PREALLOCATED_PXDS USER_PGD_PTRS
21769 #else /* !CONFIG_X86_PAE */
21770
21771 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21772-#define PREALLOCATED_PMDS 0
21773+#define PREALLOCATED_PXDS 0
21774
21775 #endif /* CONFIG_X86_PAE */
21776
21777-static void free_pmds(pmd_t *pmds[])
21778+static void free_pxds(pxd_t *pxds[])
21779 {
21780 int i;
21781
21782- for(i = 0; i < PREALLOCATED_PMDS; i++)
21783- if (pmds[i])
21784- free_page((unsigned long)pmds[i]);
21785+ for(i = 0; i < PREALLOCATED_PXDS; i++)
21786+ if (pxds[i])
21787+ free_page((unsigned long)pxds[i]);
21788 }
21789
21790-static int preallocate_pmds(pmd_t *pmds[])
21791+static int preallocate_pxds(pxd_t *pxds[])
21792 {
21793 int i;
21794 bool failed = false;
21795
21796- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21797- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21798- if (pmd == NULL)
21799+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21800+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21801+ if (pxd == NULL)
21802 failed = true;
21803- pmds[i] = pmd;
21804+ pxds[i] = pxd;
21805 }
21806
21807 if (failed) {
21808- free_pmds(pmds);
21809+ free_pxds(pxds);
21810 return -ENOMEM;
21811 }
21812
21813@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21814 * preallocate which never got a corresponding vma will need to be
21815 * freed manually.
21816 */
21817-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21818+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21819 {
21820 int i;
21821
21822- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21823+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21824 pgd_t pgd = pgdp[i];
21825
21826 if (pgd_val(pgd) != 0) {
21827- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21828+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21829
21830- pgdp[i] = native_make_pgd(0);
21831+ set_pgd(pgdp + i, native_make_pgd(0));
21832
21833- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21834- pmd_free(mm, pmd);
21835+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21836+ pxd_free(mm, pxd);
21837 }
21838 }
21839 }
21840
21841-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21842+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21843 {
21844- pud_t *pud;
21845+ pyd_t *pyd;
21846 unsigned long addr;
21847 int i;
21848
21849- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21850+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21851 return;
21852
21853- pud = pud_offset(pgd, 0);
21854+#ifdef CONFIG_X86_64
21855+ pyd = pyd_offset(mm, 0L);
21856+#else
21857+ pyd = pyd_offset(pgd, 0L);
21858+#endif
21859
21860- for (addr = i = 0; i < PREALLOCATED_PMDS;
21861- i++, pud++, addr += PUD_SIZE) {
21862- pmd_t *pmd = pmds[i];
21863+ for (addr = i = 0; i < PREALLOCATED_PXDS;
21864+ i++, pyd++, addr += PYD_SIZE) {
21865+ pxd_t *pxd = pxds[i];
21866
21867 if (i >= KERNEL_PGD_BOUNDARY)
21868- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21869- sizeof(pmd_t) * PTRS_PER_PMD);
21870+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21871+ sizeof(pxd_t) * PTRS_PER_PMD);
21872
21873- pud_populate(mm, pud, pmd);
21874+ pyd_populate(mm, pyd, pxd);
21875 }
21876 }
21877
21878 pgd_t *pgd_alloc(struct mm_struct *mm)
21879 {
21880 pgd_t *pgd;
21881- pmd_t *pmds[PREALLOCATED_PMDS];
21882+ pxd_t *pxds[PREALLOCATED_PXDS];
21883
21884 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21885
21886@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21887
21888 mm->pgd = pgd;
21889
21890- if (preallocate_pmds(pmds) != 0)
21891+ if (preallocate_pxds(pxds) != 0)
21892 goto out_free_pgd;
21893
21894 if (paravirt_pgd_alloc(mm) != 0)
21895- goto out_free_pmds;
21896+ goto out_free_pxds;
21897
21898 /*
21899 * Make sure that pre-populating the pmds is atomic with
21900@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21901 spin_lock(&pgd_lock);
21902
21903 pgd_ctor(mm, pgd);
21904- pgd_prepopulate_pmd(mm, pgd, pmds);
21905+ pgd_prepopulate_pxd(mm, pgd, pxds);
21906
21907 spin_unlock(&pgd_lock);
21908
21909 return pgd;
21910
21911-out_free_pmds:
21912- free_pmds(pmds);
21913+out_free_pxds:
21914+ free_pxds(pxds);
21915 out_free_pgd:
21916 free_page((unsigned long)pgd);
21917 out:
21918@@ -295,7 +344,7 @@ out:
21919
21920 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21921 {
21922- pgd_mop_up_pmds(mm, pgd);
21923+ pgd_mop_up_pxds(mm, pgd);
21924 pgd_dtor(pgd);
21925 paravirt_pgd_free(mm, pgd);
21926 free_page((unsigned long)pgd);
21927diff -urNp linux-3.1.1/arch/x86/mm/setup_nx.c linux-3.1.1/arch/x86/mm/setup_nx.c
21928--- linux-3.1.1/arch/x86/mm/setup_nx.c 2011-11-11 15:19:27.000000000 -0500
21929+++ linux-3.1.1/arch/x86/mm/setup_nx.c 2011-11-16 18:39:07.000000000 -0500
21930@@ -5,8 +5,10 @@
21931 #include <asm/pgtable.h>
21932 #include <asm/proto.h>
21933
21934+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21935 static int disable_nx __cpuinitdata;
21936
21937+#ifndef CONFIG_PAX_PAGEEXEC
21938 /*
21939 * noexec = on|off
21940 *
21941@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21942 return 0;
21943 }
21944 early_param("noexec", noexec_setup);
21945+#endif
21946+
21947+#endif
21948
21949 void __cpuinit x86_configure_nx(void)
21950 {
21951+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21952 if (cpu_has_nx && !disable_nx)
21953 __supported_pte_mask |= _PAGE_NX;
21954 else
21955+#endif
21956 __supported_pte_mask &= ~_PAGE_NX;
21957 }
21958
21959diff -urNp linux-3.1.1/arch/x86/mm/tlb.c linux-3.1.1/arch/x86/mm/tlb.c
21960--- linux-3.1.1/arch/x86/mm/tlb.c 2011-11-11 15:19:27.000000000 -0500
21961+++ linux-3.1.1/arch/x86/mm/tlb.c 2011-11-16 18:39:07.000000000 -0500
21962@@ -65,7 +65,11 @@ void leave_mm(int cpu)
21963 BUG();
21964 cpumask_clear_cpu(cpu,
21965 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21966+
21967+#ifndef CONFIG_PAX_PER_CPU_PGD
21968 load_cr3(swapper_pg_dir);
21969+#endif
21970+
21971 }
21972 EXPORT_SYMBOL_GPL(leave_mm);
21973
21974diff -urNp linux-3.1.1/arch/x86/net/bpf_jit_comp.c linux-3.1.1/arch/x86/net/bpf_jit_comp.c
21975--- linux-3.1.1/arch/x86/net/bpf_jit_comp.c 2011-11-11 15:19:27.000000000 -0500
21976+++ linux-3.1.1/arch/x86/net/bpf_jit_comp.c 2011-11-16 18:39:07.000000000 -0500
21977@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
21978 module_free(NULL, image);
21979 return;
21980 }
21981+ pax_open_kernel();
21982 memcpy(image + proglen, temp, ilen);
21983+ pax_close_kernel();
21984 }
21985 proglen += ilen;
21986 addrs[i] = proglen;
21987@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
21988 break;
21989 }
21990 if (proglen == oldproglen) {
21991- image = module_alloc(max_t(unsigned int,
21992+ image = module_alloc_exec(max_t(unsigned int,
21993 proglen,
21994 sizeof(struct work_struct)));
21995 if (!image)
21996diff -urNp linux-3.1.1/arch/x86/net/bpf_jit.S linux-3.1.1/arch/x86/net/bpf_jit.S
21997--- linux-3.1.1/arch/x86/net/bpf_jit.S 2011-11-11 15:19:27.000000000 -0500
21998+++ linux-3.1.1/arch/x86/net/bpf_jit.S 2011-11-16 18:39:07.000000000 -0500
21999@@ -9,6 +9,7 @@
22000 */
22001 #include <linux/linkage.h>
22002 #include <asm/dwarf2.h>
22003+#include <asm/alternative-asm.h>
22004
22005 /*
22006 * Calling convention :
22007@@ -35,6 +36,7 @@ sk_load_word:
22008 jle bpf_slow_path_word
22009 mov (SKBDATA,%rsi),%eax
22010 bswap %eax /* ntohl() */
22011+ pax_force_retaddr
22012 ret
22013
22014
22015@@ -53,6 +55,7 @@ sk_load_half:
22016 jle bpf_slow_path_half
22017 movzwl (SKBDATA,%rsi),%eax
22018 rol $8,%ax # ntohs()
22019+ pax_force_retaddr
22020 ret
22021
22022 sk_load_byte_ind:
22023@@ -66,6 +69,7 @@ sk_load_byte:
22024 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
22025 jle bpf_slow_path_byte
22026 movzbl (SKBDATA,%rsi),%eax
22027+ pax_force_retaddr
22028 ret
22029
22030 /**
22031@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
22032 movzbl (SKBDATA,%rsi),%ebx
22033 and $15,%bl
22034 shl $2,%bl
22035+ pax_force_retaddr
22036 ret
22037 CFI_ENDPROC
22038 ENDPROC(sk_load_byte_msh)
22039@@ -91,6 +96,7 @@ bpf_error:
22040 xor %eax,%eax
22041 mov -8(%rbp),%rbx
22042 leaveq
22043+ pax_force_retaddr
22044 ret
22045
22046 /* rsi contains offset and can be scratched */
22047@@ -113,6 +119,7 @@ bpf_slow_path_word:
22048 js bpf_error
22049 mov -12(%rbp),%eax
22050 bswap %eax
22051+ pax_force_retaddr
22052 ret
22053
22054 bpf_slow_path_half:
22055@@ -121,12 +128,14 @@ bpf_slow_path_half:
22056 mov -12(%rbp),%ax
22057 rol $8,%ax
22058 movzwl %ax,%eax
22059+ pax_force_retaddr
22060 ret
22061
22062 bpf_slow_path_byte:
22063 bpf_slow_path_common(1)
22064 js bpf_error
22065 movzbl -12(%rbp),%eax
22066+ pax_force_retaddr
22067 ret
22068
22069 bpf_slow_path_byte_msh:
22070@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
22071 and $15,%al
22072 shl $2,%al
22073 xchg %eax,%ebx
22074+ pax_force_retaddr
22075 ret
22076diff -urNp linux-3.1.1/arch/x86/oprofile/backtrace.c linux-3.1.1/arch/x86/oprofile/backtrace.c
22077--- linux-3.1.1/arch/x86/oprofile/backtrace.c 2011-11-11 15:19:27.000000000 -0500
22078+++ linux-3.1.1/arch/x86/oprofile/backtrace.c 2011-11-16 18:39:07.000000000 -0500
22079@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_fram
22080 struct stack_frame_ia32 *fp;
22081 unsigned long bytes;
22082
22083- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22084+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22085 if (bytes != sizeof(bufhead))
22086 return NULL;
22087
22088- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
22089+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
22090
22091 oprofile_add_trace(bufhead[0].return_address);
22092
22093@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_bac
22094 struct stack_frame bufhead[2];
22095 unsigned long bytes;
22096
22097- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22098+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22099 if (bytes != sizeof(bufhead))
22100 return NULL;
22101
22102@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const reg
22103 {
22104 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
22105
22106- if (!user_mode_vm(regs)) {
22107+ if (!user_mode(regs)) {
22108 unsigned long stack = kernel_stack_pointer(regs);
22109 if (depth)
22110 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22111diff -urNp linux-3.1.1/arch/x86/pci/mrst.c linux-3.1.1/arch/x86/pci/mrst.c
22112--- linux-3.1.1/arch/x86/pci/mrst.c 2011-11-11 15:19:27.000000000 -0500
22113+++ linux-3.1.1/arch/x86/pci/mrst.c 2011-11-16 18:39:07.000000000 -0500
22114@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
22115 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
22116 pci_mmcfg_late_init();
22117 pcibios_enable_irq = mrst_pci_irq_enable;
22118- pci_root_ops = pci_mrst_ops;
22119+ pax_open_kernel();
22120+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
22121+ pax_close_kernel();
22122 /* Continue with standard init */
22123 return 1;
22124 }
22125diff -urNp linux-3.1.1/arch/x86/pci/pcbios.c linux-3.1.1/arch/x86/pci/pcbios.c
22126--- linux-3.1.1/arch/x86/pci/pcbios.c 2011-11-11 15:19:27.000000000 -0500
22127+++ linux-3.1.1/arch/x86/pci/pcbios.c 2011-11-16 18:39:07.000000000 -0500
22128@@ -79,50 +79,93 @@ union bios32 {
22129 static struct {
22130 unsigned long address;
22131 unsigned short segment;
22132-} bios32_indirect = { 0, __KERNEL_CS };
22133+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22134
22135 /*
22136 * Returns the entry point for the given service, NULL on error
22137 */
22138
22139-static unsigned long bios32_service(unsigned long service)
22140+static unsigned long __devinit bios32_service(unsigned long service)
22141 {
22142 unsigned char return_code; /* %al */
22143 unsigned long address; /* %ebx */
22144 unsigned long length; /* %ecx */
22145 unsigned long entry; /* %edx */
22146 unsigned long flags;
22147+ struct desc_struct d, *gdt;
22148
22149 local_irq_save(flags);
22150- __asm__("lcall *(%%edi); cld"
22151+
22152+ gdt = get_cpu_gdt_table(smp_processor_id());
22153+
22154+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22155+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22156+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22157+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22158+
22159+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22160 : "=a" (return_code),
22161 "=b" (address),
22162 "=c" (length),
22163 "=d" (entry)
22164 : "0" (service),
22165 "1" (0),
22166- "D" (&bios32_indirect));
22167+ "D" (&bios32_indirect),
22168+ "r"(__PCIBIOS_DS)
22169+ : "memory");
22170+
22171+ pax_open_kernel();
22172+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22173+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22174+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22175+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22176+ pax_close_kernel();
22177+
22178 local_irq_restore(flags);
22179
22180 switch (return_code) {
22181- case 0:
22182- return address + entry;
22183- case 0x80: /* Not present */
22184- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22185- return 0;
22186- default: /* Shouldn't happen */
22187- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22188- service, return_code);
22189+ case 0: {
22190+ int cpu;
22191+ unsigned char flags;
22192+
22193+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22194+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22195+ printk(KERN_WARNING "bios32_service: not valid\n");
22196 return 0;
22197+ }
22198+ address = address + PAGE_OFFSET;
22199+ length += 16UL; /* some BIOSs underreport this... */
22200+ flags = 4;
22201+ if (length >= 64*1024*1024) {
22202+ length >>= PAGE_SHIFT;
22203+ flags |= 8;
22204+ }
22205+
22206+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22207+ gdt = get_cpu_gdt_table(cpu);
22208+ pack_descriptor(&d, address, length, 0x9b, flags);
22209+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22210+ pack_descriptor(&d, address, length, 0x93, flags);
22211+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22212+ }
22213+ return entry;
22214+ }
22215+ case 0x80: /* Not present */
22216+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22217+ return 0;
22218+ default: /* Shouldn't happen */
22219+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22220+ service, return_code);
22221+ return 0;
22222 }
22223 }
22224
22225 static struct {
22226 unsigned long address;
22227 unsigned short segment;
22228-} pci_indirect = { 0, __KERNEL_CS };
22229+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22230
22231-static int pci_bios_present;
22232+static int pci_bios_present __read_only;
22233
22234 static int __devinit check_pcibios(void)
22235 {
22236@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
22237 unsigned long flags, pcibios_entry;
22238
22239 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22240- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22241+ pci_indirect.address = pcibios_entry;
22242
22243 local_irq_save(flags);
22244- __asm__(
22245- "lcall *(%%edi); cld\n\t"
22246+ __asm__("movw %w6, %%ds\n\t"
22247+ "lcall *%%ss:(%%edi); cld\n\t"
22248+ "push %%ss\n\t"
22249+ "pop %%ds\n\t"
22250 "jc 1f\n\t"
22251 "xor %%ah, %%ah\n"
22252 "1:"
22253@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
22254 "=b" (ebx),
22255 "=c" (ecx)
22256 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22257- "D" (&pci_indirect)
22258+ "D" (&pci_indirect),
22259+ "r" (__PCIBIOS_DS)
22260 : "memory");
22261 local_irq_restore(flags);
22262
22263@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int se
22264
22265 switch (len) {
22266 case 1:
22267- __asm__("lcall *(%%esi); cld\n\t"
22268+ __asm__("movw %w6, %%ds\n\t"
22269+ "lcall *%%ss:(%%esi); cld\n\t"
22270+ "push %%ss\n\t"
22271+ "pop %%ds\n\t"
22272 "jc 1f\n\t"
22273 "xor %%ah, %%ah\n"
22274 "1:"
22275@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int se
22276 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22277 "b" (bx),
22278 "D" ((long)reg),
22279- "S" (&pci_indirect));
22280+ "S" (&pci_indirect),
22281+ "r" (__PCIBIOS_DS));
22282 /*
22283 * Zero-extend the result beyond 8 bits, do not trust the
22284 * BIOS having done it:
22285@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int se
22286 *value &= 0xff;
22287 break;
22288 case 2:
22289- __asm__("lcall *(%%esi); cld\n\t"
22290+ __asm__("movw %w6, %%ds\n\t"
22291+ "lcall *%%ss:(%%esi); cld\n\t"
22292+ "push %%ss\n\t"
22293+ "pop %%ds\n\t"
22294 "jc 1f\n\t"
22295 "xor %%ah, %%ah\n"
22296 "1:"
22297@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int se
22298 : "1" (PCIBIOS_READ_CONFIG_WORD),
22299 "b" (bx),
22300 "D" ((long)reg),
22301- "S" (&pci_indirect));
22302+ "S" (&pci_indirect),
22303+ "r" (__PCIBIOS_DS));
22304 /*
22305 * Zero-extend the result beyond 16 bits, do not trust the
22306 * BIOS having done it:
22307@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int se
22308 *value &= 0xffff;
22309 break;
22310 case 4:
22311- __asm__("lcall *(%%esi); cld\n\t"
22312+ __asm__("movw %w6, %%ds\n\t"
22313+ "lcall *%%ss:(%%esi); cld\n\t"
22314+ "push %%ss\n\t"
22315+ "pop %%ds\n\t"
22316 "jc 1f\n\t"
22317 "xor %%ah, %%ah\n"
22318 "1:"
22319@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int se
22320 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22321 "b" (bx),
22322 "D" ((long)reg),
22323- "S" (&pci_indirect));
22324+ "S" (&pci_indirect),
22325+ "r" (__PCIBIOS_DS));
22326 break;
22327 }
22328
22329@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int s
22330
22331 switch (len) {
22332 case 1:
22333- __asm__("lcall *(%%esi); cld\n\t"
22334+ __asm__("movw %w6, %%ds\n\t"
22335+ "lcall *%%ss:(%%esi); cld\n\t"
22336+ "push %%ss\n\t"
22337+ "pop %%ds\n\t"
22338 "jc 1f\n\t"
22339 "xor %%ah, %%ah\n"
22340 "1:"
22341@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int s
22342 "c" (value),
22343 "b" (bx),
22344 "D" ((long)reg),
22345- "S" (&pci_indirect));
22346+ "S" (&pci_indirect),
22347+ "r" (__PCIBIOS_DS));
22348 break;
22349 case 2:
22350- __asm__("lcall *(%%esi); cld\n\t"
22351+ __asm__("movw %w6, %%ds\n\t"
22352+ "lcall *%%ss:(%%esi); cld\n\t"
22353+ "push %%ss\n\t"
22354+ "pop %%ds\n\t"
22355 "jc 1f\n\t"
22356 "xor %%ah, %%ah\n"
22357 "1:"
22358@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int s
22359 "c" (value),
22360 "b" (bx),
22361 "D" ((long)reg),
22362- "S" (&pci_indirect));
22363+ "S" (&pci_indirect),
22364+ "r" (__PCIBIOS_DS));
22365 break;
22366 case 4:
22367- __asm__("lcall *(%%esi); cld\n\t"
22368+ __asm__("movw %w6, %%ds\n\t"
22369+ "lcall *%%ss:(%%esi); cld\n\t"
22370+ "push %%ss\n\t"
22371+ "pop %%ds\n\t"
22372 "jc 1f\n\t"
22373 "xor %%ah, %%ah\n"
22374 "1:"
22375@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int s
22376 "c" (value),
22377 "b" (bx),
22378 "D" ((long)reg),
22379- "S" (&pci_indirect));
22380+ "S" (&pci_indirect),
22381+ "r" (__PCIBIOS_DS));
22382 break;
22383 }
22384
22385@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_i
22386
22387 DBG("PCI: Fetching IRQ routing table... ");
22388 __asm__("push %%es\n\t"
22389+ "movw %w8, %%ds\n\t"
22390 "push %%ds\n\t"
22391 "pop %%es\n\t"
22392- "lcall *(%%esi); cld\n\t"
22393+ "lcall *%%ss:(%%esi); cld\n\t"
22394 "pop %%es\n\t"
22395+ "push %%ss\n\t"
22396+ "pop %%ds\n"
22397 "jc 1f\n\t"
22398 "xor %%ah, %%ah\n"
22399 "1:"
22400@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_i
22401 "1" (0),
22402 "D" ((long) &opt),
22403 "S" (&pci_indirect),
22404- "m" (opt)
22405+ "m" (opt),
22406+ "r" (__PCIBIOS_DS)
22407 : "memory");
22408 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22409 if (ret & 0xff00)
22410@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_d
22411 {
22412 int ret;
22413
22414- __asm__("lcall *(%%esi); cld\n\t"
22415+ __asm__("movw %w5, %%ds\n\t"
22416+ "lcall *%%ss:(%%esi); cld\n\t"
22417+ "push %%ss\n\t"
22418+ "pop %%ds\n"
22419 "jc 1f\n\t"
22420 "xor %%ah, %%ah\n"
22421 "1:"
22422@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_d
22423 : "0" (PCIBIOS_SET_PCI_HW_INT),
22424 "b" ((dev->bus->number << 8) | dev->devfn),
22425 "c" ((irq << 8) | (pin + 10)),
22426- "S" (&pci_indirect));
22427+ "S" (&pci_indirect),
22428+ "r" (__PCIBIOS_DS));
22429 return !(ret & 0xff00);
22430 }
22431 EXPORT_SYMBOL(pcibios_set_irq_routing);
22432diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_32.c linux-3.1.1/arch/x86/platform/efi/efi_32.c
22433--- linux-3.1.1/arch/x86/platform/efi/efi_32.c 2011-11-11 15:19:27.000000000 -0500
22434+++ linux-3.1.1/arch/x86/platform/efi/efi_32.c 2011-11-16 18:39:07.000000000 -0500
22435@@ -38,70 +38,56 @@
22436 */
22437
22438 static unsigned long efi_rt_eflags;
22439-static pgd_t efi_bak_pg_dir_pointer[2];
22440+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
22441
22442-void efi_call_phys_prelog(void)
22443+void __init efi_call_phys_prelog(void)
22444 {
22445- unsigned long cr4;
22446- unsigned long temp;
22447 struct desc_ptr gdt_descr;
22448
22449- local_irq_save(efi_rt_eflags);
22450+#ifdef CONFIG_PAX_KERNEXEC
22451+ struct desc_struct d;
22452+#endif
22453
22454- /*
22455- * If I don't have PAE, I should just duplicate two entries in page
22456- * directory. If I have PAE, I just need to duplicate one entry in
22457- * page directory.
22458- */
22459- cr4 = read_cr4_safe();
22460+ local_irq_save(efi_rt_eflags);
22461
22462- if (cr4 & X86_CR4_PAE) {
22463- efi_bak_pg_dir_pointer[0].pgd =
22464- swapper_pg_dir[pgd_index(0)].pgd;
22465- swapper_pg_dir[0].pgd =
22466- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22467- } else {
22468- efi_bak_pg_dir_pointer[0].pgd =
22469- swapper_pg_dir[pgd_index(0)].pgd;
22470- efi_bak_pg_dir_pointer[1].pgd =
22471- swapper_pg_dir[pgd_index(0x400000)].pgd;
22472- swapper_pg_dir[pgd_index(0)].pgd =
22473- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22474- temp = PAGE_OFFSET + 0x400000;
22475- swapper_pg_dir[pgd_index(0x400000)].pgd =
22476- swapper_pg_dir[pgd_index(temp)].pgd;
22477- }
22478+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
22479+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22480+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
22481
22482 /*
22483 * After the lock is released, the original page table is restored.
22484 */
22485 __flush_tlb_all();
22486
22487+#ifdef CONFIG_PAX_KERNEXEC
22488+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
22489+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22490+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
22491+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22492+#endif
22493+
22494 gdt_descr.address = __pa(get_cpu_gdt_table(0));
22495 gdt_descr.size = GDT_SIZE - 1;
22496 load_gdt(&gdt_descr);
22497 }
22498
22499-void efi_call_phys_epilog(void)
22500+void __init efi_call_phys_epilog(void)
22501 {
22502- unsigned long cr4;
22503 struct desc_ptr gdt_descr;
22504
22505+#ifdef CONFIG_PAX_KERNEXEC
22506+ struct desc_struct d;
22507+
22508+ memset(&d, 0, sizeof d);
22509+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22510+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22511+#endif
22512+
22513 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
22514 gdt_descr.size = GDT_SIZE - 1;
22515 load_gdt(&gdt_descr);
22516
22517- cr4 = read_cr4_safe();
22518-
22519- if (cr4 & X86_CR4_PAE) {
22520- swapper_pg_dir[pgd_index(0)].pgd =
22521- efi_bak_pg_dir_pointer[0].pgd;
22522- } else {
22523- swapper_pg_dir[pgd_index(0)].pgd =
22524- efi_bak_pg_dir_pointer[0].pgd;
22525- swapper_pg_dir[pgd_index(0x400000)].pgd =
22526- efi_bak_pg_dir_pointer[1].pgd;
22527- }
22528+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
22529
22530 /*
22531 * After the lock is released, the original page table is restored.
22532diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S
22533--- linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S 2011-11-11 15:19:27.000000000 -0500
22534+++ linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S 2011-11-16 18:39:07.000000000 -0500
22535@@ -6,7 +6,9 @@
22536 */
22537
22538 #include <linux/linkage.h>
22539+#include <linux/init.h>
22540 #include <asm/page_types.h>
22541+#include <asm/segment.h>
22542
22543 /*
22544 * efi_call_phys(void *, ...) is a function with variable parameters.
22545@@ -20,7 +22,7 @@
22546 * service functions will comply with gcc calling convention, too.
22547 */
22548
22549-.text
22550+__INIT
22551 ENTRY(efi_call_phys)
22552 /*
22553 * 0. The function can only be called in Linux kernel. So CS has been
22554@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
22555 * The mapping of lower virtual memory has been created in prelog and
22556 * epilog.
22557 */
22558- movl $1f, %edx
22559- subl $__PAGE_OFFSET, %edx
22560- jmp *%edx
22561+ movl $(__KERNEXEC_EFI_DS), %edx
22562+ mov %edx, %ds
22563+ mov %edx, %es
22564+ mov %edx, %ss
22565+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
22566 1:
22567
22568 /*
22569@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
22570 * parameter 2, ..., param n. To make things easy, we save the return
22571 * address of efi_call_phys in a global variable.
22572 */
22573- popl %edx
22574- movl %edx, saved_return_addr
22575- /* get the function pointer into ECX*/
22576- popl %ecx
22577- movl %ecx, efi_rt_function_ptr
22578- movl $2f, %edx
22579- subl $__PAGE_OFFSET, %edx
22580- pushl %edx
22581+ popl (saved_return_addr)
22582+ popl (efi_rt_function_ptr)
22583
22584 /*
22585 * 3. Clear PG bit in %CR0.
22586@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
22587 /*
22588 * 5. Call the physical function.
22589 */
22590- jmp *%ecx
22591+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
22592
22593-2:
22594 /*
22595 * 6. After EFI runtime service returns, control will return to
22596 * following instruction. We'd better readjust stack pointer first.
22597@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
22598 movl %cr0, %edx
22599 orl $0x80000000, %edx
22600 movl %edx, %cr0
22601- jmp 1f
22602-1:
22603+
22604 /*
22605 * 8. Now restore the virtual mode from flat mode by
22606 * adding EIP with PAGE_OFFSET.
22607 */
22608- movl $1f, %edx
22609- jmp *%edx
22610+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
22611 1:
22612+ movl $(__KERNEL_DS), %edx
22613+ mov %edx, %ds
22614+ mov %edx, %es
22615+ mov %edx, %ss
22616
22617 /*
22618 * 9. Balance the stack. And because EAX contain the return value,
22619 * we'd better not clobber it.
22620 */
22621- leal efi_rt_function_ptr, %edx
22622- movl (%edx), %ecx
22623- pushl %ecx
22624+ pushl (efi_rt_function_ptr)
22625
22626 /*
22627- * 10. Push the saved return address onto the stack and return.
22628+ * 10. Return to the saved return address.
22629 */
22630- leal saved_return_addr, %edx
22631- movl (%edx), %ecx
22632- pushl %ecx
22633- ret
22634+ jmpl *(saved_return_addr)
22635 ENDPROC(efi_call_phys)
22636 .previous
22637
22638-.data
22639+__INITDATA
22640 saved_return_addr:
22641 .long 0
22642 efi_rt_function_ptr:
22643diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S
22644--- linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S 2011-11-11 15:19:27.000000000 -0500
22645+++ linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S 2011-11-16 18:39:07.000000000 -0500
22646@@ -7,6 +7,7 @@
22647 */
22648
22649 #include <linux/linkage.h>
22650+#include <asm/alternative-asm.h>
22651
22652 #define SAVE_XMM \
22653 mov %rsp, %rax; \
22654@@ -40,6 +41,7 @@ ENTRY(efi_call0)
22655 call *%rdi
22656 addq $32, %rsp
22657 RESTORE_XMM
22658+ pax_force_retaddr
22659 ret
22660 ENDPROC(efi_call0)
22661
22662@@ -50,6 +52,7 @@ ENTRY(efi_call1)
22663 call *%rdi
22664 addq $32, %rsp
22665 RESTORE_XMM
22666+ pax_force_retaddr
22667 ret
22668 ENDPROC(efi_call1)
22669
22670@@ -60,6 +63,7 @@ ENTRY(efi_call2)
22671 call *%rdi
22672 addq $32, %rsp
22673 RESTORE_XMM
22674+ pax_force_retaddr
22675 ret
22676 ENDPROC(efi_call2)
22677
22678@@ -71,6 +75,7 @@ ENTRY(efi_call3)
22679 call *%rdi
22680 addq $32, %rsp
22681 RESTORE_XMM
22682+ pax_force_retaddr
22683 ret
22684 ENDPROC(efi_call3)
22685
22686@@ -83,6 +88,7 @@ ENTRY(efi_call4)
22687 call *%rdi
22688 addq $32, %rsp
22689 RESTORE_XMM
22690+ pax_force_retaddr
22691 ret
22692 ENDPROC(efi_call4)
22693
22694@@ -96,6 +102,7 @@ ENTRY(efi_call5)
22695 call *%rdi
22696 addq $48, %rsp
22697 RESTORE_XMM
22698+ pax_force_retaddr
22699 ret
22700 ENDPROC(efi_call5)
22701
22702@@ -112,5 +119,6 @@ ENTRY(efi_call6)
22703 call *%rdi
22704 addq $48, %rsp
22705 RESTORE_XMM
22706+ pax_force_retaddr
22707 ret
22708 ENDPROC(efi_call6)
22709diff -urNp linux-3.1.1/arch/x86/platform/mrst/mrst.c linux-3.1.1/arch/x86/platform/mrst/mrst.c
22710--- linux-3.1.1/arch/x86/platform/mrst/mrst.c 2011-11-11 15:19:27.000000000 -0500
22711+++ linux-3.1.1/arch/x86/platform/mrst/mrst.c 2011-11-16 18:39:07.000000000 -0500
22712@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
22713 }
22714
22715 /* Reboot and power off are handled by the SCU on a MID device */
22716-static void mrst_power_off(void)
22717+static __noreturn void mrst_power_off(void)
22718 {
22719 intel_scu_ipc_simple_command(0xf1, 1);
22720+ BUG();
22721 }
22722
22723-static void mrst_reboot(void)
22724+static __noreturn void mrst_reboot(void)
22725 {
22726 intel_scu_ipc_simple_command(0xf1, 0);
22727+ BUG();
22728 }
22729
22730 /*
22731diff -urNp linux-3.1.1/arch/x86/platform/uv/tlb_uv.c linux-3.1.1/arch/x86/platform/uv/tlb_uv.c
22732--- linux-3.1.1/arch/x86/platform/uv/tlb_uv.c 2011-11-11 15:19:27.000000000 -0500
22733+++ linux-3.1.1/arch/x86/platform/uv/tlb_uv.c 2011-11-16 19:39:11.000000000 -0500
22734@@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask
22735 struct bau_control *smaster = bcp->socket_master;
22736 struct reset_args reset_args;
22737
22738+ pax_track_stack();
22739+
22740 reset_args.sender = sender;
22741 cpus_clear(*mask);
22742 /* find a single cpu for each uvhub in this distribution mask */
22743diff -urNp linux-3.1.1/arch/x86/power/cpu.c linux-3.1.1/arch/x86/power/cpu.c
22744--- linux-3.1.1/arch/x86/power/cpu.c 2011-11-11 15:19:27.000000000 -0500
22745+++ linux-3.1.1/arch/x86/power/cpu.c 2011-11-16 18:39:07.000000000 -0500
22746@@ -130,7 +130,7 @@ static void do_fpu_end(void)
22747 static void fix_processor_context(void)
22748 {
22749 int cpu = smp_processor_id();
22750- struct tss_struct *t = &per_cpu(init_tss, cpu);
22751+ struct tss_struct *t = init_tss + cpu;
22752
22753 set_tss_desc(cpu, t); /*
22754 * This just modifies memory; should not be
22755@@ -140,7 +140,9 @@ static void fix_processor_context(void)
22756 */
22757
22758 #ifdef CONFIG_X86_64
22759+ pax_open_kernel();
22760 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22761+ pax_close_kernel();
22762
22763 syscall_init(); /* This sets MSR_*STAR and related */
22764 #endif
22765diff -urNp linux-3.1.1/arch/x86/vdso/Makefile linux-3.1.1/arch/x86/vdso/Makefile
22766--- linux-3.1.1/arch/x86/vdso/Makefile 2011-11-11 15:19:27.000000000 -0500
22767+++ linux-3.1.1/arch/x86/vdso/Makefile 2011-11-16 18:39:07.000000000 -0500
22768@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
22769 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
22770 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
22771
22772-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22773+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22774 GCOV_PROFILE := n
22775
22776 #
22777diff -urNp linux-3.1.1/arch/x86/vdso/vdso32-setup.c linux-3.1.1/arch/x86/vdso/vdso32-setup.c
22778--- linux-3.1.1/arch/x86/vdso/vdso32-setup.c 2011-11-11 15:19:27.000000000 -0500
22779+++ linux-3.1.1/arch/x86/vdso/vdso32-setup.c 2011-11-16 18:39:07.000000000 -0500
22780@@ -25,6 +25,7 @@
22781 #include <asm/tlbflush.h>
22782 #include <asm/vdso.h>
22783 #include <asm/proto.h>
22784+#include <asm/mman.h>
22785
22786 enum {
22787 VDSO_DISABLED = 0,
22788@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22789 void enable_sep_cpu(void)
22790 {
22791 int cpu = get_cpu();
22792- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22793+ struct tss_struct *tss = init_tss + cpu;
22794
22795 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22796 put_cpu();
22797@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22798 gate_vma.vm_start = FIXADDR_USER_START;
22799 gate_vma.vm_end = FIXADDR_USER_END;
22800 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22801- gate_vma.vm_page_prot = __P101;
22802+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22803 /*
22804 * Make sure the vDSO gets into every core dump.
22805 * Dumping its contents makes post-mortem fully interpretable later
22806@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22807 if (compat)
22808 addr = VDSO_HIGH_BASE;
22809 else {
22810- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22811+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22812 if (IS_ERR_VALUE(addr)) {
22813 ret = addr;
22814 goto up_fail;
22815 }
22816 }
22817
22818- current->mm->context.vdso = (void *)addr;
22819+ current->mm->context.vdso = addr;
22820
22821 if (compat_uses_vma || !compat) {
22822 /*
22823@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22824 }
22825
22826 current_thread_info()->sysenter_return =
22827- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22828+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22829
22830 up_fail:
22831 if (ret)
22832- current->mm->context.vdso = NULL;
22833+ current->mm->context.vdso = 0;
22834
22835 up_write(&mm->mmap_sem);
22836
22837@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
22838
22839 const char *arch_vma_name(struct vm_area_struct *vma)
22840 {
22841- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22842+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22843 return "[vdso]";
22844+
22845+#ifdef CONFIG_PAX_SEGMEXEC
22846+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22847+ return "[vdso]";
22848+#endif
22849+
22850 return NULL;
22851 }
22852
22853@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22854 * Check to see if the corresponding task was created in compat vdso
22855 * mode.
22856 */
22857- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22858+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22859 return &gate_vma;
22860 return NULL;
22861 }
22862diff -urNp linux-3.1.1/arch/x86/vdso/vma.c linux-3.1.1/arch/x86/vdso/vma.c
22863--- linux-3.1.1/arch/x86/vdso/vma.c 2011-11-11 15:19:27.000000000 -0500
22864+++ linux-3.1.1/arch/x86/vdso/vma.c 2011-11-16 18:39:07.000000000 -0500
22865@@ -16,8 +16,6 @@
22866 #include <asm/vdso.h>
22867 #include <asm/page.h>
22868
22869-unsigned int __read_mostly vdso_enabled = 1;
22870-
22871 extern char vdso_start[], vdso_end[];
22872 extern unsigned short vdso_sync_cpuid;
22873
22874@@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned
22875 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
22876 {
22877 struct mm_struct *mm = current->mm;
22878- unsigned long addr;
22879+ unsigned long addr = 0;
22880 int ret;
22881
22882- if (!vdso_enabled)
22883- return 0;
22884-
22885 down_write(&mm->mmap_sem);
22886+
22887+#ifdef CONFIG_PAX_RANDMMAP
22888+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22889+#endif
22890+
22891 addr = vdso_addr(mm->start_stack, vdso_size);
22892 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22893 if (IS_ERR_VALUE(addr)) {
22894@@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct l
22895 goto up_fail;
22896 }
22897
22898- current->mm->context.vdso = (void *)addr;
22899+ mm->context.vdso = addr;
22900
22901 ret = install_special_mapping(mm, addr, vdso_size,
22902 VM_READ|VM_EXEC|
22903 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22904 VM_ALWAYSDUMP,
22905 vdso_pages);
22906- if (ret) {
22907- current->mm->context.vdso = NULL;
22908- goto up_fail;
22909- }
22910+
22911+ if (ret)
22912+ mm->context.vdso = 0;
22913
22914 up_fail:
22915 up_write(&mm->mmap_sem);
22916 return ret;
22917 }
22918-
22919-static __init int vdso_setup(char *s)
22920-{
22921- vdso_enabled = simple_strtoul(s, NULL, 0);
22922- return 0;
22923-}
22924-__setup("vdso=", vdso_setup);
22925diff -urNp linux-3.1.1/arch/x86/xen/enlighten.c linux-3.1.1/arch/x86/xen/enlighten.c
22926--- linux-3.1.1/arch/x86/xen/enlighten.c 2011-11-11 15:19:27.000000000 -0500
22927+++ linux-3.1.1/arch/x86/xen/enlighten.c 2011-11-16 18:39:07.000000000 -0500
22928@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22929
22930 struct shared_info xen_dummy_shared_info;
22931
22932-void *xen_initial_gdt;
22933-
22934 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22935 __read_mostly int xen_have_vector_callback;
22936 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22937@@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic
22938 #endif
22939 };
22940
22941-static void xen_reboot(int reason)
22942+static __noreturn void xen_reboot(int reason)
22943 {
22944 struct sched_shutdown r = { .reason = reason };
22945
22946@@ -1036,17 +1034,17 @@ static void xen_reboot(int reason)
22947 BUG();
22948 }
22949
22950-static void xen_restart(char *msg)
22951+static __noreturn void xen_restart(char *msg)
22952 {
22953 xen_reboot(SHUTDOWN_reboot);
22954 }
22955
22956-static void xen_emergency_restart(void)
22957+static __noreturn void xen_emergency_restart(void)
22958 {
22959 xen_reboot(SHUTDOWN_reboot);
22960 }
22961
22962-static void xen_machine_halt(void)
22963+static __noreturn void xen_machine_halt(void)
22964 {
22965 xen_reboot(SHUTDOWN_poweroff);
22966 }
22967@@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(
22968 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22969
22970 /* Work out if we support NX */
22971- x86_configure_nx();
22972+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22973+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22974+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22975+ unsigned l, h;
22976+
22977+ __supported_pte_mask |= _PAGE_NX;
22978+ rdmsr(MSR_EFER, l, h);
22979+ l |= EFER_NX;
22980+ wrmsr(MSR_EFER, l, h);
22981+ }
22982+#endif
22983
22984 xen_setup_features();
22985
22986@@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(
22987
22988 machine_ops = xen_machine_ops;
22989
22990- /*
22991- * The only reliable way to retain the initial address of the
22992- * percpu gdt_page is to remember it here, so we can go and
22993- * mark it RW later, when the initial percpu area is freed.
22994- */
22995- xen_initial_gdt = &per_cpu(gdt_page, 0);
22996-
22997 xen_smp_init();
22998
22999 #ifdef CONFIG_ACPI_NUMA
23000diff -urNp linux-3.1.1/arch/x86/xen/mmu.c linux-3.1.1/arch/x86/xen/mmu.c
23001--- linux-3.1.1/arch/x86/xen/mmu.c 2011-11-11 15:19:27.000000000 -0500
23002+++ linux-3.1.1/arch/x86/xen/mmu.c 2011-11-16 18:39:07.000000000 -0500
23003@@ -1768,6 +1768,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
23004 convert_pfn_mfn(init_level4_pgt);
23005 convert_pfn_mfn(level3_ident_pgt);
23006 convert_pfn_mfn(level3_kernel_pgt);
23007+ convert_pfn_mfn(level3_vmalloc_pgt);
23008+ convert_pfn_mfn(level3_vmemmap_pgt);
23009
23010 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23011 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23012@@ -1786,7 +1788,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
23013 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23014 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23015 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23016+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23017+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23018 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23019+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23020 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23021 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23022
23023@@ -2000,6 +2005,7 @@ static void __init xen_post_allocator_in
23024 pv_mmu_ops.set_pud = xen_set_pud;
23025 #if PAGETABLE_LEVELS == 4
23026 pv_mmu_ops.set_pgd = xen_set_pgd;
23027+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
23028 #endif
23029
23030 /* This will work as long as patching hasn't happened yet
23031@@ -2081,6 +2087,7 @@ static const struct pv_mmu_ops xen_mmu_o
23032 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
23033 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
23034 .set_pgd = xen_set_pgd_hyper,
23035+ .set_pgd_batched = xen_set_pgd_hyper,
23036
23037 .alloc_pud = xen_alloc_pmd_init,
23038 .release_pud = xen_release_pmd_init,
23039diff -urNp linux-3.1.1/arch/x86/xen/smp.c linux-3.1.1/arch/x86/xen/smp.c
23040--- linux-3.1.1/arch/x86/xen/smp.c 2011-11-11 15:19:27.000000000 -0500
23041+++ linux-3.1.1/arch/x86/xen/smp.c 2011-11-16 18:39:07.000000000 -0500
23042@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
23043 {
23044 BUG_ON(smp_processor_id() != 0);
23045 native_smp_prepare_boot_cpu();
23046-
23047- /* We've switched to the "real" per-cpu gdt, so make sure the
23048- old memory can be recycled */
23049- make_lowmem_page_readwrite(xen_initial_gdt);
23050-
23051 xen_filter_cpu_maps();
23052 xen_setup_vcpu_info_placement();
23053 }
23054@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu,
23055 gdt = get_cpu_gdt_table(cpu);
23056
23057 ctxt->flags = VGCF_IN_KERNEL;
23058- ctxt->user_regs.ds = __USER_DS;
23059- ctxt->user_regs.es = __USER_DS;
23060+ ctxt->user_regs.ds = __KERNEL_DS;
23061+ ctxt->user_regs.es = __KERNEL_DS;
23062 ctxt->user_regs.ss = __KERNEL_DS;
23063 #ifdef CONFIG_X86_32
23064 ctxt->user_regs.fs = __KERNEL_PERCPU;
23065- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23066+ savesegment(gs, ctxt->user_regs.gs);
23067 #else
23068 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23069 #endif
23070@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned
23071 int rc;
23072
23073 per_cpu(current_task, cpu) = idle;
23074+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23075 #ifdef CONFIG_X86_32
23076 irq_ctx_init(cpu);
23077 #else
23078 clear_tsk_thread_flag(idle, TIF_FORK);
23079- per_cpu(kernel_stack, cpu) =
23080- (unsigned long)task_stack_page(idle) -
23081- KERNEL_STACK_OFFSET + THREAD_SIZE;
23082+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23083 #endif
23084 xen_setup_runstate_info(cpu);
23085 xen_setup_timer(cpu);
23086diff -urNp linux-3.1.1/arch/x86/xen/xen-asm_32.S linux-3.1.1/arch/x86/xen/xen-asm_32.S
23087--- linux-3.1.1/arch/x86/xen/xen-asm_32.S 2011-11-11 15:19:27.000000000 -0500
23088+++ linux-3.1.1/arch/x86/xen/xen-asm_32.S 2011-11-16 18:39:07.000000000 -0500
23089@@ -83,14 +83,14 @@ ENTRY(xen_iret)
23090 ESP_OFFSET=4 # bytes pushed onto stack
23091
23092 /*
23093- * Store vcpu_info pointer for easy access. Do it this way to
23094- * avoid having to reload %fs
23095+ * Store vcpu_info pointer for easy access.
23096 */
23097 #ifdef CONFIG_SMP
23098- GET_THREAD_INFO(%eax)
23099- movl TI_cpu(%eax), %eax
23100- movl __per_cpu_offset(,%eax,4), %eax
23101- mov xen_vcpu(%eax), %eax
23102+ push %fs
23103+ mov $(__KERNEL_PERCPU), %eax
23104+ mov %eax, %fs
23105+ mov PER_CPU_VAR(xen_vcpu), %eax
23106+ pop %fs
23107 #else
23108 movl xen_vcpu, %eax
23109 #endif
23110diff -urNp linux-3.1.1/arch/x86/xen/xen-head.S linux-3.1.1/arch/x86/xen/xen-head.S
23111--- linux-3.1.1/arch/x86/xen/xen-head.S 2011-11-11 15:19:27.000000000 -0500
23112+++ linux-3.1.1/arch/x86/xen/xen-head.S 2011-11-16 18:39:07.000000000 -0500
23113@@ -19,6 +19,17 @@ ENTRY(startup_xen)
23114 #ifdef CONFIG_X86_32
23115 mov %esi,xen_start_info
23116 mov $init_thread_union+THREAD_SIZE,%esp
23117+#ifdef CONFIG_SMP
23118+ movl $cpu_gdt_table,%edi
23119+ movl $__per_cpu_load,%eax
23120+ movw %ax,__KERNEL_PERCPU + 2(%edi)
23121+ rorl $16,%eax
23122+ movb %al,__KERNEL_PERCPU + 4(%edi)
23123+ movb %ah,__KERNEL_PERCPU + 7(%edi)
23124+ movl $__per_cpu_end - 1,%eax
23125+ subl $__per_cpu_start,%eax
23126+ movw %ax,__KERNEL_PERCPU + 0(%edi)
23127+#endif
23128 #else
23129 mov %rsi,xen_start_info
23130 mov $init_thread_union+THREAD_SIZE,%rsp
23131diff -urNp linux-3.1.1/arch/x86/xen/xen-ops.h linux-3.1.1/arch/x86/xen/xen-ops.h
23132--- linux-3.1.1/arch/x86/xen/xen-ops.h 2011-11-11 15:19:27.000000000 -0500
23133+++ linux-3.1.1/arch/x86/xen/xen-ops.h 2011-11-16 18:39:07.000000000 -0500
23134@@ -10,8 +10,6 @@
23135 extern const char xen_hypervisor_callback[];
23136 extern const char xen_failsafe_callback[];
23137
23138-extern void *xen_initial_gdt;
23139-
23140 struct trap_info;
23141 void xen_copy_trap_info(struct trap_info *traps);
23142
23143diff -urNp linux-3.1.1/block/blk-iopoll.c linux-3.1.1/block/blk-iopoll.c
23144--- linux-3.1.1/block/blk-iopoll.c 2011-11-11 15:19:27.000000000 -0500
23145+++ linux-3.1.1/block/blk-iopoll.c 2011-11-16 18:39:07.000000000 -0500
23146@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23147 }
23148 EXPORT_SYMBOL(blk_iopoll_complete);
23149
23150-static void blk_iopoll_softirq(struct softirq_action *h)
23151+static void blk_iopoll_softirq(void)
23152 {
23153 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23154 int rearm = 0, budget = blk_iopoll_budget;
23155diff -urNp linux-3.1.1/block/blk-map.c linux-3.1.1/block/blk-map.c
23156--- linux-3.1.1/block/blk-map.c 2011-11-11 15:19:27.000000000 -0500
23157+++ linux-3.1.1/block/blk-map.c 2011-11-16 18:39:07.000000000 -0500
23158@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
23159 if (!len || !kbuf)
23160 return -EINVAL;
23161
23162- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
23163+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
23164 if (do_copy)
23165 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23166 else
23167diff -urNp linux-3.1.1/block/blk-softirq.c linux-3.1.1/block/blk-softirq.c
23168--- linux-3.1.1/block/blk-softirq.c 2011-11-11 15:19:27.000000000 -0500
23169+++ linux-3.1.1/block/blk-softirq.c 2011-11-16 18:39:07.000000000 -0500
23170@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23171 * Softirq action handler - move entries to local list and loop over them
23172 * while passing them to the queue registered handler.
23173 */
23174-static void blk_done_softirq(struct softirq_action *h)
23175+static void blk_done_softirq(void)
23176 {
23177 struct list_head *cpu_list, local_list;
23178
23179diff -urNp linux-3.1.1/block/bsg.c linux-3.1.1/block/bsg.c
23180--- linux-3.1.1/block/bsg.c 2011-11-11 15:19:27.000000000 -0500
23181+++ linux-3.1.1/block/bsg.c 2011-11-16 18:39:07.000000000 -0500
23182@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23183 struct sg_io_v4 *hdr, struct bsg_device *bd,
23184 fmode_t has_write_perm)
23185 {
23186+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23187+ unsigned char *cmdptr;
23188+
23189 if (hdr->request_len > BLK_MAX_CDB) {
23190 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23191 if (!rq->cmd)
23192 return -ENOMEM;
23193- }
23194+ cmdptr = rq->cmd;
23195+ } else
23196+ cmdptr = tmpcmd;
23197
23198- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
23199+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
23200 hdr->request_len))
23201 return -EFAULT;
23202
23203+ if (cmdptr != rq->cmd)
23204+ memcpy(rq->cmd, cmdptr, hdr->request_len);
23205+
23206 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23207 if (blk_verify_command(rq->cmd, has_write_perm))
23208 return -EPERM;
23209diff -urNp linux-3.1.1/block/compat_ioctl.c linux-3.1.1/block/compat_ioctl.c
23210--- linux-3.1.1/block/compat_ioctl.c 2011-11-11 15:19:27.000000000 -0500
23211+++ linux-3.1.1/block/compat_ioctl.c 2011-11-16 18:39:07.000000000 -0500
23212@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_
23213 err |= __get_user(f->spec1, &uf->spec1);
23214 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
23215 err |= __get_user(name, &uf->name);
23216- f->name = compat_ptr(name);
23217+ f->name = (void __force_kernel *)compat_ptr(name);
23218 if (err) {
23219 err = -EFAULT;
23220 goto out;
23221diff -urNp linux-3.1.1/block/scsi_ioctl.c linux-3.1.1/block/scsi_ioctl.c
23222--- linux-3.1.1/block/scsi_ioctl.c 2011-11-11 15:19:27.000000000 -0500
23223+++ linux-3.1.1/block/scsi_ioctl.c 2011-11-16 18:39:07.000000000 -0500
23224@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
23225 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23226 struct sg_io_hdr *hdr, fmode_t mode)
23227 {
23228- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23229+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23230+ unsigned char *cmdptr;
23231+
23232+ if (rq->cmd != rq->__cmd)
23233+ cmdptr = rq->cmd;
23234+ else
23235+ cmdptr = tmpcmd;
23236+
23237+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23238 return -EFAULT;
23239+
23240+ if (cmdptr != rq->cmd)
23241+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23242+
23243 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23244 return -EPERM;
23245
23246@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
23247 int err;
23248 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23249 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23250+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23251+ unsigned char *cmdptr;
23252
23253 if (!sic)
23254 return -EINVAL;
23255@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
23256 */
23257 err = -EFAULT;
23258 rq->cmd_len = cmdlen;
23259- if (copy_from_user(rq->cmd, sic->data, cmdlen))
23260+
23261+ if (rq->cmd != rq->__cmd)
23262+ cmdptr = rq->cmd;
23263+ else
23264+ cmdptr = tmpcmd;
23265+
23266+ if (copy_from_user(cmdptr, sic->data, cmdlen))
23267 goto error;
23268
23269+ if (rq->cmd != cmdptr)
23270+ memcpy(rq->cmd, cmdptr, cmdlen);
23271+
23272 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23273 goto error;
23274
23275diff -urNp linux-3.1.1/crypto/cryptd.c linux-3.1.1/crypto/cryptd.c
23276--- linux-3.1.1/crypto/cryptd.c 2011-11-11 15:19:27.000000000 -0500
23277+++ linux-3.1.1/crypto/cryptd.c 2011-11-16 18:39:07.000000000 -0500
23278@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
23279
23280 struct cryptd_blkcipher_request_ctx {
23281 crypto_completion_t complete;
23282-};
23283+} __no_const;
23284
23285 struct cryptd_hash_ctx {
23286 struct crypto_shash *child;
23287@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
23288
23289 struct cryptd_aead_request_ctx {
23290 crypto_completion_t complete;
23291-};
23292+} __no_const;
23293
23294 static void cryptd_queue_worker(struct work_struct *work);
23295
23296diff -urNp linux-3.1.1/crypto/serpent.c linux-3.1.1/crypto/serpent.c
23297--- linux-3.1.1/crypto/serpent.c 2011-11-11 15:19:27.000000000 -0500
23298+++ linux-3.1.1/crypto/serpent.c 2011-11-16 18:40:10.000000000 -0500
23299@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23300 u32 r0,r1,r2,r3,r4;
23301 int i;
23302
23303+ pax_track_stack();
23304+
23305 /* Copy key, add padding */
23306
23307 for (i = 0; i < keylen; ++i)
23308diff -urNp linux-3.1.1/Documentation/dontdiff linux-3.1.1/Documentation/dontdiff
23309--- linux-3.1.1/Documentation/dontdiff 2011-11-11 15:19:27.000000000 -0500
23310+++ linux-3.1.1/Documentation/dontdiff 2011-11-16 18:39:07.000000000 -0500
23311@@ -5,6 +5,7 @@
23312 *.cis
23313 *.cpio
23314 *.csp
23315+*.dbg
23316 *.dsp
23317 *.dvi
23318 *.elf
23319@@ -48,9 +49,11 @@
23320 *.tab.h
23321 *.tex
23322 *.ver
23323+*.vim
23324 *.xml
23325 *.xz
23326 *_MODULES
23327+*_reg_safe.h
23328 *_vga16.c
23329 *~
23330 \#*#
23331@@ -70,6 +73,7 @@ Kerntypes
23332 Module.markers
23333 Module.symvers
23334 PENDING
23335+PERF*
23336 SCCS
23337 System.map*
23338 TAGS
23339@@ -93,19 +97,24 @@ bounds.h
23340 bsetup
23341 btfixupprep
23342 build
23343+builtin-policy.h
23344 bvmlinux
23345 bzImage*
23346 capability_names.h
23347 capflags.c
23348 classlist.h*
23349+clut_vga16.c
23350+common-cmds.h
23351 comp*.log
23352 compile.h*
23353 conf
23354 config
23355 config-*
23356 config_data.h*
23357+config.c
23358 config.mak
23359 config.mak.autogen
23360+config.tmp
23361 conmakehash
23362 consolemap_deftbl.c*
23363 cpustr.h
23364@@ -119,6 +128,7 @@ dslm
23365 elf2ecoff
23366 elfconfig.h*
23367 evergreen_reg_safe.h
23368+exception_policy.conf
23369 fixdep
23370 flask.h
23371 fore200e_mkfirm
23372@@ -126,12 +136,14 @@ fore200e_pca_fw.c*
23373 gconf
23374 gconf.glade.h
23375 gen-devlist
23376+gen-kdb_cmds.c
23377 gen_crc32table
23378 gen_init_cpio
23379 generated
23380 genheaders
23381 genksyms
23382 *_gray256.c
23383+hash
23384 hpet_example
23385 hugepage-mmap
23386 hugepage-shm
23387@@ -146,7 +158,7 @@ int32.c
23388 int4.c
23389 int8.c
23390 kallsyms
23391-kconfig
23392+kern_constants.h
23393 keywords.c
23394 ksym.c*
23395 ksym.h*
23396@@ -154,7 +166,6 @@ kxgettext
23397 lkc_defs.h
23398 lex.c
23399 lex.*.c
23400-linux
23401 logo_*.c
23402 logo_*_clut224.c
23403 logo_*_mono.c
23404@@ -166,7 +177,6 @@ machtypes.h
23405 map
23406 map_hugetlb
23407 maui_boot.h
23408-media
23409 mconf
23410 miboot*
23411 mk_elfconfig
23412@@ -174,6 +184,7 @@ mkboot
23413 mkbugboot
23414 mkcpustr
23415 mkdep
23416+mkpiggy
23417 mkprep
23418 mkregtable
23419 mktables
23420@@ -209,6 +220,7 @@ r300_reg_safe.h
23421 r420_reg_safe.h
23422 r600_reg_safe.h
23423 recordmcount
23424+regdb.c
23425 relocs
23426 rlim_names.h
23427 rn50_reg_safe.h
23428@@ -219,6 +231,7 @@ setup
23429 setup.bin
23430 setup.elf
23431 sImage
23432+slabinfo
23433 sm_tbl*
23434 split-include
23435 syscalltab.h
23436@@ -229,6 +242,7 @@ tftpboot.img
23437 timeconst.h
23438 times.h*
23439 trix_boot.h
23440+user_constants.h
23441 utsrelease.h*
23442 vdso-syms.lds
23443 vdso.lds
23444@@ -246,7 +260,9 @@ vmlinux
23445 vmlinux-*
23446 vmlinux.aout
23447 vmlinux.bin.all
23448+vmlinux.bin.bz2
23449 vmlinux.lds
23450+vmlinux.relocs
23451 vmlinuz
23452 voffset.h
23453 vsyscall.lds
23454@@ -254,9 +270,11 @@ vsyscall_32.lds
23455 wanxlfw.inc
23456 uImage
23457 unifdef
23458+utsrelease.h
23459 wakeup.bin
23460 wakeup.elf
23461 wakeup.lds
23462 zImage*
23463 zconf.hash.c
23464+zconf.lex.c
23465 zoffset.h
23466diff -urNp linux-3.1.1/Documentation/kernel-parameters.txt linux-3.1.1/Documentation/kernel-parameters.txt
23467--- linux-3.1.1/Documentation/kernel-parameters.txt 2011-11-11 15:19:27.000000000 -0500
23468+++ linux-3.1.1/Documentation/kernel-parameters.txt 2011-11-16 18:39:07.000000000 -0500
23469@@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes
23470 the specified number of seconds. This is to be used if
23471 your oopses keep scrolling off the screen.
23472
23473+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23474+ virtualization environments that don't cope well with the
23475+ expand down segment used by UDEREF on X86-32 or the frequent
23476+ page table updates on X86-64.
23477+
23478+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23479+
23480 pcbit= [HW,ISDN]
23481
23482 pcd. [PARIDE]
23483diff -urNp linux-3.1.1/drivers/acpi/apei/cper.c linux-3.1.1/drivers/acpi/apei/cper.c
23484--- linux-3.1.1/drivers/acpi/apei/cper.c 2011-11-11 15:19:27.000000000 -0500
23485+++ linux-3.1.1/drivers/acpi/apei/cper.c 2011-11-16 18:39:07.000000000 -0500
23486@@ -38,12 +38,12 @@
23487 */
23488 u64 cper_next_record_id(void)
23489 {
23490- static atomic64_t seq;
23491+ static atomic64_unchecked_t seq;
23492
23493- if (!atomic64_read(&seq))
23494- atomic64_set(&seq, ((u64)get_seconds()) << 32);
23495+ if (!atomic64_read_unchecked(&seq))
23496+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
23497
23498- return atomic64_inc_return(&seq);
23499+ return atomic64_inc_return_unchecked(&seq);
23500 }
23501 EXPORT_SYMBOL_GPL(cper_next_record_id);
23502
23503diff -urNp linux-3.1.1/drivers/acpi/ec_sys.c linux-3.1.1/drivers/acpi/ec_sys.c
23504--- linux-3.1.1/drivers/acpi/ec_sys.c 2011-11-11 15:19:27.000000000 -0500
23505+++ linux-3.1.1/drivers/acpi/ec_sys.c 2011-11-16 18:39:07.000000000 -0500
23506@@ -11,6 +11,7 @@
23507 #include <linux/kernel.h>
23508 #include <linux/acpi.h>
23509 #include <linux/debugfs.h>
23510+#include <asm/uaccess.h>
23511 #include "internal.h"
23512
23513 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
23514@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
23515 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
23516 */
23517 unsigned int size = EC_SPACE_SIZE;
23518- u8 *data = (u8 *) buf;
23519+ u8 data;
23520 loff_t init_off = *off;
23521 int err = 0;
23522
23523@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
23524 size = count;
23525
23526 while (size) {
23527- err = ec_read(*off, &data[*off - init_off]);
23528+ err = ec_read(*off, &data);
23529 if (err)
23530 return err;
23531+ if (put_user(data, &buf[*off - init_off]))
23532+ return -EFAULT;
23533 *off += 1;
23534 size--;
23535 }
23536@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
23537
23538 unsigned int size = count;
23539 loff_t init_off = *off;
23540- u8 *data = (u8 *) buf;
23541 int err = 0;
23542
23543 if (*off >= EC_SPACE_SIZE)
23544@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
23545 }
23546
23547 while (size) {
23548- u8 byte_write = data[*off - init_off];
23549+ u8 byte_write;
23550+ if (get_user(byte_write, &buf[*off - init_off]))
23551+ return -EFAULT;
23552 err = ec_write(*off, byte_write);
23553 if (err)
23554 return err;
23555diff -urNp linux-3.1.1/drivers/acpi/proc.c linux-3.1.1/drivers/acpi/proc.c
23556--- linux-3.1.1/drivers/acpi/proc.c 2011-11-11 15:19:27.000000000 -0500
23557+++ linux-3.1.1/drivers/acpi/proc.c 2011-11-16 18:39:07.000000000 -0500
23558@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
23559 size_t count, loff_t * ppos)
23560 {
23561 struct list_head *node, *next;
23562- char strbuf[5];
23563- char str[5] = "";
23564- unsigned int len = count;
23565-
23566- if (len > 4)
23567- len = 4;
23568- if (len < 0)
23569- return -EFAULT;
23570+ char strbuf[5] = {0};
23571
23572- if (copy_from_user(strbuf, buffer, len))
23573+ if (count > 4)
23574+ count = 4;
23575+ if (copy_from_user(strbuf, buffer, count))
23576 return -EFAULT;
23577- strbuf[len] = '\0';
23578- sscanf(strbuf, "%s", str);
23579+ strbuf[count] = '\0';
23580
23581 mutex_lock(&acpi_device_lock);
23582 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23583@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
23584 if (!dev->wakeup.flags.valid)
23585 continue;
23586
23587- if (!strncmp(dev->pnp.bus_id, str, 4)) {
23588+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23589 if (device_can_wakeup(&dev->dev)) {
23590 bool enable = !device_may_wakeup(&dev->dev);
23591 device_set_wakeup_enable(&dev->dev, enable);
23592diff -urNp linux-3.1.1/drivers/acpi/processor_driver.c linux-3.1.1/drivers/acpi/processor_driver.c
23593--- linux-3.1.1/drivers/acpi/processor_driver.c 2011-11-11 15:19:27.000000000 -0500
23594+++ linux-3.1.1/drivers/acpi/processor_driver.c 2011-11-16 18:39:07.000000000 -0500
23595@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
23596 return 0;
23597 #endif
23598
23599- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23600+ BUG_ON(pr->id >= nr_cpu_ids);
23601
23602 /*
23603 * Buggy BIOS check
23604diff -urNp linux-3.1.1/drivers/ata/libata-core.c linux-3.1.1/drivers/ata/libata-core.c
23605--- linux-3.1.1/drivers/ata/libata-core.c 2011-11-11 15:19:27.000000000 -0500
23606+++ linux-3.1.1/drivers/ata/libata-core.c 2011-11-16 18:39:07.000000000 -0500
23607@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *
23608 struct ata_port *ap;
23609 unsigned int tag;
23610
23611- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23612+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23613 ap = qc->ap;
23614
23615 qc->flags = 0;
23616@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued
23617 struct ata_port *ap;
23618 struct ata_link *link;
23619
23620- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23621+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23622 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23623 ap = qc->ap;
23624 link = qc->dev->link;
23625@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct
23626 return;
23627
23628 spin_lock(&lock);
23629+ pax_open_kernel();
23630
23631 for (cur = ops->inherits; cur; cur = cur->inherits) {
23632 void **inherit = (void **)cur;
23633@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct
23634 if (IS_ERR(*pp))
23635 *pp = NULL;
23636
23637- ops->inherits = NULL;
23638+ *(struct ata_port_operations **)&ops->inherits = NULL;
23639
23640+ pax_close_kernel();
23641 spin_unlock(&lock);
23642 }
23643
23644diff -urNp linux-3.1.1/drivers/ata/libata-eh.c linux-3.1.1/drivers/ata/libata-eh.c
23645--- linux-3.1.1/drivers/ata/libata-eh.c 2011-11-11 15:19:27.000000000 -0500
23646+++ linux-3.1.1/drivers/ata/libata-eh.c 2011-11-16 18:40:10.000000000 -0500
23647@@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap)
23648 {
23649 struct ata_link *link;
23650
23651+ pax_track_stack();
23652+
23653 ata_for_each_link(link, ap, HOST_FIRST)
23654 ata_eh_link_report(link);
23655 }
23656diff -urNp linux-3.1.1/drivers/ata/pata_arasan_cf.c linux-3.1.1/drivers/ata/pata_arasan_cf.c
23657--- linux-3.1.1/drivers/ata/pata_arasan_cf.c 2011-11-11 15:19:27.000000000 -0500
23658+++ linux-3.1.1/drivers/ata/pata_arasan_cf.c 2011-11-16 18:39:07.000000000 -0500
23659@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
23660 /* Handle platform specific quirks */
23661 if (pdata->quirk) {
23662 if (pdata->quirk & CF_BROKEN_PIO) {
23663- ap->ops->set_piomode = NULL;
23664+ pax_open_kernel();
23665+ *(void **)&ap->ops->set_piomode = NULL;
23666+ pax_close_kernel();
23667 ap->pio_mask = 0;
23668 }
23669 if (pdata->quirk & CF_BROKEN_MWDMA)
23670diff -urNp linux-3.1.1/drivers/atm/adummy.c linux-3.1.1/drivers/atm/adummy.c
23671--- linux-3.1.1/drivers/atm/adummy.c 2011-11-11 15:19:27.000000000 -0500
23672+++ linux-3.1.1/drivers/atm/adummy.c 2011-11-16 18:39:07.000000000 -0500
23673@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
23674 vcc->pop(vcc, skb);
23675 else
23676 dev_kfree_skb_any(skb);
23677- atomic_inc(&vcc->stats->tx);
23678+ atomic_inc_unchecked(&vcc->stats->tx);
23679
23680 return 0;
23681 }
23682diff -urNp linux-3.1.1/drivers/atm/ambassador.c linux-3.1.1/drivers/atm/ambassador.c
23683--- linux-3.1.1/drivers/atm/ambassador.c 2011-11-11 15:19:27.000000000 -0500
23684+++ linux-3.1.1/drivers/atm/ambassador.c 2011-11-16 18:39:07.000000000 -0500
23685@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
23686 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
23687
23688 // VC layer stats
23689- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23690+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23691
23692 // free the descriptor
23693 kfree (tx_descr);
23694@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
23695 dump_skb ("<<<", vc, skb);
23696
23697 // VC layer stats
23698- atomic_inc(&atm_vcc->stats->rx);
23699+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23700 __net_timestamp(skb);
23701 // end of our responsibility
23702 atm_vcc->push (atm_vcc, skb);
23703@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
23704 } else {
23705 PRINTK (KERN_INFO, "dropped over-size frame");
23706 // should we count this?
23707- atomic_inc(&atm_vcc->stats->rx_drop);
23708+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23709 }
23710
23711 } else {
23712@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * at
23713 }
23714
23715 if (check_area (skb->data, skb->len)) {
23716- atomic_inc(&atm_vcc->stats->tx_err);
23717+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
23718 return -ENOMEM; // ?
23719 }
23720
23721diff -urNp linux-3.1.1/drivers/atm/atmtcp.c linux-3.1.1/drivers/atm/atmtcp.c
23722--- linux-3.1.1/drivers/atm/atmtcp.c 2011-11-11 15:19:27.000000000 -0500
23723+++ linux-3.1.1/drivers/atm/atmtcp.c 2011-11-16 18:39:07.000000000 -0500
23724@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
23725 if (vcc->pop) vcc->pop(vcc,skb);
23726 else dev_kfree_skb(skb);
23727 if (dev_data) return 0;
23728- atomic_inc(&vcc->stats->tx_err);
23729+ atomic_inc_unchecked(&vcc->stats->tx_err);
23730 return -ENOLINK;
23731 }
23732 size = skb->len+sizeof(struct atmtcp_hdr);
23733@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
23734 if (!new_skb) {
23735 if (vcc->pop) vcc->pop(vcc,skb);
23736 else dev_kfree_skb(skb);
23737- atomic_inc(&vcc->stats->tx_err);
23738+ atomic_inc_unchecked(&vcc->stats->tx_err);
23739 return -ENOBUFS;
23740 }
23741 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
23742@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
23743 if (vcc->pop) vcc->pop(vcc,skb);
23744 else dev_kfree_skb(skb);
23745 out_vcc->push(out_vcc,new_skb);
23746- atomic_inc(&vcc->stats->tx);
23747- atomic_inc(&out_vcc->stats->rx);
23748+ atomic_inc_unchecked(&vcc->stats->tx);
23749+ atomic_inc_unchecked(&out_vcc->stats->rx);
23750 return 0;
23751 }
23752
23753@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
23754 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
23755 read_unlock(&vcc_sklist_lock);
23756 if (!out_vcc) {
23757- atomic_inc(&vcc->stats->tx_err);
23758+ atomic_inc_unchecked(&vcc->stats->tx_err);
23759 goto done;
23760 }
23761 skb_pull(skb,sizeof(struct atmtcp_hdr));
23762@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
23763 __net_timestamp(new_skb);
23764 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
23765 out_vcc->push(out_vcc,new_skb);
23766- atomic_inc(&vcc->stats->tx);
23767- atomic_inc(&out_vcc->stats->rx);
23768+ atomic_inc_unchecked(&vcc->stats->tx);
23769+ atomic_inc_unchecked(&out_vcc->stats->rx);
23770 done:
23771 if (vcc->pop) vcc->pop(vcc,skb);
23772 else dev_kfree_skb(skb);
23773diff -urNp linux-3.1.1/drivers/atm/eni.c linux-3.1.1/drivers/atm/eni.c
23774--- linux-3.1.1/drivers/atm/eni.c 2011-11-11 15:19:27.000000000 -0500
23775+++ linux-3.1.1/drivers/atm/eni.c 2011-11-16 18:39:07.000000000 -0500
23776@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
23777 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
23778 vcc->dev->number);
23779 length = 0;
23780- atomic_inc(&vcc->stats->rx_err);
23781+ atomic_inc_unchecked(&vcc->stats->rx_err);
23782 }
23783 else {
23784 length = ATM_CELL_SIZE-1; /* no HEC */
23785@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23786 size);
23787 }
23788 eff = length = 0;
23789- atomic_inc(&vcc->stats->rx_err);
23790+ atomic_inc_unchecked(&vcc->stats->rx_err);
23791 }
23792 else {
23793 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
23794@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23795 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
23796 vcc->dev->number,vcc->vci,length,size << 2,descr);
23797 length = eff = 0;
23798- atomic_inc(&vcc->stats->rx_err);
23799+ atomic_inc_unchecked(&vcc->stats->rx_err);
23800 }
23801 }
23802 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
23803@@ -771,7 +771,7 @@ rx_dequeued++;
23804 vcc->push(vcc,skb);
23805 pushed++;
23806 }
23807- atomic_inc(&vcc->stats->rx);
23808+ atomic_inc_unchecked(&vcc->stats->rx);
23809 }
23810 wake_up(&eni_dev->rx_wait);
23811 }
23812@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23813 PCI_DMA_TODEVICE);
23814 if (vcc->pop) vcc->pop(vcc,skb);
23815 else dev_kfree_skb_irq(skb);
23816- atomic_inc(&vcc->stats->tx);
23817+ atomic_inc_unchecked(&vcc->stats->tx);
23818 wake_up(&eni_dev->tx_wait);
23819 dma_complete++;
23820 }
23821@@ -1568,7 +1568,7 @@ tx_complete++;
23822 /*--------------------------------- entries ---------------------------------*/
23823
23824
23825-static const char *media_name[] __devinitdata = {
23826+static const char *media_name[] __devinitconst = {
23827 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
23828 "UTP", "05?", "06?", "07?", /* 4- 7 */
23829 "TAXI","09?", "10?", "11?", /* 8-11 */
23830diff -urNp linux-3.1.1/drivers/atm/firestream.c linux-3.1.1/drivers/atm/firestream.c
23831--- linux-3.1.1/drivers/atm/firestream.c 2011-11-11 15:19:27.000000000 -0500
23832+++ linux-3.1.1/drivers/atm/firestream.c 2011-11-16 18:39:07.000000000 -0500
23833@@ -750,7 +750,7 @@ static void process_txdone_queue (struct
23834 }
23835 }
23836
23837- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23838+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23839
23840 fs_dprintk (FS_DEBUG_TXMEM, "i");
23841 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23842@@ -817,7 +817,7 @@ static void process_incoming (struct fs_
23843 #endif
23844 skb_put (skb, qe->p1 & 0xffff);
23845 ATM_SKB(skb)->vcc = atm_vcc;
23846- atomic_inc(&atm_vcc->stats->rx);
23847+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23848 __net_timestamp(skb);
23849 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23850 atm_vcc->push (atm_vcc, skb);
23851@@ -838,12 +838,12 @@ static void process_incoming (struct fs_
23852 kfree (pe);
23853 }
23854 if (atm_vcc)
23855- atomic_inc(&atm_vcc->stats->rx_drop);
23856+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23857 break;
23858 case 0x1f: /* Reassembly abort: no buffers. */
23859 /* Silently increment error counter. */
23860 if (atm_vcc)
23861- atomic_inc(&atm_vcc->stats->rx_drop);
23862+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23863 break;
23864 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23865 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23866diff -urNp linux-3.1.1/drivers/atm/fore200e.c linux-3.1.1/drivers/atm/fore200e.c
23867--- linux-3.1.1/drivers/atm/fore200e.c 2011-11-11 15:19:27.000000000 -0500
23868+++ linux-3.1.1/drivers/atm/fore200e.c 2011-11-16 18:39:07.000000000 -0500
23869@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23870 #endif
23871 /* check error condition */
23872 if (*entry->status & STATUS_ERROR)
23873- atomic_inc(&vcc->stats->tx_err);
23874+ atomic_inc_unchecked(&vcc->stats->tx_err);
23875 else
23876- atomic_inc(&vcc->stats->tx);
23877+ atomic_inc_unchecked(&vcc->stats->tx);
23878 }
23879 }
23880
23881@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23882 if (skb == NULL) {
23883 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23884
23885- atomic_inc(&vcc->stats->rx_drop);
23886+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23887 return -ENOMEM;
23888 }
23889
23890@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23891
23892 dev_kfree_skb_any(skb);
23893
23894- atomic_inc(&vcc->stats->rx_drop);
23895+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23896 return -ENOMEM;
23897 }
23898
23899 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23900
23901 vcc->push(vcc, skb);
23902- atomic_inc(&vcc->stats->rx);
23903+ atomic_inc_unchecked(&vcc->stats->rx);
23904
23905 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23906
23907@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23908 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23909 fore200e->atm_dev->number,
23910 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23911- atomic_inc(&vcc->stats->rx_err);
23912+ atomic_inc_unchecked(&vcc->stats->rx_err);
23913 }
23914 }
23915
23916@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23917 goto retry_here;
23918 }
23919
23920- atomic_inc(&vcc->stats->tx_err);
23921+ atomic_inc_unchecked(&vcc->stats->tx_err);
23922
23923 fore200e->tx_sat++;
23924 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23925diff -urNp linux-3.1.1/drivers/atm/he.c linux-3.1.1/drivers/atm/he.c
23926--- linux-3.1.1/drivers/atm/he.c 2011-11-11 15:19:27.000000000 -0500
23927+++ linux-3.1.1/drivers/atm/he.c 2011-11-16 18:39:07.000000000 -0500
23928@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23929
23930 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23931 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23932- atomic_inc(&vcc->stats->rx_drop);
23933+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23934 goto return_host_buffers;
23935 }
23936
23937@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23938 RBRQ_LEN_ERR(he_dev->rbrq_head)
23939 ? "LEN_ERR" : "",
23940 vcc->vpi, vcc->vci);
23941- atomic_inc(&vcc->stats->rx_err);
23942+ atomic_inc_unchecked(&vcc->stats->rx_err);
23943 goto return_host_buffers;
23944 }
23945
23946@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23947 vcc->push(vcc, skb);
23948 spin_lock(&he_dev->global_lock);
23949
23950- atomic_inc(&vcc->stats->rx);
23951+ atomic_inc_unchecked(&vcc->stats->rx);
23952
23953 return_host_buffers:
23954 ++pdus_assembled;
23955@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
23956 tpd->vcc->pop(tpd->vcc, tpd->skb);
23957 else
23958 dev_kfree_skb_any(tpd->skb);
23959- atomic_inc(&tpd->vcc->stats->tx_err);
23960+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
23961 }
23962 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
23963 return;
23964@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23965 vcc->pop(vcc, skb);
23966 else
23967 dev_kfree_skb_any(skb);
23968- atomic_inc(&vcc->stats->tx_err);
23969+ atomic_inc_unchecked(&vcc->stats->tx_err);
23970 return -EINVAL;
23971 }
23972
23973@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23974 vcc->pop(vcc, skb);
23975 else
23976 dev_kfree_skb_any(skb);
23977- atomic_inc(&vcc->stats->tx_err);
23978+ atomic_inc_unchecked(&vcc->stats->tx_err);
23979 return -EINVAL;
23980 }
23981 #endif
23982@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23983 vcc->pop(vcc, skb);
23984 else
23985 dev_kfree_skb_any(skb);
23986- atomic_inc(&vcc->stats->tx_err);
23987+ atomic_inc_unchecked(&vcc->stats->tx_err);
23988 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23989 return -ENOMEM;
23990 }
23991@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23992 vcc->pop(vcc, skb);
23993 else
23994 dev_kfree_skb_any(skb);
23995- atomic_inc(&vcc->stats->tx_err);
23996+ atomic_inc_unchecked(&vcc->stats->tx_err);
23997 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23998 return -ENOMEM;
23999 }
24000@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24001 __enqueue_tpd(he_dev, tpd, cid);
24002 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24003
24004- atomic_inc(&vcc->stats->tx);
24005+ atomic_inc_unchecked(&vcc->stats->tx);
24006
24007 return 0;
24008 }
24009diff -urNp linux-3.1.1/drivers/atm/horizon.c linux-3.1.1/drivers/atm/horizon.c
24010--- linux-3.1.1/drivers/atm/horizon.c 2011-11-11 15:19:27.000000000 -0500
24011+++ linux-3.1.1/drivers/atm/horizon.c 2011-11-16 18:39:07.000000000 -0500
24012@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev,
24013 {
24014 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
24015 // VC layer stats
24016- atomic_inc(&vcc->stats->rx);
24017+ atomic_inc_unchecked(&vcc->stats->rx);
24018 __net_timestamp(skb);
24019 // end of our responsibility
24020 vcc->push (vcc, skb);
24021@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const
24022 dev->tx_iovec = NULL;
24023
24024 // VC layer stats
24025- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24026+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24027
24028 // free the skb
24029 hrz_kfree_skb (skb);
24030diff -urNp linux-3.1.1/drivers/atm/idt77252.c linux-3.1.1/drivers/atm/idt77252.c
24031--- linux-3.1.1/drivers/atm/idt77252.c 2011-11-11 15:19:27.000000000 -0500
24032+++ linux-3.1.1/drivers/atm/idt77252.c 2011-11-16 18:39:07.000000000 -0500
24033@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, str
24034 else
24035 dev_kfree_skb(skb);
24036
24037- atomic_inc(&vcc->stats->tx);
24038+ atomic_inc_unchecked(&vcc->stats->tx);
24039 }
24040
24041 atomic_dec(&scq->used);
24042@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, st
24043 if ((sb = dev_alloc_skb(64)) == NULL) {
24044 printk("%s: Can't allocate buffers for aal0.\n",
24045 card->name);
24046- atomic_add(i, &vcc->stats->rx_drop);
24047+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
24048 break;
24049 }
24050 if (!atm_charge(vcc, sb->truesize)) {
24051 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
24052 card->name);
24053- atomic_add(i - 1, &vcc->stats->rx_drop);
24054+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
24055 dev_kfree_skb(sb);
24056 break;
24057 }
24058@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, st
24059 ATM_SKB(sb)->vcc = vcc;
24060 __net_timestamp(sb);
24061 vcc->push(vcc, sb);
24062- atomic_inc(&vcc->stats->rx);
24063+ atomic_inc_unchecked(&vcc->stats->rx);
24064
24065 cell += ATM_CELL_PAYLOAD;
24066 }
24067@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, st
24068 "(CDC: %08x)\n",
24069 card->name, len, rpp->len, readl(SAR_REG_CDC));
24070 recycle_rx_pool_skb(card, rpp);
24071- atomic_inc(&vcc->stats->rx_err);
24072+ atomic_inc_unchecked(&vcc->stats->rx_err);
24073 return;
24074 }
24075 if (stat & SAR_RSQE_CRC) {
24076 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
24077 recycle_rx_pool_skb(card, rpp);
24078- atomic_inc(&vcc->stats->rx_err);
24079+ atomic_inc_unchecked(&vcc->stats->rx_err);
24080 return;
24081 }
24082 if (skb_queue_len(&rpp->queue) > 1) {
24083@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, st
24084 RXPRINTK("%s: Can't alloc RX skb.\n",
24085 card->name);
24086 recycle_rx_pool_skb(card, rpp);
24087- atomic_inc(&vcc->stats->rx_err);
24088+ atomic_inc_unchecked(&vcc->stats->rx_err);
24089 return;
24090 }
24091 if (!atm_charge(vcc, skb->truesize)) {
24092@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, st
24093 __net_timestamp(skb);
24094
24095 vcc->push(vcc, skb);
24096- atomic_inc(&vcc->stats->rx);
24097+ atomic_inc_unchecked(&vcc->stats->rx);
24098
24099 return;
24100 }
24101@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, st
24102 __net_timestamp(skb);
24103
24104 vcc->push(vcc, skb);
24105- atomic_inc(&vcc->stats->rx);
24106+ atomic_inc_unchecked(&vcc->stats->rx);
24107
24108 if (skb->truesize > SAR_FB_SIZE_3)
24109 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
24110@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
24111 if (vcc->qos.aal != ATM_AAL0) {
24112 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
24113 card->name, vpi, vci);
24114- atomic_inc(&vcc->stats->rx_drop);
24115+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24116 goto drop;
24117 }
24118
24119 if ((sb = dev_alloc_skb(64)) == NULL) {
24120 printk("%s: Can't allocate buffers for AAL0.\n",
24121 card->name);
24122- atomic_inc(&vcc->stats->rx_err);
24123+ atomic_inc_unchecked(&vcc->stats->rx_err);
24124 goto drop;
24125 }
24126
24127@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
24128 ATM_SKB(sb)->vcc = vcc;
24129 __net_timestamp(sb);
24130 vcc->push(vcc, sb);
24131- atomic_inc(&vcc->stats->rx);
24132+ atomic_inc_unchecked(&vcc->stats->rx);
24133
24134 drop:
24135 skb_pull(queue, 64);
24136@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24137
24138 if (vc == NULL) {
24139 printk("%s: NULL connection in send().\n", card->name);
24140- atomic_inc(&vcc->stats->tx_err);
24141+ atomic_inc_unchecked(&vcc->stats->tx_err);
24142 dev_kfree_skb(skb);
24143 return -EINVAL;
24144 }
24145 if (!test_bit(VCF_TX, &vc->flags)) {
24146 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
24147- atomic_inc(&vcc->stats->tx_err);
24148+ atomic_inc_unchecked(&vcc->stats->tx_err);
24149 dev_kfree_skb(skb);
24150 return -EINVAL;
24151 }
24152@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24153 break;
24154 default:
24155 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
24156- atomic_inc(&vcc->stats->tx_err);
24157+ atomic_inc_unchecked(&vcc->stats->tx_err);
24158 dev_kfree_skb(skb);
24159 return -EINVAL;
24160 }
24161
24162 if (skb_shinfo(skb)->nr_frags != 0) {
24163 printk("%s: No scatter-gather yet.\n", card->name);
24164- atomic_inc(&vcc->stats->tx_err);
24165+ atomic_inc_unchecked(&vcc->stats->tx_err);
24166 dev_kfree_skb(skb);
24167 return -EINVAL;
24168 }
24169@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24170
24171 err = queue_skb(card, vc, skb, oam);
24172 if (err) {
24173- atomic_inc(&vcc->stats->tx_err);
24174+ atomic_inc_unchecked(&vcc->stats->tx_err);
24175 dev_kfree_skb(skb);
24176 return err;
24177 }
24178@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
24179 skb = dev_alloc_skb(64);
24180 if (!skb) {
24181 printk("%s: Out of memory in send_oam().\n", card->name);
24182- atomic_inc(&vcc->stats->tx_err);
24183+ atomic_inc_unchecked(&vcc->stats->tx_err);
24184 return -ENOMEM;
24185 }
24186 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
24187diff -urNp linux-3.1.1/drivers/atm/iphase.c linux-3.1.1/drivers/atm/iphase.c
24188--- linux-3.1.1/drivers/atm/iphase.c 2011-11-11 15:19:27.000000000 -0500
24189+++ linux-3.1.1/drivers/atm/iphase.c 2011-11-16 18:39:07.000000000 -0500
24190@@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev)
24191 status = (u_short) (buf_desc_ptr->desc_mode);
24192 if (status & (RX_CER | RX_PTE | RX_OFL))
24193 {
24194- atomic_inc(&vcc->stats->rx_err);
24195+ atomic_inc_unchecked(&vcc->stats->rx_err);
24196 IF_ERR(printk("IA: bad packet, dropping it");)
24197 if (status & RX_CER) {
24198 IF_ERR(printk(" cause: packet CRC error\n");)
24199@@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev)
24200 len = dma_addr - buf_addr;
24201 if (len > iadev->rx_buf_sz) {
24202 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
24203- atomic_inc(&vcc->stats->rx_err);
24204+ atomic_inc_unchecked(&vcc->stats->rx_err);
24205 goto out_free_desc;
24206 }
24207
24208@@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *
24209 ia_vcc = INPH_IA_VCC(vcc);
24210 if (ia_vcc == NULL)
24211 {
24212- atomic_inc(&vcc->stats->rx_err);
24213+ atomic_inc_unchecked(&vcc->stats->rx_err);
24214 dev_kfree_skb_any(skb);
24215 atm_return(vcc, atm_guess_pdu2truesize(len));
24216 goto INCR_DLE;
24217@@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *
24218 if ((length > iadev->rx_buf_sz) || (length >
24219 (skb->len - sizeof(struct cpcs_trailer))))
24220 {
24221- atomic_inc(&vcc->stats->rx_err);
24222+ atomic_inc_unchecked(&vcc->stats->rx_err);
24223 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
24224 length, skb->len);)
24225 dev_kfree_skb_any(skb);
24226@@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *
24227
24228 IF_RX(printk("rx_dle_intr: skb push");)
24229 vcc->push(vcc,skb);
24230- atomic_inc(&vcc->stats->rx);
24231+ atomic_inc_unchecked(&vcc->stats->rx);
24232 iadev->rx_pkt_cnt++;
24233 }
24234 INCR_DLE:
24235@@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev,
24236 {
24237 struct k_sonet_stats *stats;
24238 stats = &PRIV(_ia_dev[board])->sonet_stats;
24239- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
24240- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
24241- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
24242- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
24243- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
24244- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
24245- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
24246- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
24247- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
24248+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
24249+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
24250+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
24251+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
24252+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
24253+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
24254+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
24255+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
24256+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
24257 }
24258 ia_cmds.status = 0;
24259 break;
24260@@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
24261 if ((desc == 0) || (desc > iadev->num_tx_desc))
24262 {
24263 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
24264- atomic_inc(&vcc->stats->tx);
24265+ atomic_inc_unchecked(&vcc->stats->tx);
24266 if (vcc->pop)
24267 vcc->pop(vcc, skb);
24268 else
24269@@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
24270 ATM_DESC(skb) = vcc->vci;
24271 skb_queue_tail(&iadev->tx_dma_q, skb);
24272
24273- atomic_inc(&vcc->stats->tx);
24274+ atomic_inc_unchecked(&vcc->stats->tx);
24275 iadev->tx_pkt_cnt++;
24276 /* Increment transaction counter */
24277 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
24278
24279 #if 0
24280 /* add flow control logic */
24281- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
24282+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
24283 if (iavcc->vc_desc_cnt > 10) {
24284 vcc->tx_quota = vcc->tx_quota * 3 / 4;
24285 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
24286diff -urNp linux-3.1.1/drivers/atm/lanai.c linux-3.1.1/drivers/atm/lanai.c
24287--- linux-3.1.1/drivers/atm/lanai.c 2011-11-11 15:19:27.000000000 -0500
24288+++ linux-3.1.1/drivers/atm/lanai.c 2011-11-16 18:39:07.000000000 -0500
24289@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
24290 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
24291 lanai_endtx(lanai, lvcc);
24292 lanai_free_skb(lvcc->tx.atmvcc, skb);
24293- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
24294+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
24295 }
24296
24297 /* Try to fill the buffer - don't call unless there is backlog */
24298@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
24299 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
24300 __net_timestamp(skb);
24301 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
24302- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
24303+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
24304 out:
24305 lvcc->rx.buf.ptr = end;
24306 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
24307@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
24308 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
24309 "vcc %d\n", lanai->number, (unsigned int) s, vci);
24310 lanai->stats.service_rxnotaal5++;
24311- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24312+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24313 return 0;
24314 }
24315 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
24316@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
24317 int bytes;
24318 read_unlock(&vcc_sklist_lock);
24319 DPRINTK("got trashed rx pdu on vci %d\n", vci);
24320- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24321+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24322 lvcc->stats.x.aal5.service_trash++;
24323 bytes = (SERVICE_GET_END(s) * 16) -
24324 (((unsigned long) lvcc->rx.buf.ptr) -
24325@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
24326 }
24327 if (s & SERVICE_STREAM) {
24328 read_unlock(&vcc_sklist_lock);
24329- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24330+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24331 lvcc->stats.x.aal5.service_stream++;
24332 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
24333 "PDU on VCI %d!\n", lanai->number, vci);
24334@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
24335 return 0;
24336 }
24337 DPRINTK("got rx crc error on vci %d\n", vci);
24338- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24339+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24340 lvcc->stats.x.aal5.service_rxcrc++;
24341 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
24342 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
24343diff -urNp linux-3.1.1/drivers/atm/nicstar.c linux-3.1.1/drivers/atm/nicstar.c
24344--- linux-3.1.1/drivers/atm/nicstar.c 2011-11-11 15:19:27.000000000 -0500
24345+++ linux-3.1.1/drivers/atm/nicstar.c 2011-11-16 18:39:07.000000000 -0500
24346@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
24347 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
24348 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
24349 card->index);
24350- atomic_inc(&vcc->stats->tx_err);
24351+ atomic_inc_unchecked(&vcc->stats->tx_err);
24352 dev_kfree_skb_any(skb);
24353 return -EINVAL;
24354 }
24355@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
24356 if (!vc->tx) {
24357 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
24358 card->index);
24359- atomic_inc(&vcc->stats->tx_err);
24360+ atomic_inc_unchecked(&vcc->stats->tx_err);
24361 dev_kfree_skb_any(skb);
24362 return -EINVAL;
24363 }
24364@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
24365 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
24366 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
24367 card->index);
24368- atomic_inc(&vcc->stats->tx_err);
24369+ atomic_inc_unchecked(&vcc->stats->tx_err);
24370 dev_kfree_skb_any(skb);
24371 return -EINVAL;
24372 }
24373
24374 if (skb_shinfo(skb)->nr_frags != 0) {
24375 printk("nicstar%d: No scatter-gather yet.\n", card->index);
24376- atomic_inc(&vcc->stats->tx_err);
24377+ atomic_inc_unchecked(&vcc->stats->tx_err);
24378 dev_kfree_skb_any(skb);
24379 return -EINVAL;
24380 }
24381@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
24382 }
24383
24384 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
24385- atomic_inc(&vcc->stats->tx_err);
24386+ atomic_inc_unchecked(&vcc->stats->tx_err);
24387 dev_kfree_skb_any(skb);
24388 return -EIO;
24389 }
24390- atomic_inc(&vcc->stats->tx);
24391+ atomic_inc_unchecked(&vcc->stats->tx);
24392
24393 return 0;
24394 }
24395@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
24396 printk
24397 ("nicstar%d: Can't allocate buffers for aal0.\n",
24398 card->index);
24399- atomic_add(i, &vcc->stats->rx_drop);
24400+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
24401 break;
24402 }
24403 if (!atm_charge(vcc, sb->truesize)) {
24404 RXPRINTK
24405 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
24406 card->index);
24407- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24408+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24409 dev_kfree_skb_any(sb);
24410 break;
24411 }
24412@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
24413 ATM_SKB(sb)->vcc = vcc;
24414 __net_timestamp(sb);
24415 vcc->push(vcc, sb);
24416- atomic_inc(&vcc->stats->rx);
24417+ atomic_inc_unchecked(&vcc->stats->rx);
24418 cell += ATM_CELL_PAYLOAD;
24419 }
24420
24421@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
24422 if (iovb == NULL) {
24423 printk("nicstar%d: Out of iovec buffers.\n",
24424 card->index);
24425- atomic_inc(&vcc->stats->rx_drop);
24426+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24427 recycle_rx_buf(card, skb);
24428 return;
24429 }
24430@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
24431 small or large buffer itself. */
24432 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
24433 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
24434- atomic_inc(&vcc->stats->rx_err);
24435+ atomic_inc_unchecked(&vcc->stats->rx_err);
24436 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24437 NS_MAX_IOVECS);
24438 NS_PRV_IOVCNT(iovb) = 0;
24439@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
24440 ("nicstar%d: Expected a small buffer, and this is not one.\n",
24441 card->index);
24442 which_list(card, skb);
24443- atomic_inc(&vcc->stats->rx_err);
24444+ atomic_inc_unchecked(&vcc->stats->rx_err);
24445 recycle_rx_buf(card, skb);
24446 vc->rx_iov = NULL;
24447 recycle_iov_buf(card, iovb);
24448@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
24449 ("nicstar%d: Expected a large buffer, and this is not one.\n",
24450 card->index);
24451 which_list(card, skb);
24452- atomic_inc(&vcc->stats->rx_err);
24453+ atomic_inc_unchecked(&vcc->stats->rx_err);
24454 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24455 NS_PRV_IOVCNT(iovb));
24456 vc->rx_iov = NULL;
24457@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
24458 printk(" - PDU size mismatch.\n");
24459 else
24460 printk(".\n");
24461- atomic_inc(&vcc->stats->rx_err);
24462+ atomic_inc_unchecked(&vcc->stats->rx_err);
24463 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24464 NS_PRV_IOVCNT(iovb));
24465 vc->rx_iov = NULL;
24466@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
24467 /* skb points to a small buffer */
24468 if (!atm_charge(vcc, skb->truesize)) {
24469 push_rxbufs(card, skb);
24470- atomic_inc(&vcc->stats->rx_drop);
24471+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24472 } else {
24473 skb_put(skb, len);
24474 dequeue_sm_buf(card, skb);
24475@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
24476 ATM_SKB(skb)->vcc = vcc;
24477 __net_timestamp(skb);
24478 vcc->push(vcc, skb);
24479- atomic_inc(&vcc->stats->rx);
24480+ atomic_inc_unchecked(&vcc->stats->rx);
24481 }
24482 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
24483 struct sk_buff *sb;
24484@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
24485 if (len <= NS_SMBUFSIZE) {
24486 if (!atm_charge(vcc, sb->truesize)) {
24487 push_rxbufs(card, sb);
24488- atomic_inc(&vcc->stats->rx_drop);
24489+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24490 } else {
24491 skb_put(sb, len);
24492 dequeue_sm_buf(card, sb);
24493@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
24494 ATM_SKB(sb)->vcc = vcc;
24495 __net_timestamp(sb);
24496 vcc->push(vcc, sb);
24497- atomic_inc(&vcc->stats->rx);
24498+ atomic_inc_unchecked(&vcc->stats->rx);
24499 }
24500
24501 push_rxbufs(card, skb);
24502@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
24503
24504 if (!atm_charge(vcc, skb->truesize)) {
24505 push_rxbufs(card, skb);
24506- atomic_inc(&vcc->stats->rx_drop);
24507+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24508 } else {
24509 dequeue_lg_buf(card, skb);
24510 #ifdef NS_USE_DESTRUCTORS
24511@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
24512 ATM_SKB(skb)->vcc = vcc;
24513 __net_timestamp(skb);
24514 vcc->push(vcc, skb);
24515- atomic_inc(&vcc->stats->rx);
24516+ atomic_inc_unchecked(&vcc->stats->rx);
24517 }
24518
24519 push_rxbufs(card, sb);
24520@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
24521 printk
24522 ("nicstar%d: Out of huge buffers.\n",
24523 card->index);
24524- atomic_inc(&vcc->stats->rx_drop);
24525+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24526 recycle_iovec_rx_bufs(card,
24527 (struct iovec *)
24528 iovb->data,
24529@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
24530 card->hbpool.count++;
24531 } else
24532 dev_kfree_skb_any(hb);
24533- atomic_inc(&vcc->stats->rx_drop);
24534+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24535 } else {
24536 /* Copy the small buffer to the huge buffer */
24537 sb = (struct sk_buff *)iov->iov_base;
24538@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
24539 #endif /* NS_USE_DESTRUCTORS */
24540 __net_timestamp(hb);
24541 vcc->push(vcc, hb);
24542- atomic_inc(&vcc->stats->rx);
24543+ atomic_inc_unchecked(&vcc->stats->rx);
24544 }
24545 }
24546
24547diff -urNp linux-3.1.1/drivers/atm/solos-pci.c linux-3.1.1/drivers/atm/solos-pci.c
24548--- linux-3.1.1/drivers/atm/solos-pci.c 2011-11-11 15:19:27.000000000 -0500
24549+++ linux-3.1.1/drivers/atm/solos-pci.c 2011-11-16 18:40:10.000000000 -0500
24550@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
24551 }
24552 atm_charge(vcc, skb->truesize);
24553 vcc->push(vcc, skb);
24554- atomic_inc(&vcc->stats->rx);
24555+ atomic_inc_unchecked(&vcc->stats->rx);
24556 break;
24557
24558 case PKT_STATUS:
24559@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
24560 char msg[500];
24561 char item[10];
24562
24563+ pax_track_stack();
24564+
24565 len = buf->len;
24566 for (i = 0; i < len; i++){
24567 if(i % 8 == 0)
24568@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
24569 vcc = SKB_CB(oldskb)->vcc;
24570
24571 if (vcc) {
24572- atomic_inc(&vcc->stats->tx);
24573+ atomic_inc_unchecked(&vcc->stats->tx);
24574 solos_pop(vcc, oldskb);
24575 } else
24576 dev_kfree_skb_irq(oldskb);
24577diff -urNp linux-3.1.1/drivers/atm/suni.c linux-3.1.1/drivers/atm/suni.c
24578--- linux-3.1.1/drivers/atm/suni.c 2011-11-11 15:19:27.000000000 -0500
24579+++ linux-3.1.1/drivers/atm/suni.c 2011-11-16 18:39:07.000000000 -0500
24580@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
24581
24582
24583 #define ADD_LIMITED(s,v) \
24584- atomic_add((v),&stats->s); \
24585- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
24586+ atomic_add_unchecked((v),&stats->s); \
24587+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
24588
24589
24590 static void suni_hz(unsigned long from_timer)
24591diff -urNp linux-3.1.1/drivers/atm/uPD98402.c linux-3.1.1/drivers/atm/uPD98402.c
24592--- linux-3.1.1/drivers/atm/uPD98402.c 2011-11-11 15:19:27.000000000 -0500
24593+++ linux-3.1.1/drivers/atm/uPD98402.c 2011-11-16 18:39:07.000000000 -0500
24594@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
24595 struct sonet_stats tmp;
24596 int error = 0;
24597
24598- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24599+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24600 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
24601 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
24602 if (zero && !error) {
24603@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
24604
24605
24606 #define ADD_LIMITED(s,v) \
24607- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
24608- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
24609- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24610+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
24611+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
24612+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24613
24614
24615 static void stat_event(struct atm_dev *dev)
24616@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
24617 if (reason & uPD98402_INT_PFM) stat_event(dev);
24618 if (reason & uPD98402_INT_PCO) {
24619 (void) GET(PCOCR); /* clear interrupt cause */
24620- atomic_add(GET(HECCT),
24621+ atomic_add_unchecked(GET(HECCT),
24622 &PRIV(dev)->sonet_stats.uncorr_hcs);
24623 }
24624 if ((reason & uPD98402_INT_RFO) &&
24625@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
24626 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
24627 uPD98402_INT_LOS),PIMR); /* enable them */
24628 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
24629- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24630- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
24631- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
24632+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24633+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
24634+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
24635 return 0;
24636 }
24637
24638diff -urNp linux-3.1.1/drivers/atm/zatm.c linux-3.1.1/drivers/atm/zatm.c
24639--- linux-3.1.1/drivers/atm/zatm.c 2011-11-11 15:19:27.000000000 -0500
24640+++ linux-3.1.1/drivers/atm/zatm.c 2011-11-16 18:39:07.000000000 -0500
24641@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24642 }
24643 if (!size) {
24644 dev_kfree_skb_irq(skb);
24645- if (vcc) atomic_inc(&vcc->stats->rx_err);
24646+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
24647 continue;
24648 }
24649 if (!atm_charge(vcc,skb->truesize)) {
24650@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24651 skb->len = size;
24652 ATM_SKB(skb)->vcc = vcc;
24653 vcc->push(vcc,skb);
24654- atomic_inc(&vcc->stats->rx);
24655+ atomic_inc_unchecked(&vcc->stats->rx);
24656 }
24657 zout(pos & 0xffff,MTA(mbx));
24658 #if 0 /* probably a stupid idea */
24659@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
24660 skb_queue_head(&zatm_vcc->backlog,skb);
24661 break;
24662 }
24663- atomic_inc(&vcc->stats->tx);
24664+ atomic_inc_unchecked(&vcc->stats->tx);
24665 wake_up(&zatm_vcc->tx_wait);
24666 }
24667
24668diff -urNp linux-3.1.1/drivers/base/devtmpfs.c linux-3.1.1/drivers/base/devtmpfs.c
24669--- linux-3.1.1/drivers/base/devtmpfs.c 2011-11-11 15:19:27.000000000 -0500
24670+++ linux-3.1.1/drivers/base/devtmpfs.c 2011-11-16 18:39:07.000000000 -0500
24671@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
24672 if (!thread)
24673 return 0;
24674
24675- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
24676+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
24677 if (err)
24678 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
24679 else
24680diff -urNp linux-3.1.1/drivers/base/power/wakeup.c linux-3.1.1/drivers/base/power/wakeup.c
24681--- linux-3.1.1/drivers/base/power/wakeup.c 2011-11-11 15:19:27.000000000 -0500
24682+++ linux-3.1.1/drivers/base/power/wakeup.c 2011-11-16 18:39:07.000000000 -0500
24683@@ -29,14 +29,14 @@ bool events_check_enabled;
24684 * They need to be modified together atomically, so it's better to use one
24685 * atomic variable to hold them both.
24686 */
24687-static atomic_t combined_event_count = ATOMIC_INIT(0);
24688+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
24689
24690 #define IN_PROGRESS_BITS (sizeof(int) * 4)
24691 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
24692
24693 static void split_counters(unsigned int *cnt, unsigned int *inpr)
24694 {
24695- unsigned int comb = atomic_read(&combined_event_count);
24696+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
24697
24698 *cnt = (comb >> IN_PROGRESS_BITS);
24699 *inpr = comb & MAX_IN_PROGRESS;
24700@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
24701 ws->last_time = ktime_get();
24702
24703 /* Increment the counter of events in progress. */
24704- atomic_inc(&combined_event_count);
24705+ atomic_inc_unchecked(&combined_event_count);
24706 }
24707
24708 /**
24709@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
24710 * Increment the counter of registered wakeup events and decrement the
24711 * couter of wakeup events in progress simultaneously.
24712 */
24713- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
24714+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
24715 }
24716
24717 /**
24718diff -urNp linux-3.1.1/drivers/block/cciss.c linux-3.1.1/drivers/block/cciss.c
24719--- linux-3.1.1/drivers/block/cciss.c 2011-11-11 15:19:27.000000000 -0500
24720+++ linux-3.1.1/drivers/block/cciss.c 2011-11-16 18:40:10.000000000 -0500
24721@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
24722 int err;
24723 u32 cp;
24724
24725+ memset(&arg64, 0, sizeof(arg64));
24726+
24727 err = 0;
24728 err |=
24729 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
24730@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
24731 while (!list_empty(&h->reqQ)) {
24732 c = list_entry(h->reqQ.next, CommandList_struct, list);
24733 /* can't do anything if fifo is full */
24734- if ((h->access.fifo_full(h))) {
24735+ if ((h->access->fifo_full(h))) {
24736 dev_warn(&h->pdev->dev, "fifo full\n");
24737 break;
24738 }
24739@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
24740 h->Qdepth--;
24741
24742 /* Tell the controller execute command */
24743- h->access.submit_command(h, c);
24744+ h->access->submit_command(h, c);
24745
24746 /* Put job onto the completed Q */
24747 addQ(&h->cmpQ, c);
24748@@ -3422,17 +3424,17 @@ startio:
24749
24750 static inline unsigned long get_next_completion(ctlr_info_t *h)
24751 {
24752- return h->access.command_completed(h);
24753+ return h->access->command_completed(h);
24754 }
24755
24756 static inline int interrupt_pending(ctlr_info_t *h)
24757 {
24758- return h->access.intr_pending(h);
24759+ return h->access->intr_pending(h);
24760 }
24761
24762 static inline long interrupt_not_for_us(ctlr_info_t *h)
24763 {
24764- return ((h->access.intr_pending(h) == 0) ||
24765+ return ((h->access->intr_pending(h) == 0) ||
24766 (h->interrupts_enabled == 0));
24767 }
24768
24769@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
24770 u32 a;
24771
24772 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
24773- return h->access.command_completed(h);
24774+ return h->access->command_completed(h);
24775
24776 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
24777 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
24778@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
24779 trans_support & CFGTBL_Trans_use_short_tags);
24780
24781 /* Change the access methods to the performant access methods */
24782- h->access = SA5_performant_access;
24783+ h->access = &SA5_performant_access;
24784 h->transMethod = CFGTBL_Trans_Performant;
24785
24786 return;
24787@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
24788 if (prod_index < 0)
24789 return -ENODEV;
24790 h->product_name = products[prod_index].product_name;
24791- h->access = *(products[prod_index].access);
24792+ h->access = products[prod_index].access;
24793
24794 if (cciss_board_disabled(h)) {
24795 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
24796@@ -5009,7 +5011,7 @@ reinit_after_soft_reset:
24797 }
24798
24799 /* make sure the board interrupts are off */
24800- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24801+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24802 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
24803 if (rc)
24804 goto clean2;
24805@@ -5061,7 +5063,7 @@ reinit_after_soft_reset:
24806 * fake ones to scoop up any residual completions.
24807 */
24808 spin_lock_irqsave(&h->lock, flags);
24809- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24810+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24811 spin_unlock_irqrestore(&h->lock, flags);
24812 free_irq(h->intr[PERF_MODE_INT], h);
24813 rc = cciss_request_irq(h, cciss_msix_discard_completions,
24814@@ -5081,9 +5083,9 @@ reinit_after_soft_reset:
24815 dev_info(&h->pdev->dev, "Board READY.\n");
24816 dev_info(&h->pdev->dev,
24817 "Waiting for stale completions to drain.\n");
24818- h->access.set_intr_mask(h, CCISS_INTR_ON);
24819+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24820 msleep(10000);
24821- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24822+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24823
24824 rc = controller_reset_failed(h->cfgtable);
24825 if (rc)
24826@@ -5106,7 +5108,7 @@ reinit_after_soft_reset:
24827 cciss_scsi_setup(h);
24828
24829 /* Turn the interrupts on so we can service requests */
24830- h->access.set_intr_mask(h, CCISS_INTR_ON);
24831+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24832
24833 /* Get the firmware version */
24834 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24835@@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_de
24836 kfree(flush_buf);
24837 if (return_code != IO_OK)
24838 dev_warn(&h->pdev->dev, "Error flushing cache\n");
24839- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24840+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24841 free_irq(h->intr[PERF_MODE_INT], h);
24842 }
24843
24844diff -urNp linux-3.1.1/drivers/block/cciss.h linux-3.1.1/drivers/block/cciss.h
24845--- linux-3.1.1/drivers/block/cciss.h 2011-11-11 15:19:27.000000000 -0500
24846+++ linux-3.1.1/drivers/block/cciss.h 2011-11-16 18:39:07.000000000 -0500
24847@@ -100,7 +100,7 @@ struct ctlr_info
24848 /* information about each logical volume */
24849 drive_info_struct *drv[CISS_MAX_LUN];
24850
24851- struct access_method access;
24852+ struct access_method *access;
24853
24854 /* queue and queue Info */
24855 struct list_head reqQ;
24856diff -urNp linux-3.1.1/drivers/block/cpqarray.c linux-3.1.1/drivers/block/cpqarray.c
24857--- linux-3.1.1/drivers/block/cpqarray.c 2011-11-11 15:19:27.000000000 -0500
24858+++ linux-3.1.1/drivers/block/cpqarray.c 2011-11-16 18:40:10.000000000 -0500
24859@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24860 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24861 goto Enomem4;
24862 }
24863- hba[i]->access.set_intr_mask(hba[i], 0);
24864+ hba[i]->access->set_intr_mask(hba[i], 0);
24865 if (request_irq(hba[i]->intr, do_ida_intr,
24866 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24867 {
24868@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24869 add_timer(&hba[i]->timer);
24870
24871 /* Enable IRQ now that spinlock and rate limit timer are set up */
24872- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24873+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24874
24875 for(j=0; j<NWD; j++) {
24876 struct gendisk *disk = ida_gendisk[i][j];
24877@@ -694,7 +694,7 @@ DBGINFO(
24878 for(i=0; i<NR_PRODUCTS; i++) {
24879 if (board_id == products[i].board_id) {
24880 c->product_name = products[i].product_name;
24881- c->access = *(products[i].access);
24882+ c->access = products[i].access;
24883 break;
24884 }
24885 }
24886@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24887 hba[ctlr]->intr = intr;
24888 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24889 hba[ctlr]->product_name = products[j].product_name;
24890- hba[ctlr]->access = *(products[j].access);
24891+ hba[ctlr]->access = products[j].access;
24892 hba[ctlr]->ctlr = ctlr;
24893 hba[ctlr]->board_id = board_id;
24894 hba[ctlr]->pci_dev = NULL; /* not PCI */
24895@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24896 struct scatterlist tmp_sg[SG_MAX];
24897 int i, dir, seg;
24898
24899+ pax_track_stack();
24900+
24901 queue_next:
24902 creq = blk_peek_request(q);
24903 if (!creq)
24904@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24905
24906 while((c = h->reqQ) != NULL) {
24907 /* Can't do anything if we're busy */
24908- if (h->access.fifo_full(h) == 0)
24909+ if (h->access->fifo_full(h) == 0)
24910 return;
24911
24912 /* Get the first entry from the request Q */
24913@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24914 h->Qdepth--;
24915
24916 /* Tell the controller to do our bidding */
24917- h->access.submit_command(h, c);
24918+ h->access->submit_command(h, c);
24919
24920 /* Get onto the completion Q */
24921 addQ(&h->cmpQ, c);
24922@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24923 unsigned long flags;
24924 __u32 a,a1;
24925
24926- istat = h->access.intr_pending(h);
24927+ istat = h->access->intr_pending(h);
24928 /* Is this interrupt for us? */
24929 if (istat == 0)
24930 return IRQ_NONE;
24931@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24932 */
24933 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24934 if (istat & FIFO_NOT_EMPTY) {
24935- while((a = h->access.command_completed(h))) {
24936+ while((a = h->access->command_completed(h))) {
24937 a1 = a; a &= ~3;
24938 if ((c = h->cmpQ) == NULL)
24939 {
24940@@ -1449,11 +1451,11 @@ static int sendcmd(
24941 /*
24942 * Disable interrupt
24943 */
24944- info_p->access.set_intr_mask(info_p, 0);
24945+ info_p->access->set_intr_mask(info_p, 0);
24946 /* Make sure there is room in the command FIFO */
24947 /* Actually it should be completely empty at this time. */
24948 for (i = 200000; i > 0; i--) {
24949- temp = info_p->access.fifo_full(info_p);
24950+ temp = info_p->access->fifo_full(info_p);
24951 if (temp != 0) {
24952 break;
24953 }
24954@@ -1466,7 +1468,7 @@ DBG(
24955 /*
24956 * Send the cmd
24957 */
24958- info_p->access.submit_command(info_p, c);
24959+ info_p->access->submit_command(info_p, c);
24960 complete = pollcomplete(ctlr);
24961
24962 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
24963@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
24964 * we check the new geometry. Then turn interrupts back on when
24965 * we're done.
24966 */
24967- host->access.set_intr_mask(host, 0);
24968+ host->access->set_intr_mask(host, 0);
24969 getgeometry(ctlr);
24970- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
24971+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
24972
24973 for(i=0; i<NWD; i++) {
24974 struct gendisk *disk = ida_gendisk[ctlr][i];
24975@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
24976 /* Wait (up to 2 seconds) for a command to complete */
24977
24978 for (i = 200000; i > 0; i--) {
24979- done = hba[ctlr]->access.command_completed(hba[ctlr]);
24980+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
24981 if (done == 0) {
24982 udelay(10); /* a short fixed delay */
24983 } else
24984diff -urNp linux-3.1.1/drivers/block/cpqarray.h linux-3.1.1/drivers/block/cpqarray.h
24985--- linux-3.1.1/drivers/block/cpqarray.h 2011-11-11 15:19:27.000000000 -0500
24986+++ linux-3.1.1/drivers/block/cpqarray.h 2011-11-16 18:39:07.000000000 -0500
24987@@ -99,7 +99,7 @@ struct ctlr_info {
24988 drv_info_t drv[NWD];
24989 struct proc_dir_entry *proc;
24990
24991- struct access_method access;
24992+ struct access_method *access;
24993
24994 cmdlist_t *reqQ;
24995 cmdlist_t *cmpQ;
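Annotation: the cpqarray hunks above all follow one pattern — the per-controller `access` field changes from an embedded `struct access_method` to a pointer, so every call site becomes `h->access->fn(h)` and the method table can live once, shared by all controllers, instead of being copied into each `ctlr_info`. A minimal userspace C sketch of that shape, with hypothetical names rather than the driver's real ones:

    #include <stdio.h>

    struct access_method {                    /* table of controller entry points */
            void (*submit_command)(void *h);
            int  (*fifo_full)(void *h);
    };

    static void demo_submit(void *h) { (void)h; puts("submit"); }
    static int  demo_fifo_full(void *h) { (void)h; return 1; }

    /* one shared, constant table instead of a copy inside every controller */
    static const struct access_method demo_access = {
            .submit_command = demo_submit,
            .fifo_full      = demo_fifo_full,
    };

    struct ctlr_info {
            const struct access_method *access;   /* pointer, not an embedded copy */
    };

    int main(void)
    {
            struct ctlr_info h = { .access = &demo_access };
            if (h.access->fifo_full(&h))          /* was: h.access.fifo_full(&h) */
                    h.access->submit_command(&h);
            return 0;
    }
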
24996diff -urNp linux-3.1.1/drivers/block/DAC960.c linux-3.1.1/drivers/block/DAC960.c
24997--- linux-3.1.1/drivers/block/DAC960.c 2011-11-11 15:19:27.000000000 -0500
24998+++ linux-3.1.1/drivers/block/DAC960.c 2011-11-16 18:40:10.000000000 -0500
24999@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
25000 unsigned long flags;
25001 int Channel, TargetID;
25002
25003+ pax_track_stack();
25004+
25005 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
25006 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
25007 sizeof(DAC960_SCSI_Inquiry_T) +
25008diff -urNp linux-3.1.1/drivers/block/drbd/drbd_int.h linux-3.1.1/drivers/block/drbd/drbd_int.h
25009--- linux-3.1.1/drivers/block/drbd/drbd_int.h 2011-11-11 15:19:27.000000000 -0500
25010+++ linux-3.1.1/drivers/block/drbd/drbd_int.h 2011-11-16 18:39:07.000000000 -0500
25011@@ -737,7 +737,7 @@ struct drbd_request;
25012 struct drbd_epoch {
25013 struct list_head list;
25014 unsigned int barrier_nr;
25015- atomic_t epoch_size; /* increased on every request added. */
25016+ atomic_unchecked_t epoch_size; /* increased on every request added. */
25017 atomic_t active; /* increased on every req. added, and dec on every finished. */
25018 unsigned long flags;
25019 };
25020@@ -1109,7 +1109,7 @@ struct drbd_conf {
25021 void *int_dig_in;
25022 void *int_dig_vv;
25023 wait_queue_head_t seq_wait;
25024- atomic_t packet_seq;
25025+ atomic_unchecked_t packet_seq;
25026 unsigned int peer_seq;
25027 spinlock_t peer_seq_lock;
25028 unsigned int minor;
25029@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
25030
25031 static inline void drbd_tcp_cork(struct socket *sock)
25032 {
25033- int __user val = 1;
25034+ int val = 1;
25035 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25036- (char __user *)&val, sizeof(val));
25037+ (char __force_user *)&val, sizeof(val));
25038 }
25039
25040 static inline void drbd_tcp_uncork(struct socket *sock)
25041 {
25042- int __user val = 0;
25043+ int val = 0;
25044 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25045- (char __user *)&val, sizeof(val));
25046+ (char __force_user *)&val, sizeof(val));
25047 }
25048
25049 static inline void drbd_tcp_nodelay(struct socket *sock)
25050 {
25051- int __user val = 1;
25052+ int val = 1;
25053 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
25054- (char __user *)&val, sizeof(val));
25055+ (char __force_user *)&val, sizeof(val));
25056 }
25057
25058 static inline void drbd_tcp_quickack(struct socket *sock)
25059 {
25060- int __user val = 2;
25061+ int val = 2;
25062 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
25063- (char __user *)&val, sizeof(val));
25064+ (char __force_user *)&val, sizeof(val));
25065 }
25066
25067 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
25068diff -urNp linux-3.1.1/drivers/block/drbd/drbd_main.c linux-3.1.1/drivers/block/drbd/drbd_main.c
25069--- linux-3.1.1/drivers/block/drbd/drbd_main.c 2011-11-11 15:19:27.000000000 -0500
25070+++ linux-3.1.1/drivers/block/drbd/drbd_main.c 2011-11-16 18:39:07.000000000 -0500
25071@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
25072 p.sector = sector;
25073 p.block_id = block_id;
25074 p.blksize = blksize;
25075- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
25076+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
25077
25078 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
25079 return false;
25080@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
25081 p.sector = cpu_to_be64(req->sector);
25082 p.block_id = (unsigned long)req;
25083 p.seq_num = cpu_to_be32(req->seq_num =
25084- atomic_add_return(1, &mdev->packet_seq));
25085+ atomic_add_return_unchecked(1, &mdev->packet_seq));
25086
25087 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
25088
25089@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
25090 atomic_set(&mdev->unacked_cnt, 0);
25091 atomic_set(&mdev->local_cnt, 0);
25092 atomic_set(&mdev->net_cnt, 0);
25093- atomic_set(&mdev->packet_seq, 0);
25094+ atomic_set_unchecked(&mdev->packet_seq, 0);
25095 atomic_set(&mdev->pp_in_use, 0);
25096 atomic_set(&mdev->pp_in_use_by_net, 0);
25097 atomic_set(&mdev->rs_sect_in, 0);
25098@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
25099 mdev->receiver.t_state);
25100
25101 /* no need to lock it, I'm the only thread alive */
25102- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
25103- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
25104+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
25105+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
25106 mdev->al_writ_cnt =
25107 mdev->bm_writ_cnt =
25108 mdev->read_cnt =
25109diff -urNp linux-3.1.1/drivers/block/drbd/drbd_nl.c linux-3.1.1/drivers/block/drbd/drbd_nl.c
25110--- linux-3.1.1/drivers/block/drbd/drbd_nl.c 2011-11-11 15:19:27.000000000 -0500
25111+++ linux-3.1.1/drivers/block/drbd/drbd_nl.c 2011-11-16 18:39:07.000000000 -0500
25112@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
25113 module_put(THIS_MODULE);
25114 }
25115
25116-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25117+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25118
25119 static unsigned short *
25120 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
25121@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
25122 cn_reply->id.idx = CN_IDX_DRBD;
25123 cn_reply->id.val = CN_VAL_DRBD;
25124
25125- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25126+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25127 cn_reply->ack = 0; /* not used here. */
25128 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25129 (int)((char *)tl - (char *)reply->tag_list);
25130@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
25131 cn_reply->id.idx = CN_IDX_DRBD;
25132 cn_reply->id.val = CN_VAL_DRBD;
25133
25134- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25135+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25136 cn_reply->ack = 0; /* not used here. */
25137 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25138 (int)((char *)tl - (char *)reply->tag_list);
25139@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
25140 cn_reply->id.idx = CN_IDX_DRBD;
25141 cn_reply->id.val = CN_VAL_DRBD;
25142
25143- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
25144+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
25145 cn_reply->ack = 0; // not used here.
25146 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25147 (int)((char*)tl - (char*)reply->tag_list);
25148@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
25149 cn_reply->id.idx = CN_IDX_DRBD;
25150 cn_reply->id.val = CN_VAL_DRBD;
25151
25152- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25153+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25154 cn_reply->ack = 0; /* not used here. */
25155 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25156 (int)((char *)tl - (char *)reply->tag_list);
25157diff -urNp linux-3.1.1/drivers/block/drbd/drbd_receiver.c linux-3.1.1/drivers/block/drbd/drbd_receiver.c
25158--- linux-3.1.1/drivers/block/drbd/drbd_receiver.c 2011-11-11 15:19:27.000000000 -0500
25159+++ linux-3.1.1/drivers/block/drbd/drbd_receiver.c 2011-11-16 18:39:07.000000000 -0500
25160@@ -894,7 +894,7 @@ retry:
25161 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
25162 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
25163
25164- atomic_set(&mdev->packet_seq, 0);
25165+ atomic_set_unchecked(&mdev->packet_seq, 0);
25166 mdev->peer_seq = 0;
25167
25168 drbd_thread_start(&mdev->asender);
25169@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
25170 do {
25171 next_epoch = NULL;
25172
25173- epoch_size = atomic_read(&epoch->epoch_size);
25174+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
25175
25176 switch (ev & ~EV_CLEANUP) {
25177 case EV_PUT:
25178@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
25179 rv = FE_DESTROYED;
25180 } else {
25181 epoch->flags = 0;
25182- atomic_set(&epoch->epoch_size, 0);
25183+ atomic_set_unchecked(&epoch->epoch_size, 0);
25184 /* atomic_set(&epoch->active, 0); is already zero */
25185 if (rv == FE_STILL_LIVE)
25186 rv = FE_RECYCLED;
25187@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
25188 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
25189 drbd_flush(mdev);
25190
25191- if (atomic_read(&mdev->current_epoch->epoch_size)) {
25192+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
25193 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
25194 if (epoch)
25195 break;
25196 }
25197
25198 epoch = mdev->current_epoch;
25199- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
25200+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
25201
25202 D_ASSERT(atomic_read(&epoch->active) == 0);
25203 D_ASSERT(epoch->flags == 0);
25204@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
25205 }
25206
25207 epoch->flags = 0;
25208- atomic_set(&epoch->epoch_size, 0);
25209+ atomic_set_unchecked(&epoch->epoch_size, 0);
25210 atomic_set(&epoch->active, 0);
25211
25212 spin_lock(&mdev->epoch_lock);
25213- if (atomic_read(&mdev->current_epoch->epoch_size)) {
25214+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
25215 list_add(&epoch->list, &mdev->current_epoch->list);
25216 mdev->current_epoch = epoch;
25217 mdev->epochs++;
25218@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
25219 spin_unlock(&mdev->peer_seq_lock);
25220
25221 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
25222- atomic_inc(&mdev->current_epoch->epoch_size);
25223+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
25224 return drbd_drain_block(mdev, data_size);
25225 }
25226
25227@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
25228
25229 spin_lock(&mdev->epoch_lock);
25230 e->epoch = mdev->current_epoch;
25231- atomic_inc(&e->epoch->epoch_size);
25232+ atomic_inc_unchecked(&e->epoch->epoch_size);
25233 atomic_inc(&e->epoch->active);
25234 spin_unlock(&mdev->epoch_lock);
25235
25236@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
25237 D_ASSERT(list_empty(&mdev->done_ee));
25238
25239 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
25240- atomic_set(&mdev->current_epoch->epoch_size, 0);
25241+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
25242 D_ASSERT(list_empty(&mdev->current_epoch->list));
25243 }
25244
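Annotation: the drbd conversions above (packet_seq, epoch_size, drbd_nl_seq) swap atomic_t for atomic_unchecked_t plus the matching *_unchecked accessors. Counters that are allowed to wrap, such as sequence numbers and per-epoch sizes, opt out of the overflow trap that the PaX REFCOUNT feature adds to ordinary atomic_t. Roughly, and only as an illustration with made-up names rather than the patch's real definitions, the split looks like:

    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    /* checked counter: trap (here: assert) if an increment would overflow */
    typedef struct { int counter; } demo_atomic_t;
    /* unchecked counter: no overflow detection, wrapping is acceptable */
    typedef struct { int counter; } demo_atomic_unchecked_t;

    static int demo_atomic_add_return(int i, demo_atomic_t *v)
    {
            assert(v->counter <= INT_MAX - i);    /* REFCOUNT-style overflow trap */
            return v->counter += i;
    }

    static int demo_atomic_add_return_unchecked(int i, demo_atomic_unchecked_t *v)
    {
            return v->counter += i;               /* sequence numbers may wrap */
    }

    int main(void)
    {
            demo_atomic_t refs = { 0 };
            demo_atomic_unchecked_t seq = { 0 };
            printf("ref=%d seq=%d\n",
                   demo_atomic_add_return(1, &refs),
                   demo_atomic_add_return_unchecked(1, &seq));
            return 0;
    }
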
25245diff -urNp linux-3.1.1/drivers/block/loop.c linux-3.1.1/drivers/block/loop.c
25246--- linux-3.1.1/drivers/block/loop.c 2011-11-11 15:19:27.000000000 -0500
25247+++ linux-3.1.1/drivers/block/loop.c 2011-11-16 18:39:07.000000000 -0500
25248@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
25249 mm_segment_t old_fs = get_fs();
25250
25251 set_fs(get_ds());
25252- bw = file->f_op->write(file, buf, len, &pos);
25253+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
25254 set_fs(old_fs);
25255 if (likely(bw == len))
25256 return 0;
25257diff -urNp linux-3.1.1/drivers/block/nbd.c linux-3.1.1/drivers/block/nbd.c
25258--- linux-3.1.1/drivers/block/nbd.c 2011-11-11 15:19:27.000000000 -0500
25259+++ linux-3.1.1/drivers/block/nbd.c 2011-11-16 18:40:10.000000000 -0500
25260@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
25261 struct kvec iov;
25262 sigset_t blocked, oldset;
25263
25264+ pax_track_stack();
25265+
25266 if (unlikely(!sock)) {
25267 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
25268 lo->disk->disk_name, (send ? "send" : "recv"));
25269@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
25270 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
25271 unsigned int cmd, unsigned long arg)
25272 {
25273+ pax_track_stack();
25274+
25275 switch (cmd) {
25276 case NBD_DISCONNECT: {
25277 struct request sreq;
25278diff -urNp linux-3.1.1/drivers/char/agp/frontend.c linux-3.1.1/drivers/char/agp/frontend.c
25279--- linux-3.1.1/drivers/char/agp/frontend.c 2011-11-11 15:19:27.000000000 -0500
25280+++ linux-3.1.1/drivers/char/agp/frontend.c 2011-11-16 18:39:07.000000000 -0500
25281@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
25282 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
25283 return -EFAULT;
25284
25285- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
25286+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
25287 return -EFAULT;
25288
25289 client = agp_find_client_by_pid(reserve.pid);
25290diff -urNp linux-3.1.1/drivers/char/briq_panel.c linux-3.1.1/drivers/char/briq_panel.c
25291--- linux-3.1.1/drivers/char/briq_panel.c 2011-11-11 15:19:27.000000000 -0500
25292+++ linux-3.1.1/drivers/char/briq_panel.c 2011-11-16 18:40:10.000000000 -0500
25293@@ -9,6 +9,7 @@
25294 #include <linux/types.h>
25295 #include <linux/errno.h>
25296 #include <linux/tty.h>
25297+#include <linux/mutex.h>
25298 #include <linux/timer.h>
25299 #include <linux/kernel.h>
25300 #include <linux/wait.h>
25301@@ -34,6 +35,7 @@ static int vfd_is_open;
25302 static unsigned char vfd[40];
25303 static int vfd_cursor;
25304 static unsigned char ledpb, led;
25305+static DEFINE_MUTEX(vfd_mutex);
25306
25307 static void update_vfd(void)
25308 {
25309@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
25310 if (!vfd_is_open)
25311 return -EBUSY;
25312
25313+ mutex_lock(&vfd_mutex);
25314 for (;;) {
25315 char c;
25316 if (!indx)
25317 break;
25318- if (get_user(c, buf))
25319+ if (get_user(c, buf)) {
25320+ mutex_unlock(&vfd_mutex);
25321 return -EFAULT;
25322+ }
25323 if (esc) {
25324 set_led(c);
25325 esc = 0;
25326@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
25327 buf++;
25328 }
25329 update_vfd();
25330+ mutex_unlock(&vfd_mutex);
25331
25332 return len;
25333 }
25334diff -urNp linux-3.1.1/drivers/char/genrtc.c linux-3.1.1/drivers/char/genrtc.c
25335--- linux-3.1.1/drivers/char/genrtc.c 2011-11-11 15:19:27.000000000 -0500
25336+++ linux-3.1.1/drivers/char/genrtc.c 2011-11-16 18:40:10.000000000 -0500
25337@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
25338 switch (cmd) {
25339
25340 case RTC_PLL_GET:
25341+ memset(&pll, 0, sizeof(pll));
25342 if (get_rtc_pll(&pll))
25343 return -EINVAL;
25344 else
25345diff -urNp linux-3.1.1/drivers/char/hpet.c linux-3.1.1/drivers/char/hpet.c
25346--- linux-3.1.1/drivers/char/hpet.c 2011-11-11 15:19:27.000000000 -0500
25347+++ linux-3.1.1/drivers/char/hpet.c 2011-11-16 18:39:07.000000000 -0500
25348@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
25349 }
25350
25351 static int
25352-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
25353+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
25354 struct hpet_info *info)
25355 {
25356 struct hpet_timer __iomem *timer;
25357diff -urNp linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c
25358--- linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c 2011-11-11 15:19:27.000000000 -0500
25359+++ linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c 2011-11-16 18:40:10.000000000 -0500
25360@@ -415,7 +415,7 @@ struct ipmi_smi {
25361 struct proc_dir_entry *proc_dir;
25362 char proc_dir_name[10];
25363
25364- atomic_t stats[IPMI_NUM_STATS];
25365+ atomic_unchecked_t stats[IPMI_NUM_STATS];
25366
25367 /*
25368 * run_to_completion duplicate of smb_info, smi_info
25369@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
25370
25371
25372 #define ipmi_inc_stat(intf, stat) \
25373- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
25374+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
25375 #define ipmi_get_stat(intf, stat) \
25376- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
25377+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
25378
25379 static int is_lan_addr(struct ipmi_addr *addr)
25380 {
25381@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
25382 INIT_LIST_HEAD(&intf->cmd_rcvrs);
25383 init_waitqueue_head(&intf->waitq);
25384 for (i = 0; i < IPMI_NUM_STATS; i++)
25385- atomic_set(&intf->stats[i], 0);
25386+ atomic_set_unchecked(&intf->stats[i], 0);
25387
25388 intf->proc_dir = NULL;
25389
25390@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
25391 struct ipmi_smi_msg smi_msg;
25392 struct ipmi_recv_msg recv_msg;
25393
25394+ pax_track_stack();
25395+
25396 si = (struct ipmi_system_interface_addr *) &addr;
25397 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
25398 si->channel = IPMI_BMC_CHANNEL;
25399diff -urNp linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c
25400--- linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c 2011-11-11 15:19:27.000000000 -0500
25401+++ linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c 2011-11-16 18:39:07.000000000 -0500
25402@@ -277,7 +277,7 @@ struct smi_info {
25403 unsigned char slave_addr;
25404
25405 /* Counters and things for the proc filesystem. */
25406- atomic_t stats[SI_NUM_STATS];
25407+ atomic_unchecked_t stats[SI_NUM_STATS];
25408
25409 struct task_struct *thread;
25410
25411@@ -286,9 +286,9 @@ struct smi_info {
25412 };
25413
25414 #define smi_inc_stat(smi, stat) \
25415- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
25416+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
25417 #define smi_get_stat(smi, stat) \
25418- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
25419+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
25420
25421 #define SI_MAX_PARMS 4
25422
25423@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
25424 atomic_set(&new_smi->req_events, 0);
25425 new_smi->run_to_completion = 0;
25426 for (i = 0; i < SI_NUM_STATS; i++)
25427- atomic_set(&new_smi->stats[i], 0);
25428+ atomic_set_unchecked(&new_smi->stats[i], 0);
25429
25430 new_smi->interrupt_disabled = 1;
25431 atomic_set(&new_smi->stop_operation, 0);
25432diff -urNp linux-3.1.1/drivers/char/Kconfig linux-3.1.1/drivers/char/Kconfig
25433--- linux-3.1.1/drivers/char/Kconfig 2011-11-11 15:19:27.000000000 -0500
25434+++ linux-3.1.1/drivers/char/Kconfig 2011-11-16 18:40:10.000000000 -0500
25435@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
25436
25437 config DEVKMEM
25438 bool "/dev/kmem virtual device support"
25439- default y
25440+ default n
25441+ depends on !GRKERNSEC_KMEM
25442 help
25443 Say Y here if you want to support the /dev/kmem device. The
25444 /dev/kmem device is rarely used, but can be used for certain
25445@@ -596,6 +597,7 @@ config DEVPORT
25446 bool
25447 depends on !M68K
25448 depends on ISA || PCI
25449+ depends on !GRKERNSEC_KMEM
25450 default y
25451
25452 source "drivers/s390/char/Kconfig"
25453diff -urNp linux-3.1.1/drivers/char/mbcs.c linux-3.1.1/drivers/char/mbcs.c
25454--- linux-3.1.1/drivers/char/mbcs.c 2011-11-11 15:19:27.000000000 -0500
25455+++ linux-3.1.1/drivers/char/mbcs.c 2011-11-16 18:39:07.000000000 -0500
25456@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *de
25457 return 0;
25458 }
25459
25460-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
25461+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
25462 {
25463 .part_num = MBCS_PART_NUM,
25464 .mfg_num = MBCS_MFG_NUM,
25465diff -urNp linux-3.1.1/drivers/char/mem.c linux-3.1.1/drivers/char/mem.c
25466--- linux-3.1.1/drivers/char/mem.c 2011-11-11 15:19:27.000000000 -0500
25467+++ linux-3.1.1/drivers/char/mem.c 2011-11-17 18:31:56.000000000 -0500
25468@@ -18,6 +18,7 @@
25469 #include <linux/raw.h>
25470 #include <linux/tty.h>
25471 #include <linux/capability.h>
25472+#include <linux/security.h>
25473 #include <linux/ptrace.h>
25474 #include <linux/device.h>
25475 #include <linux/highmem.h>
25476@@ -34,6 +35,10 @@
25477 # include <linux/efi.h>
25478 #endif
25479
25480+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25481+extern const struct file_operations grsec_fops;
25482+#endif
25483+
25484 static inline unsigned long size_inside_page(unsigned long start,
25485 unsigned long size)
25486 {
25487@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
25488
25489 while (cursor < to) {
25490 if (!devmem_is_allowed(pfn)) {
25491+#ifdef CONFIG_GRKERNSEC_KMEM
25492+ gr_handle_mem_readwrite(from, to);
25493+#else
25494 printk(KERN_INFO
25495 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25496 current->comm, from, to);
25497+#endif
25498 return 0;
25499 }
25500 cursor += PAGE_SIZE;
25501@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
25502 }
25503 return 1;
25504 }
25505+#elif defined(CONFIG_GRKERNSEC_KMEM)
25506+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25507+{
25508+ return 0;
25509+}
25510 #else
25511 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25512 {
25513@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
25514
25515 while (count > 0) {
25516 unsigned long remaining;
25517+ char *temp;
25518
25519 sz = size_inside_page(p, count);
25520
25521@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
25522 if (!ptr)
25523 return -EFAULT;
25524
25525- remaining = copy_to_user(buf, ptr, sz);
25526+#ifdef CONFIG_PAX_USERCOPY
25527+ temp = kmalloc(sz, GFP_KERNEL);
25528+ if (!temp) {
25529+ unxlate_dev_mem_ptr(p, ptr);
25530+ return -ENOMEM;
25531+ }
25532+ memcpy(temp, ptr, sz);
25533+#else
25534+ temp = ptr;
25535+#endif
25536+
25537+ remaining = copy_to_user(buf, temp, sz);
25538+
25539+#ifdef CONFIG_PAX_USERCOPY
25540+ kfree(temp);
25541+#endif
25542+
25543 unxlate_dev_mem_ptr(p, ptr);
25544 if (remaining)
25545 return -EFAULT;
25546@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
25547 size_t count, loff_t *ppos)
25548 {
25549 unsigned long p = *ppos;
25550- ssize_t low_count, read, sz;
25551+ ssize_t low_count, read, sz, err = 0;
25552 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
25553- int err = 0;
25554
25555 read = 0;
25556 if (p < (unsigned long) high_memory) {
25557@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
25558 }
25559 #endif
25560 while (low_count > 0) {
25561+ char *temp;
25562+
25563 sz = size_inside_page(p, low_count);
25564
25565 /*
25566@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
25567 */
25568 kbuf = xlate_dev_kmem_ptr((char *)p);
25569
25570- if (copy_to_user(buf, kbuf, sz))
25571+#ifdef CONFIG_PAX_USERCOPY
25572+ temp = kmalloc(sz, GFP_KERNEL);
25573+ if (!temp)
25574+ return -ENOMEM;
25575+ memcpy(temp, kbuf, sz);
25576+#else
25577+ temp = kbuf;
25578+#endif
25579+
25580+ err = copy_to_user(buf, temp, sz);
25581+
25582+#ifdef CONFIG_PAX_USERCOPY
25583+ kfree(temp);
25584+#endif
25585+
25586+ if (err)
25587 return -EFAULT;
25588 buf += sz;
25589 p += sz;
25590@@ -866,6 +913,9 @@ static const struct memdev {
25591 #ifdef CONFIG_CRASH_DUMP
25592 [12] = { "oldmem", 0, &oldmem_fops, NULL },
25593 #endif
25594+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25595+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
25596+#endif
25597 };
25598
25599 static int memory_open(struct inode *inode, struct file *filp)
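Annotation: the /dev/mem and /dev/kmem hunks above share one idea — under CONFIG_PAX_USERCOPY the data is first bounced through a freshly kmalloc'd buffer and only then handed to copy_to_user(), so the usercopy checks see a slab object of exactly the requested size instead of an arbitrary mapped pointer. Condensed into a standalone kernel-style helper (a sketch of the pattern, not the patch's exact code):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Copy sz bytes from a kernel/iomapped pointer to userspace through a
     * kmalloc'd bounce buffer, mirroring the CONFIG_PAX_USERCOPY branch. */
    static ssize_t bounce_to_user(void __user *buf, const void *ptr, size_t sz)
    {
            void *temp;
            unsigned long left;

            temp = kmalloc(sz, GFP_KERNEL);
            if (!temp)
                    return -ENOMEM;

            memcpy(temp, ptr, sz);                /* stage into a sized slab object */
            left = copy_to_user(buf, temp, sz);
            kfree(temp);

            return left ? -EFAULT : 0;
    }
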
25600diff -urNp linux-3.1.1/drivers/char/nvram.c linux-3.1.1/drivers/char/nvram.c
25601--- linux-3.1.1/drivers/char/nvram.c 2011-11-11 15:19:27.000000000 -0500
25602+++ linux-3.1.1/drivers/char/nvram.c 2011-11-16 18:39:07.000000000 -0500
25603@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *f
25604
25605 spin_unlock_irq(&rtc_lock);
25606
25607- if (copy_to_user(buf, contents, tmp - contents))
25608+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
25609 return -EFAULT;
25610
25611 *ppos = i;
25612diff -urNp linux-3.1.1/drivers/char/random.c linux-3.1.1/drivers/char/random.c
25613--- linux-3.1.1/drivers/char/random.c 2011-11-11 15:19:27.000000000 -0500
25614+++ linux-3.1.1/drivers/char/random.c 2011-11-16 18:40:10.000000000 -0500
25615@@ -261,8 +261,13 @@
25616 /*
25617 * Configuration information
25618 */
25619+#ifdef CONFIG_GRKERNSEC_RANDNET
25620+#define INPUT_POOL_WORDS 512
25621+#define OUTPUT_POOL_WORDS 128
25622+#else
25623 #define INPUT_POOL_WORDS 128
25624 #define OUTPUT_POOL_WORDS 32
25625+#endif
25626 #define SEC_XFER_SIZE 512
25627 #define EXTRACT_SIZE 10
25628
25629@@ -300,10 +305,17 @@ static struct poolinfo {
25630 int poolwords;
25631 int tap1, tap2, tap3, tap4, tap5;
25632 } poolinfo_table[] = {
25633+#ifdef CONFIG_GRKERNSEC_RANDNET
25634+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
25635+ { 512, 411, 308, 208, 104, 1 },
25636+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
25637+ { 128, 103, 76, 51, 25, 1 },
25638+#else
25639 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
25640 { 128, 103, 76, 51, 25, 1 },
25641 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
25642 { 32, 26, 20, 14, 7, 1 },
25643+#endif
25644 #if 0
25645 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
25646 { 2048, 1638, 1231, 819, 411, 1 },
25647@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
25648
25649 extract_buf(r, tmp);
25650 i = min_t(int, nbytes, EXTRACT_SIZE);
25651- if (copy_to_user(buf, tmp, i)) {
25652+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
25653 ret = -EFAULT;
25654 break;
25655 }
25656@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
25657 #include <linux/sysctl.h>
25658
25659 static int min_read_thresh = 8, min_write_thresh;
25660-static int max_read_thresh = INPUT_POOL_WORDS * 32;
25661+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
25662 static int max_write_thresh = INPUT_POOL_WORDS * 32;
25663 static char sysctl_bootid[16];
25664
25665diff -urNp linux-3.1.1/drivers/char/sonypi.c linux-3.1.1/drivers/char/sonypi.c
25666--- linux-3.1.1/drivers/char/sonypi.c 2011-11-11 15:19:27.000000000 -0500
25667+++ linux-3.1.1/drivers/char/sonypi.c 2011-11-16 18:39:07.000000000 -0500
25668@@ -55,6 +55,7 @@
25669 #include <asm/uaccess.h>
25670 #include <asm/io.h>
25671 #include <asm/system.h>
25672+#include <asm/local.h>
25673
25674 #include <linux/sonypi.h>
25675
25676@@ -491,7 +492,7 @@ static struct sonypi_device {
25677 spinlock_t fifo_lock;
25678 wait_queue_head_t fifo_proc_list;
25679 struct fasync_struct *fifo_async;
25680- int open_count;
25681+ local_t open_count;
25682 int model;
25683 struct input_dev *input_jog_dev;
25684 struct input_dev *input_key_dev;
25685@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
25686 static int sonypi_misc_release(struct inode *inode, struct file *file)
25687 {
25688 mutex_lock(&sonypi_device.lock);
25689- sonypi_device.open_count--;
25690+ local_dec(&sonypi_device.open_count);
25691 mutex_unlock(&sonypi_device.lock);
25692 return 0;
25693 }
25694@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
25695 {
25696 mutex_lock(&sonypi_device.lock);
25697 /* Flush input queue on first open */
25698- if (!sonypi_device.open_count)
25699+ if (!local_read(&sonypi_device.open_count))
25700 kfifo_reset(&sonypi_device.fifo);
25701- sonypi_device.open_count++;
25702+ local_inc(&sonypi_device.open_count);
25703 mutex_unlock(&sonypi_device.lock);
25704
25705 return 0;
25706diff -urNp linux-3.1.1/drivers/char/tpm/tpm_bios.c linux-3.1.1/drivers/char/tpm/tpm_bios.c
25707--- linux-3.1.1/drivers/char/tpm/tpm_bios.c 2011-11-11 15:19:27.000000000 -0500
25708+++ linux-3.1.1/drivers/char/tpm/tpm_bios.c 2011-11-16 18:39:07.000000000 -0500
25709@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
25710 event = addr;
25711
25712 if ((event->event_type == 0 && event->event_size == 0) ||
25713- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
25714+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
25715 return NULL;
25716
25717 return addr;
25718@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
25719 return NULL;
25720
25721 if ((event->event_type == 0 && event->event_size == 0) ||
25722- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
25723+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
25724 return NULL;
25725
25726 (*pos)++;
25727@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
25728 int i;
25729
25730 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
25731- seq_putc(m, data[i]);
25732+ if (!seq_putc(m, data[i]))
25733+ return -EFAULT;
25734
25735 return 0;
25736 }
25737@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
25738 log->bios_event_log_end = log->bios_event_log + len;
25739
25740 virt = acpi_os_map_memory(start, len);
25741+ if (!virt) {
25742+ kfree(log->bios_event_log);
25743+ log->bios_event_log = NULL;
25744+ return -EFAULT;
25745+ }
25746
25747- memcpy(log->bios_event_log, virt, len);
25748+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
25749
25750 acpi_os_unmap_memory(virt, len);
25751 return 0;
25752diff -urNp linux-3.1.1/drivers/char/tpm/tpm.c linux-3.1.1/drivers/char/tpm/tpm.c
25753--- linux-3.1.1/drivers/char/tpm/tpm.c 2011-11-11 15:19:27.000000000 -0500
25754+++ linux-3.1.1/drivers/char/tpm/tpm.c 2011-11-16 18:40:10.000000000 -0500
25755@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_c
25756 chip->vendor.req_complete_val)
25757 goto out_recv;
25758
25759- if ((status == chip->vendor.req_canceled)) {
25760+ if (status == chip->vendor.req_canceled) {
25761 dev_err(chip->dev, "Operation Canceled\n");
25762 rc = -ECANCELED;
25763 goto out;
25764@@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *de
25765
25766 struct tpm_chip *chip = dev_get_drvdata(dev);
25767
25768+ pax_track_stack();
25769+
25770 tpm_cmd.header.in = tpm_readpubek_header;
25771 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
25772 "attempting to read the PUBEK");
25773diff -urNp linux-3.1.1/drivers/char/virtio_console.c linux-3.1.1/drivers/char/virtio_console.c
25774--- linux-3.1.1/drivers/char/virtio_console.c 2011-11-11 15:19:27.000000000 -0500
25775+++ linux-3.1.1/drivers/char/virtio_console.c 2011-11-16 18:39:07.000000000 -0500
25776@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
25777 if (to_user) {
25778 ssize_t ret;
25779
25780- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
25781+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
25782 if (ret)
25783 return -EFAULT;
25784 } else {
25785@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
25786 if (!port_has_data(port) && !port->host_connected)
25787 return 0;
25788
25789- return fill_readbuf(port, ubuf, count, true);
25790+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
25791 }
25792
25793 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
25794diff -urNp linux-3.1.1/drivers/crypto/hifn_795x.c linux-3.1.1/drivers/crypto/hifn_795x.c
25795--- linux-3.1.1/drivers/crypto/hifn_795x.c 2011-11-11 15:19:27.000000000 -0500
25796+++ linux-3.1.1/drivers/crypto/hifn_795x.c 2011-11-16 18:40:10.000000000 -0500
25797@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
25798 0xCA, 0x34, 0x2B, 0x2E};
25799 struct scatterlist sg;
25800
25801+ pax_track_stack();
25802+
25803 memset(src, 0, sizeof(src));
25804 memset(ctx.key, 0, sizeof(ctx.key));
25805
25806diff -urNp linux-3.1.1/drivers/crypto/padlock-aes.c linux-3.1.1/drivers/crypto/padlock-aes.c
25807--- linux-3.1.1/drivers/crypto/padlock-aes.c 2011-11-11 15:19:27.000000000 -0500
25808+++ linux-3.1.1/drivers/crypto/padlock-aes.c 2011-11-16 18:40:10.000000000 -0500
25809@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
25810 struct crypto_aes_ctx gen_aes;
25811 int cpu;
25812
25813+ pax_track_stack();
25814+
25815 if (key_len % 8) {
25816 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
25817 return -EINVAL;
25818diff -urNp linux-3.1.1/drivers/edac/amd64_edac.c linux-3.1.1/drivers/edac/amd64_edac.c
25819--- linux-3.1.1/drivers/edac/amd64_edac.c 2011-11-11 15:19:27.000000000 -0500
25820+++ linux-3.1.1/drivers/edac/amd64_edac.c 2011-11-16 18:39:07.000000000 -0500
25821@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_i
25822 * PCI core identifies what devices are on a system during boot, and then
25823 * inquiry this table to see if this driver is for a given device found.
25824 */
25825-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
25826+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
25827 {
25828 .vendor = PCI_VENDOR_ID_AMD,
25829 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
25830diff -urNp linux-3.1.1/drivers/edac/amd76x_edac.c linux-3.1.1/drivers/edac/amd76x_edac.c
25831--- linux-3.1.1/drivers/edac/amd76x_edac.c 2011-11-11 15:19:27.000000000 -0500
25832+++ linux-3.1.1/drivers/edac/amd76x_edac.c 2011-11-16 18:39:07.000000000 -0500
25833@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(
25834 edac_mc_free(mci);
25835 }
25836
25837-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
25838+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
25839 {
25840 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25841 AMD762},
25842diff -urNp linux-3.1.1/drivers/edac/e752x_edac.c linux-3.1.1/drivers/edac/e752x_edac.c
25843--- linux-3.1.1/drivers/edac/e752x_edac.c 2011-11-11 15:19:27.000000000 -0500
25844+++ linux-3.1.1/drivers/edac/e752x_edac.c 2011-11-16 18:39:07.000000000 -0500
25845@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(s
25846 edac_mc_free(mci);
25847 }
25848
25849-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
25850+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
25851 {
25852 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25853 E7520},
25854diff -urNp linux-3.1.1/drivers/edac/e7xxx_edac.c linux-3.1.1/drivers/edac/e7xxx_edac.c
25855--- linux-3.1.1/drivers/edac/e7xxx_edac.c 2011-11-11 15:19:27.000000000 -0500
25856+++ linux-3.1.1/drivers/edac/e7xxx_edac.c 2011-11-16 18:39:07.000000000 -0500
25857@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(s
25858 edac_mc_free(mci);
25859 }
25860
25861-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
25862+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
25863 {
25864 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25865 E7205},
25866diff -urNp linux-3.1.1/drivers/edac/edac_pci_sysfs.c linux-3.1.1/drivers/edac/edac_pci_sysfs.c
25867--- linux-3.1.1/drivers/edac/edac_pci_sysfs.c 2011-11-11 15:19:27.000000000 -0500
25868+++ linux-3.1.1/drivers/edac/edac_pci_sysfs.c 2011-11-16 18:39:07.000000000 -0500
25869@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
25870 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
25871 static int edac_pci_poll_msec = 1000; /* one second workq period */
25872
25873-static atomic_t pci_parity_count = ATOMIC_INIT(0);
25874-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
25875+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
25876+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
25877
25878 static struct kobject *edac_pci_top_main_kobj;
25879 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
25880@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25881 edac_printk(KERN_CRIT, EDAC_PCI,
25882 "Signaled System Error on %s\n",
25883 pci_name(dev));
25884- atomic_inc(&pci_nonparity_count);
25885+ atomic_inc_unchecked(&pci_nonparity_count);
25886 }
25887
25888 if (status & (PCI_STATUS_PARITY)) {
25889@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25890 "Master Data Parity Error on %s\n",
25891 pci_name(dev));
25892
25893- atomic_inc(&pci_parity_count);
25894+ atomic_inc_unchecked(&pci_parity_count);
25895 }
25896
25897 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25898@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25899 "Detected Parity Error on %s\n",
25900 pci_name(dev));
25901
25902- atomic_inc(&pci_parity_count);
25903+ atomic_inc_unchecked(&pci_parity_count);
25904 }
25905 }
25906
25907@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25908 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25909 "Signaled System Error on %s\n",
25910 pci_name(dev));
25911- atomic_inc(&pci_nonparity_count);
25912+ atomic_inc_unchecked(&pci_nonparity_count);
25913 }
25914
25915 if (status & (PCI_STATUS_PARITY)) {
25916@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25917 "Master Data Parity Error on "
25918 "%s\n", pci_name(dev));
25919
25920- atomic_inc(&pci_parity_count);
25921+ atomic_inc_unchecked(&pci_parity_count);
25922 }
25923
25924 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25925@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25926 "Detected Parity Error on %s\n",
25927 pci_name(dev));
25928
25929- atomic_inc(&pci_parity_count);
25930+ atomic_inc_unchecked(&pci_parity_count);
25931 }
25932 }
25933 }
25934@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25935 if (!check_pci_errors)
25936 return;
25937
25938- before_count = atomic_read(&pci_parity_count);
25939+ before_count = atomic_read_unchecked(&pci_parity_count);
25940
25941 /* scan all PCI devices looking for a Parity Error on devices and
25942 * bridges.
25943@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
25944 /* Only if operator has selected panic on PCI Error */
25945 if (edac_pci_get_panic_on_pe()) {
25946 /* If the count is different 'after' from 'before' */
25947- if (before_count != atomic_read(&pci_parity_count))
25948+ if (before_count != atomic_read_unchecked(&pci_parity_count))
25949 panic("EDAC: PCI Parity Error");
25950 }
25951 }
25952diff -urNp linux-3.1.1/drivers/edac/i3000_edac.c linux-3.1.1/drivers/edac/i3000_edac.c
25953--- linux-3.1.1/drivers/edac/i3000_edac.c 2011-11-11 15:19:27.000000000 -0500
25954+++ linux-3.1.1/drivers/edac/i3000_edac.c 2011-11-16 18:39:07.000000000 -0500
25955@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(s
25956 edac_mc_free(mci);
25957 }
25958
25959-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
25960+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
25961 {
25962 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25963 I3000},
25964diff -urNp linux-3.1.1/drivers/edac/i3200_edac.c linux-3.1.1/drivers/edac/i3200_edac.c
25965--- linux-3.1.1/drivers/edac/i3200_edac.c 2011-11-11 15:19:27.000000000 -0500
25966+++ linux-3.1.1/drivers/edac/i3200_edac.c 2011-11-16 18:39:07.000000000 -0500
25967@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(s
25968 edac_mc_free(mci);
25969 }
25970
25971-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
25972+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
25973 {
25974 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25975 I3200},
25976diff -urNp linux-3.1.1/drivers/edac/i5000_edac.c linux-3.1.1/drivers/edac/i5000_edac.c
25977--- linux-3.1.1/drivers/edac/i5000_edac.c 2011-11-11 15:19:27.000000000 -0500
25978+++ linux-3.1.1/drivers/edac/i5000_edac.c 2011-11-16 18:39:07.000000000 -0500
25979@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(s
25980 *
25981 * The "E500P" device is the first device supported.
25982 */
25983-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
25984+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
25985 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
25986 .driver_data = I5000P},
25987
25988diff -urNp linux-3.1.1/drivers/edac/i5100_edac.c linux-3.1.1/drivers/edac/i5100_edac.c
25989--- linux-3.1.1/drivers/edac/i5100_edac.c 2011-11-11 15:19:27.000000000 -0500
25990+++ linux-3.1.1/drivers/edac/i5100_edac.c 2011-11-16 18:39:07.000000000 -0500
25991@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(s
25992 edac_mc_free(mci);
25993 }
25994
25995-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
25996+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
25997 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
25998 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
25999 { 0, }
26000diff -urNp linux-3.1.1/drivers/edac/i5400_edac.c linux-3.1.1/drivers/edac/i5400_edac.c
26001--- linux-3.1.1/drivers/edac/i5400_edac.c 2011-11-11 15:19:27.000000000 -0500
26002+++ linux-3.1.1/drivers/edac/i5400_edac.c 2011-11-16 18:39:07.000000000 -0500
26003@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(s
26004 *
26005 * The "E500P" device is the first device supported.
26006 */
26007-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
26008+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
26009 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
26010 {0,} /* 0 terminated list. */
26011 };
26012diff -urNp linux-3.1.1/drivers/edac/i7300_edac.c linux-3.1.1/drivers/edac/i7300_edac.c
26013--- linux-3.1.1/drivers/edac/i7300_edac.c 2011-11-11 15:19:27.000000000 -0500
26014+++ linux-3.1.1/drivers/edac/i7300_edac.c 2011-11-16 18:39:07.000000000 -0500
26015@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(s
26016 *
26017 * Has only 8086:360c PCI ID
26018 */
26019-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
26020+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
26021 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
26022 {0,} /* 0 terminated list. */
26023 };
26024diff -urNp linux-3.1.1/drivers/edac/i7core_edac.c linux-3.1.1/drivers/edac/i7core_edac.c
26025--- linux-3.1.1/drivers/edac/i7core_edac.c 2011-11-11 15:19:27.000000000 -0500
26026+++ linux-3.1.1/drivers/edac/i7core_edac.c 2011-11-16 18:39:07.000000000 -0500
26027@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev
26028 /*
26029 * pci_device_id table for which devices we are looking for
26030 */
26031-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
26032+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
26033 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
26034 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
26035 {0,} /* 0 terminated list. */
26036diff -urNp linux-3.1.1/drivers/edac/i82443bxgx_edac.c linux-3.1.1/drivers/edac/i82443bxgx_edac.c
26037--- linux-3.1.1/drivers/edac/i82443bxgx_edac.c 2011-11-11 15:19:27.000000000 -0500
26038+++ linux-3.1.1/drivers/edac/i82443bxgx_edac.c 2011-11-16 18:39:07.000000000 -0500
26039@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_
26040
26041 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
26042
26043-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
26044+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
26045 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
26046 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
26047 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
26048diff -urNp linux-3.1.1/drivers/edac/i82860_edac.c linux-3.1.1/drivers/edac/i82860_edac.c
26049--- linux-3.1.1/drivers/edac/i82860_edac.c 2011-11-11 15:19:27.000000000 -0500
26050+++ linux-3.1.1/drivers/edac/i82860_edac.c 2011-11-16 18:39:07.000000000 -0500
26051@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(
26052 edac_mc_free(mci);
26053 }
26054
26055-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
26056+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
26057 {
26058 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26059 I82860},
26060diff -urNp linux-3.1.1/drivers/edac/i82875p_edac.c linux-3.1.1/drivers/edac/i82875p_edac.c
26061--- linux-3.1.1/drivers/edac/i82875p_edac.c 2011-11-11 15:19:27.000000000 -0500
26062+++ linux-3.1.1/drivers/edac/i82875p_edac.c 2011-11-16 18:39:07.000000000 -0500
26063@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one
26064 edac_mc_free(mci);
26065 }
26066
26067-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
26068+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
26069 {
26070 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26071 I82875P},
26072diff -urNp linux-3.1.1/drivers/edac/i82975x_edac.c linux-3.1.1/drivers/edac/i82975x_edac.c
26073--- linux-3.1.1/drivers/edac/i82975x_edac.c 2011-11-11 15:19:27.000000000 -0500
26074+++ linux-3.1.1/drivers/edac/i82975x_edac.c 2011-11-16 18:39:07.000000000 -0500
26075@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one
26076 edac_mc_free(mci);
26077 }
26078
26079-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
26080+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
26081 {
26082 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26083 I82975X
26084diff -urNp linux-3.1.1/drivers/edac/mce_amd.h linux-3.1.1/drivers/edac/mce_amd.h
26085--- linux-3.1.1/drivers/edac/mce_amd.h 2011-11-11 15:19:27.000000000 -0500
26086+++ linux-3.1.1/drivers/edac/mce_amd.h 2011-11-16 18:39:07.000000000 -0500
26087@@ -83,7 +83,7 @@ struct amd_decoder_ops {
26088 bool (*dc_mce)(u16, u8);
26089 bool (*ic_mce)(u16, u8);
26090 bool (*nb_mce)(u16, u8);
26091-};
26092+} __no_const;
26093
26094 void amd_report_gart_errors(bool);
26095 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
26096diff -urNp linux-3.1.1/drivers/edac/r82600_edac.c linux-3.1.1/drivers/edac/r82600_edac.c
26097--- linux-3.1.1/drivers/edac/r82600_edac.c 2011-11-11 15:19:27.000000000 -0500
26098+++ linux-3.1.1/drivers/edac/r82600_edac.c 2011-11-16 18:39:07.000000000 -0500
26099@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(
26100 edac_mc_free(mci);
26101 }
26102
26103-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
26104+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
26105 {
26106 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
26107 },
26108diff -urNp linux-3.1.1/drivers/edac/x38_edac.c linux-3.1.1/drivers/edac/x38_edac.c
26109--- linux-3.1.1/drivers/edac/x38_edac.c 2011-11-11 15:19:27.000000000 -0500
26110+++ linux-3.1.1/drivers/edac/x38_edac.c 2011-11-16 18:39:07.000000000 -0500
26111@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(str
26112 edac_mc_free(mci);
26113 }
26114
26115-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
26116+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
26117 {
26118 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26119 X38},
26120diff -urNp linux-3.1.1/drivers/firewire/core-card.c linux-3.1.1/drivers/firewire/core-card.c
26121--- linux-3.1.1/drivers/firewire/core-card.c 2011-11-11 15:19:27.000000000 -0500
26122+++ linux-3.1.1/drivers/firewire/core-card.c 2011-11-16 18:39:07.000000000 -0500
26123@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
26124
26125 void fw_core_remove_card(struct fw_card *card)
26126 {
26127- struct fw_card_driver dummy_driver = dummy_driver_template;
26128+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
26129
26130 card->driver->update_phy_reg(card, 4,
26131 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
26132diff -urNp linux-3.1.1/drivers/firewire/core-cdev.c linux-3.1.1/drivers/firewire/core-cdev.c
26133--- linux-3.1.1/drivers/firewire/core-cdev.c 2011-11-11 15:19:27.000000000 -0500
26134+++ linux-3.1.1/drivers/firewire/core-cdev.c 2011-11-16 18:39:07.000000000 -0500
26135@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct clie
26136 int ret;
26137
26138 if ((request->channels == 0 && request->bandwidth == 0) ||
26139- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
26140- request->bandwidth < 0)
26141+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
26142 return -EINVAL;
26143
26144 r = kmalloc(sizeof(*r), GFP_KERNEL);
26145diff -urNp linux-3.1.1/drivers/firewire/core.h linux-3.1.1/drivers/firewire/core.h
26146--- linux-3.1.1/drivers/firewire/core.h 2011-11-11 15:19:27.000000000 -0500
26147+++ linux-3.1.1/drivers/firewire/core.h 2011-11-16 18:39:07.000000000 -0500
26148@@ -101,6 +101,7 @@ struct fw_card_driver {
26149
26150 int (*stop_iso)(struct fw_iso_context *ctx);
26151 };
26152+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
26153
26154 void fw_card_initialize(struct fw_card *card,
26155 const struct fw_card_driver *driver, struct device *device);
26156diff -urNp linux-3.1.1/drivers/firewire/core-transaction.c linux-3.1.1/drivers/firewire/core-transaction.c
26157--- linux-3.1.1/drivers/firewire/core-transaction.c 2011-11-11 15:19:27.000000000 -0500
26158+++ linux-3.1.1/drivers/firewire/core-transaction.c 2011-11-16 18:40:10.000000000 -0500
26159@@ -37,6 +37,7 @@
26160 #include <linux/timer.h>
26161 #include <linux/types.h>
26162 #include <linux/workqueue.h>
26163+#include <linux/sched.h>
26164
26165 #include <asm/byteorder.h>
26166
26167@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
26168 struct transaction_callback_data d;
26169 struct fw_transaction t;
26170
26171+ pax_track_stack();
26172+
26173 init_timer_on_stack(&t.split_timeout_timer);
26174 init_completion(&d.done);
26175 d.payload = payload;
26176diff -urNp linux-3.1.1/drivers/firmware/dmi_scan.c linux-3.1.1/drivers/firmware/dmi_scan.c
26177--- linux-3.1.1/drivers/firmware/dmi_scan.c 2011-11-11 15:19:27.000000000 -0500
26178+++ linux-3.1.1/drivers/firmware/dmi_scan.c 2011-11-16 18:39:07.000000000 -0500
26179@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
26180 }
26181 }
26182 else {
26183- /*
26184- * no iounmap() for that ioremap(); it would be a no-op, but
26185- * it's so early in setup that sucker gets confused into doing
26186- * what it shouldn't if we actually call it.
26187- */
26188 p = dmi_ioremap(0xF0000, 0x10000);
26189 if (p == NULL)
26190 goto error;
26191@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
26192 if (buf == NULL)
26193 return -1;
26194
26195- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
26196+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
26197
26198 iounmap(buf);
26199 return 0;
26200diff -urNp linux-3.1.1/drivers/gpio/gpio-vr41xx.c linux-3.1.1/drivers/gpio/gpio-vr41xx.c
26201--- linux-3.1.1/drivers/gpio/gpio-vr41xx.c 2011-11-11 15:19:27.000000000 -0500
26202+++ linux-3.1.1/drivers/gpio/gpio-vr41xx.c 2011-11-16 18:39:07.000000000 -0500
26203@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
26204 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
26205 maskl, pendl, maskh, pendh);
26206
26207- atomic_inc(&irq_err_count);
26208+ atomic_inc_unchecked(&irq_err_count);
26209
26210 return -EINVAL;
26211 }
26212diff -urNp linux-3.1.1/drivers/gpu/drm/drm_crtc.c linux-3.1.1/drivers/gpu/drm/drm_crtc.c
26213--- linux-3.1.1/drivers/gpu/drm/drm_crtc.c 2011-11-11 15:19:27.000000000 -0500
26214+++ linux-3.1.1/drivers/gpu/drm/drm_crtc.c 2011-11-16 18:39:07.000000000 -0500
26215@@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_dev
26216 */
26217 if ((out_resp->count_modes >= mode_count) && mode_count) {
26218 copied = 0;
26219- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
26220+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
26221 list_for_each_entry(mode, &connector->modes, head) {
26222 drm_crtc_convert_to_umode(&u_mode, mode);
26223 if (copy_to_user(mode_ptr + copied,
26224@@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_dev
26225
26226 if ((out_resp->count_props >= props_count) && props_count) {
26227 copied = 0;
26228- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
26229- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
26230+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
26231+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
26232 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
26233 if (connector->property_ids[i] != 0) {
26234 if (put_user(connector->property_ids[i],
26235@@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_dev
26236
26237 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
26238 copied = 0;
26239- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
26240+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
26241 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
26242 if (connector->encoder_ids[i] != 0) {
26243 if (put_user(connector->encoder_ids[i],
26244@@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *
26245 }
26246
26247 for (i = 0; i < crtc_req->count_connectors; i++) {
26248- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
26249+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
26250 if (get_user(out_id, &set_connectors_ptr[i])) {
26251 ret = -EFAULT;
26252 goto out;
26253@@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
26254 fb = obj_to_fb(obj);
26255
26256 num_clips = r->num_clips;
26257- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
26258+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
26259
26260 if (!num_clips != !clips_ptr) {
26261 ret = -EINVAL;
26262@@ -2272,7 +2272,7 @@ int drm_mode_getproperty_ioctl(struct dr
26263 out_resp->flags = property->flags;
26264
26265 if ((out_resp->count_values >= value_count) && value_count) {
26266- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
26267+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
26268 for (i = 0; i < value_count; i++) {
26269 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
26270 ret = -EFAULT;
26271@@ -2285,7 +2285,7 @@ int drm_mode_getproperty_ioctl(struct dr
26272 if (property->flags & DRM_MODE_PROP_ENUM) {
26273 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
26274 copied = 0;
26275- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
26276+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
26277 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
26278
26279 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
26280@@ -2308,7 +2308,7 @@ int drm_mode_getproperty_ioctl(struct dr
26281 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
26282 copied = 0;
26283 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
26284- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
26285+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
26286
26287 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
26288 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
26289@@ -2369,7 +2369,7 @@ int drm_mode_getblob_ioctl(struct drm_de
26290 struct drm_mode_get_blob *out_resp = data;
26291 struct drm_property_blob *blob;
26292 int ret = 0;
26293- void *blob_ptr;
26294+ void __user *blob_ptr;
26295
26296 if (!drm_core_check_feature(dev, DRIVER_MODESET))
26297 return -EINVAL;
26298@@ -2383,7 +2383,7 @@ int drm_mode_getblob_ioctl(struct drm_de
26299 blob = obj_to_blob(obj);
26300
26301 if (out_resp->length == blob->length) {
26302- blob_ptr = (void *)(unsigned long)out_resp->data;
26303+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
26304 if (copy_to_user(blob_ptr, blob->data, blob->length)){
26305 ret = -EFAULT;
26306 goto done;
26307diff -urNp linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c
26308--- linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c 2011-11-11 15:19:27.000000000 -0500
26309+++ linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c 2011-11-16 18:40:10.000000000 -0500
26310@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
26311 struct drm_crtc *tmp;
26312 int crtc_mask = 1;
26313
26314- WARN(!crtc, "checking null crtc?\n");
26315+ BUG_ON(!crtc);
26316
26317 dev = crtc->dev;
26318
26319@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
26320 struct drm_encoder *encoder;
26321 bool ret = true;
26322
26323+ pax_track_stack();
26324+
26325 crtc->enabled = drm_helper_crtc_in_use(crtc);
26326 if (!crtc->enabled)
26327 return true;
26328diff -urNp linux-3.1.1/drivers/gpu/drm/drm_drv.c linux-3.1.1/drivers/gpu/drm/drm_drv.c
26329--- linux-3.1.1/drivers/gpu/drm/drm_drv.c 2011-11-11 15:19:27.000000000 -0500
26330+++ linux-3.1.1/drivers/gpu/drm/drm_drv.c 2011-11-16 18:39:07.000000000 -0500
26331@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
26332 /**
26333 * Copy and IOCTL return string to user space
26334 */
26335-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
26336+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
26337 {
26338 int len;
26339
26340@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
26341
26342 dev = file_priv->minor->dev;
26343 atomic_inc(&dev->ioctl_count);
26344- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
26345+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
26346 ++file_priv->ioctl_count;
26347
26348 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
26349diff -urNp linux-3.1.1/drivers/gpu/drm/drm_fops.c linux-3.1.1/drivers/gpu/drm/drm_fops.c
26350--- linux-3.1.1/drivers/gpu/drm/drm_fops.c 2011-11-11 15:19:27.000000000 -0500
26351+++ linux-3.1.1/drivers/gpu/drm/drm_fops.c 2011-11-16 18:39:07.000000000 -0500
26352@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
26353 }
26354
26355 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
26356- atomic_set(&dev->counts[i], 0);
26357+ atomic_set_unchecked(&dev->counts[i], 0);
26358
26359 dev->sigdata.lock = NULL;
26360
26361@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
26362
26363 retcode = drm_open_helper(inode, filp, dev);
26364 if (!retcode) {
26365- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
26366- if (!dev->open_count++)
26367+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
26368+ if (local_inc_return(&dev->open_count) == 1)
26369 retcode = drm_setup(dev);
26370 }
26371 if (!retcode) {
26372@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
26373
26374 mutex_lock(&drm_global_mutex);
26375
26376- DRM_DEBUG("open_count = %d\n", dev->open_count);
26377+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
26378
26379 if (dev->driver->preclose)
26380 dev->driver->preclose(dev, file_priv);
26381@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
26382 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
26383 task_pid_nr(current),
26384 (long)old_encode_dev(file_priv->minor->device),
26385- dev->open_count);
26386+ local_read(&dev->open_count));
26387
26388 /* if the master has gone away we can't do anything with the lock */
26389 if (file_priv->minor->master)
26390@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
26391 * End inline drm_release
26392 */
26393
26394- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
26395- if (!--dev->open_count) {
26396+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
26397+ if (local_dec_and_test(&dev->open_count)) {
26398 if (atomic_read(&dev->ioctl_count)) {
26399 DRM_ERROR("Device busy: %d\n",
26400 atomic_read(&dev->ioctl_count));
26401diff -urNp linux-3.1.1/drivers/gpu/drm/drm_global.c linux-3.1.1/drivers/gpu/drm/drm_global.c
26402--- linux-3.1.1/drivers/gpu/drm/drm_global.c 2011-11-11 15:19:27.000000000 -0500
26403+++ linux-3.1.1/drivers/gpu/drm/drm_global.c 2011-11-16 18:39:07.000000000 -0500
26404@@ -36,7 +36,7 @@
26405 struct drm_global_item {
26406 struct mutex mutex;
26407 void *object;
26408- int refcount;
26409+ atomic_t refcount;
26410 };
26411
26412 static struct drm_global_item glob[DRM_GLOBAL_NUM];
26413@@ -49,7 +49,7 @@ void drm_global_init(void)
26414 struct drm_global_item *item = &glob[i];
26415 mutex_init(&item->mutex);
26416 item->object = NULL;
26417- item->refcount = 0;
26418+ atomic_set(&item->refcount, 0);
26419 }
26420 }
26421
26422@@ -59,7 +59,7 @@ void drm_global_release(void)
26423 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
26424 struct drm_global_item *item = &glob[i];
26425 BUG_ON(item->object != NULL);
26426- BUG_ON(item->refcount != 0);
26427+ BUG_ON(atomic_read(&item->refcount) != 0);
26428 }
26429 }
26430
26431@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
26432 void *object;
26433
26434 mutex_lock(&item->mutex);
26435- if (item->refcount == 0) {
26436+ if (atomic_read(&item->refcount) == 0) {
26437 item->object = kzalloc(ref->size, GFP_KERNEL);
26438 if (unlikely(item->object == NULL)) {
26439 ret = -ENOMEM;
26440@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
26441 goto out_err;
26442
26443 }
26444- ++item->refcount;
26445+ atomic_inc(&item->refcount);
26446 ref->object = item->object;
26447 object = item->object;
26448 mutex_unlock(&item->mutex);
26449@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
26450 struct drm_global_item *item = &glob[ref->global_type];
26451
26452 mutex_lock(&item->mutex);
26453- BUG_ON(item->refcount == 0);
26454+ BUG_ON(atomic_read(&item->refcount) == 0);
26455 BUG_ON(ref->object != item->object);
26456- if (--item->refcount == 0) {
26457+ if (atomic_dec_and_test(&item->refcount)) {
26458 ref->release(ref);
26459 item->object = NULL;
26460 }
26461diff -urNp linux-3.1.1/drivers/gpu/drm/drm_info.c linux-3.1.1/drivers/gpu/drm/drm_info.c
26462--- linux-3.1.1/drivers/gpu/drm/drm_info.c 2011-11-11 15:19:27.000000000 -0500
26463+++ linux-3.1.1/drivers/gpu/drm/drm_info.c 2011-11-16 18:40:10.000000000 -0500
26464@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
26465 struct drm_local_map *map;
26466 struct drm_map_list *r_list;
26467
26468- /* Hardcoded from _DRM_FRAME_BUFFER,
26469- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
26470- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
26471- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
26472+ static const char * const types[] = {
26473+ [_DRM_FRAME_BUFFER] = "FB",
26474+ [_DRM_REGISTERS] = "REG",
26475+ [_DRM_SHM] = "SHM",
26476+ [_DRM_AGP] = "AGP",
26477+ [_DRM_SCATTER_GATHER] = "SG",
26478+ [_DRM_CONSISTENT] = "PCI",
26479+ [_DRM_GEM] = "GEM" };
26480 const char *type;
26481 int i;
26482
26483@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
26484 map = r_list->map;
26485 if (!map)
26486 continue;
26487- if (map->type < 0 || map->type > 5)
26488+ if (map->type >= ARRAY_SIZE(types))
26489 type = "??";
26490 else
26491 type = types[map->type];
26492@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
26493 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
26494 vma->vm_flags & VM_LOCKED ? 'l' : '-',
26495 vma->vm_flags & VM_IO ? 'i' : '-',
26496+#ifdef CONFIG_GRKERNSEC_HIDESYM
26497+ 0);
26498+#else
26499 vma->vm_pgoff);
26500+#endif
26501
26502 #if defined(__i386__)
26503 pgprot = pgprot_val(vma->vm_page_prot);
26504diff -urNp linux-3.1.1/drivers/gpu/drm/drm_ioc32.c linux-3.1.1/drivers/gpu/drm/drm_ioc32.c
26505--- linux-3.1.1/drivers/gpu/drm/drm_ioc32.c 2011-11-11 15:19:27.000000000 -0500
26506+++ linux-3.1.1/drivers/gpu/drm/drm_ioc32.c 2011-11-16 18:39:07.000000000 -0500
26507@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
26508 request = compat_alloc_user_space(nbytes);
26509 if (!access_ok(VERIFY_WRITE, request, nbytes))
26510 return -EFAULT;
26511- list = (struct drm_buf_desc *) (request + 1);
26512+ list = (struct drm_buf_desc __user *) (request + 1);
26513
26514 if (__put_user(count, &request->count)
26515 || __put_user(list, &request->list))
26516@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
26517 request = compat_alloc_user_space(nbytes);
26518 if (!access_ok(VERIFY_WRITE, request, nbytes))
26519 return -EFAULT;
26520- list = (struct drm_buf_pub *) (request + 1);
26521+ list = (struct drm_buf_pub __user *) (request + 1);
26522
26523 if (__put_user(count, &request->count)
26524 || __put_user(list, &request->list))
26525diff -urNp linux-3.1.1/drivers/gpu/drm/drm_ioctl.c linux-3.1.1/drivers/gpu/drm/drm_ioctl.c
26526--- linux-3.1.1/drivers/gpu/drm/drm_ioctl.c 2011-11-11 15:19:27.000000000 -0500
26527+++ linux-3.1.1/drivers/gpu/drm/drm_ioctl.c 2011-11-16 18:39:07.000000000 -0500
26528@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
26529 stats->data[i].value =
26530 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
26531 else
26532- stats->data[i].value = atomic_read(&dev->counts[i]);
26533+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
26534 stats->data[i].type = dev->types[i];
26535 }
26536
26537diff -urNp linux-3.1.1/drivers/gpu/drm/drm_lock.c linux-3.1.1/drivers/gpu/drm/drm_lock.c
26538--- linux-3.1.1/drivers/gpu/drm/drm_lock.c 2011-11-11 15:19:27.000000000 -0500
26539+++ linux-3.1.1/drivers/gpu/drm/drm_lock.c 2011-11-16 18:39:07.000000000 -0500
26540@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
26541 if (drm_lock_take(&master->lock, lock->context)) {
26542 master->lock.file_priv = file_priv;
26543 master->lock.lock_time = jiffies;
26544- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
26545+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
26546 break; /* Got lock */
26547 }
26548
26549@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
26550 return -EINVAL;
26551 }
26552
26553- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
26554+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
26555
26556 if (drm_lock_free(&master->lock, lock->context)) {
26557 /* FIXME: Should really bail out here. */
26558diff -urNp linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c
26559--- linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c 2011-11-11 15:19:27.000000000 -0500
26560+++ linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c 2011-11-16 18:39:07.000000000 -0500
26561@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
26562 dma->buflist[vertex->idx],
26563 vertex->discard, vertex->used);
26564
26565- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26566- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26567+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26568+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26569 sarea_priv->last_enqueue = dev_priv->counter - 1;
26570 sarea_priv->last_dispatch = (int)hw_status[5];
26571
26572@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
26573 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
26574 mc->last_render);
26575
26576- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26577- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26578+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26579+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26580 sarea_priv->last_enqueue = dev_priv->counter - 1;
26581 sarea_priv->last_dispatch = (int)hw_status[5];
26582
26583diff -urNp linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h
26584--- linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h 2011-11-11 15:19:27.000000000 -0500
26585+++ linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h 2011-11-16 18:39:07.000000000 -0500
26586@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
26587 int page_flipping;
26588
26589 wait_queue_head_t irq_queue;
26590- atomic_t irq_received;
26591- atomic_t irq_emitted;
26592+ atomic_unchecked_t irq_received;
26593+ atomic_unchecked_t irq_emitted;
26594
26595 int front_offset;
26596 } drm_i810_private_t;
26597diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c
26598--- linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-11 15:19:27.000000000 -0500
26599+++ linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-16 18:39:07.000000000 -0500
26600@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
26601 I915_READ(GTIMR));
26602 }
26603 seq_printf(m, "Interrupts received: %d\n",
26604- atomic_read(&dev_priv->irq_received));
26605+ atomic_read_unchecked(&dev_priv->irq_received));
26606 for (i = 0; i < I915_NUM_RINGS; i++) {
26607 if (IS_GEN6(dev) || IS_GEN7(dev)) {
26608 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
26609@@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file
26610 return ret;
26611
26612 if (opregion->header)
26613- seq_write(m, opregion->header, OPREGION_SIZE);
26614+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
26615
26616 mutex_unlock(&dev->struct_mutex);
26617
26618diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c
26619--- linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c 2011-11-11 15:19:27.000000000 -0500
26620+++ linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c 2011-11-16 18:39:07.000000000 -0500
26621@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
26622 bool can_switch;
26623
26624 spin_lock(&dev->count_lock);
26625- can_switch = (dev->open_count == 0);
26626+ can_switch = (local_read(&dev->open_count) == 0);
26627 spin_unlock(&dev->count_lock);
26628 return can_switch;
26629 }
26630diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h
26631--- linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h 2011-11-11 15:19:27.000000000 -0500
26632+++ linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h 2011-11-16 18:39:07.000000000 -0500
26633@@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
26634 /* render clock increase/decrease */
26635 /* display clock increase/decrease */
26636 /* pll clock increase/decrease */
26637-};
26638+} __no_const;
26639
26640 struct intel_device_info {
26641 u8 gen;
26642@@ -305,7 +305,7 @@ typedef struct drm_i915_private {
26643 int current_page;
26644 int page_flipping;
26645
26646- atomic_t irq_received;
26647+ atomic_unchecked_t irq_received;
26648
26649 /* protects the irq masks */
26650 spinlock_t irq_lock;
26651@@ -882,7 +882,7 @@ struct drm_i915_gem_object {
26652 * will be page flipped away on the next vblank. When it
26653 * reaches 0, dev_priv->pending_flip_queue will be woken up.
26654 */
26655- atomic_t pending_flip;
26656+ atomic_unchecked_t pending_flip;
26657 };
26658
26659 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
26660@@ -1262,7 +1262,7 @@ extern int intel_setup_gmbus(struct drm_
26661 extern void intel_teardown_gmbus(struct drm_device *dev);
26662 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
26663 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
26664-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26665+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26666 {
26667 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
26668 }
26669diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c
26670--- linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-11-11 15:19:27.000000000 -0500
26671+++ linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-11-16 18:39:07.000000000 -0500
26672@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
26673 i915_gem_clflush_object(obj);
26674
26675 if (obj->base.pending_write_domain)
26676- cd->flips |= atomic_read(&obj->pending_flip);
26677+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
26678
26679 /* The actual obj->write_domain will be updated with
26680 * pending_write_domain after we emit the accumulated flush for all
26681diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c
26682--- linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c 2011-11-11 15:19:27.000000000 -0500
26683+++ linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c 2011-11-16 18:39:07.000000000 -0500
26684@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler
26685 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
26686 struct drm_i915_master_private *master_priv;
26687
26688- atomic_inc(&dev_priv->irq_received);
26689+ atomic_inc_unchecked(&dev_priv->irq_received);
26690
26691 /* disable master interrupt before clearing iir */
26692 de_ier = I915_READ(DEIER);
26693@@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(
26694 struct drm_i915_master_private *master_priv;
26695 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
26696
26697- atomic_inc(&dev_priv->irq_received);
26698+ atomic_inc_unchecked(&dev_priv->irq_received);
26699
26700 if (IS_GEN6(dev))
26701 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
26702@@ -1228,7 +1228,7 @@ static irqreturn_t i915_driver_irq_handl
26703 int ret = IRQ_NONE, pipe;
26704 bool blc_event = false;
26705
26706- atomic_inc(&dev_priv->irq_received);
26707+ atomic_inc_unchecked(&dev_priv->irq_received);
26708
26709 iir = I915_READ(IIR);
26710
26711@@ -1740,7 +1740,7 @@ static void ironlake_irq_preinstall(stru
26712 {
26713 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26714
26715- atomic_set(&dev_priv->irq_received, 0);
26716+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26717
26718 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26719 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26720@@ -1904,7 +1904,7 @@ static void i915_driver_irq_preinstall(s
26721 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26722 int pipe;
26723
26724- atomic_set(&dev_priv->irq_received, 0);
26725+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26726
26727 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26728 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26729diff -urNp linux-3.1.1/drivers/gpu/drm/i915/intel_display.c linux-3.1.1/drivers/gpu/drm/i915/intel_display.c
26730--- linux-3.1.1/drivers/gpu/drm/i915/intel_display.c 2011-11-11 15:19:27.000000000 -0500
26731+++ linux-3.1.1/drivers/gpu/drm/i915/intel_display.c 2011-11-16 18:39:07.000000000 -0500
26732@@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crt
26733
26734 wait_event(dev_priv->pending_flip_queue,
26735 atomic_read(&dev_priv->mm.wedged) ||
26736- atomic_read(&obj->pending_flip) == 0);
26737+ atomic_read_unchecked(&obj->pending_flip) == 0);
26738
26739 /* Big Hammer, we also need to ensure that any pending
26740 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
26741@@ -2824,7 +2824,7 @@ static void intel_crtc_wait_for_pending_
26742 obj = to_intel_framebuffer(crtc->fb)->obj;
26743 dev_priv = crtc->dev->dev_private;
26744 wait_event(dev_priv->pending_flip_queue,
26745- atomic_read(&obj->pending_flip) == 0);
26746+ atomic_read_unchecked(&obj->pending_flip) == 0);
26747 }
26748
26749 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
26750@@ -6644,7 +6644,7 @@ static void do_intel_finish_page_flip(st
26751
26752 atomic_clear_mask(1 << intel_crtc->plane,
26753 &obj->pending_flip.counter);
26754- if (atomic_read(&obj->pending_flip) == 0)
26755+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
26756 wake_up(&dev_priv->pending_flip_queue);
26757
26758 schedule_work(&work->work);
26759@@ -6933,7 +6933,7 @@ static int intel_crtc_page_flip(struct d
26760 /* Block clients from rendering to the new back buffer until
26761 * the flip occurs and the object is no longer visible.
26762 */
26763- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26764+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26765
26766 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
26767 if (ret)
26768@@ -6947,7 +6947,7 @@ static int intel_crtc_page_flip(struct d
26769 return 0;
26770
26771 cleanup_pending:
26772- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26773+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26774 cleanup_objs:
26775 drm_gem_object_unreference(&work->old_fb_obj->base);
26776 drm_gem_object_unreference(&obj->base);
26777diff -urNp linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h
26778--- linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h 2011-11-11 15:19:27.000000000 -0500
26779+++ linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h 2011-11-16 18:39:07.000000000 -0500
26780@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
26781 u32 clear_cmd;
26782 u32 maccess;
26783
26784- atomic_t vbl_received; /**< Number of vblanks received. */
26785+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
26786 wait_queue_head_t fence_queue;
26787- atomic_t last_fence_retired;
26788+ atomic_unchecked_t last_fence_retired;
26789 u32 next_fence_to_post;
26790
26791 unsigned int fb_cpp;
26792diff -urNp linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c
26793--- linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c 2011-11-11 15:19:27.000000000 -0500
26794+++ linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c 2011-11-16 18:39:07.000000000 -0500
26795@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
26796 if (crtc != 0)
26797 return 0;
26798
26799- return atomic_read(&dev_priv->vbl_received);
26800+ return atomic_read_unchecked(&dev_priv->vbl_received);
26801 }
26802
26803
26804@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26805 /* VBLANK interrupt */
26806 if (status & MGA_VLINEPEN) {
26807 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
26808- atomic_inc(&dev_priv->vbl_received);
26809+ atomic_inc_unchecked(&dev_priv->vbl_received);
26810 drm_handle_vblank(dev, 0);
26811 handled = 1;
26812 }
26813@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26814 if ((prim_start & ~0x03) != (prim_end & ~0x03))
26815 MGA_WRITE(MGA_PRIMEND, prim_end);
26816
26817- atomic_inc(&dev_priv->last_fence_retired);
26818+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
26819 DRM_WAKEUP(&dev_priv->fence_queue);
26820 handled = 1;
26821 }
26822@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
26823 * using fences.
26824 */
26825 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
26826- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
26827+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
26828 - *sequence) <= (1 << 23)));
26829
26830 *sequence = cur_fence;
26831diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c
26832--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-11-11 15:19:27.000000000 -0500
26833+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-11-16 18:39:07.000000000 -0500
26834@@ -201,7 +201,7 @@ struct methods {
26835 const char desc[8];
26836 void (*loadbios)(struct drm_device *, uint8_t *);
26837 const bool rw;
26838-};
26839+} __do_const;
26840
26841 static struct methods shadow_methods[] = {
26842 { "PRAMIN", load_vbios_pramin, true },
26843@@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct d
26844 struct bit_table {
26845 const char id;
26846 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
26847-};
26848+} __no_const;
26849
26850 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
26851
26852diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h
26853--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-11-11 15:19:27.000000000 -0500
26854+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-11-16 18:39:07.000000000 -0500
26855@@ -238,7 +238,7 @@ struct nouveau_channel {
26856 struct list_head pending;
26857 uint32_t sequence;
26858 uint32_t sequence_ack;
26859- atomic_t last_sequence_irq;
26860+ atomic_unchecked_t last_sequence_irq;
26861 struct nouveau_vma vma;
26862 } fence;
26863
26864@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
26865 u32 handle, u16 class);
26866 void (*set_tile_region)(struct drm_device *dev, int i);
26867 void (*tlb_flush)(struct drm_device *, int engine);
26868-};
26869+} __no_const;
26870
26871 struct nouveau_instmem_engine {
26872 void *priv;
26873@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
26874 struct nouveau_mc_engine {
26875 int (*init)(struct drm_device *dev);
26876 void (*takedown)(struct drm_device *dev);
26877-};
26878+} __no_const;
26879
26880 struct nouveau_timer_engine {
26881 int (*init)(struct drm_device *dev);
26882 void (*takedown)(struct drm_device *dev);
26883 uint64_t (*read)(struct drm_device *dev);
26884-};
26885+} __no_const;
26886
26887 struct nouveau_fb_engine {
26888 int num_tiles;
26889@@ -513,7 +513,7 @@ struct nouveau_vram_engine {
26890 void (*put)(struct drm_device *, struct nouveau_mem **);
26891
26892 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
26893-};
26894+} __no_const;
26895
26896 struct nouveau_engine {
26897 struct nouveau_instmem_engine instmem;
26898@@ -660,7 +660,7 @@ struct drm_nouveau_private {
26899 struct drm_global_reference mem_global_ref;
26900 struct ttm_bo_global_ref bo_global_ref;
26901 struct ttm_bo_device bdev;
26902- atomic_t validate_sequence;
26903+ atomic_unchecked_t validate_sequence;
26904 } ttm;
26905
26906 struct {
26907diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c
26908--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-11-11 15:19:27.000000000 -0500
26909+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-11-16 18:39:07.000000000 -0500
26910@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
26911 if (USE_REFCNT(dev))
26912 sequence = nvchan_rd32(chan, 0x48);
26913 else
26914- sequence = atomic_read(&chan->fence.last_sequence_irq);
26915+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
26916
26917 if (chan->fence.sequence_ack == sequence)
26918 goto out;
26919@@ -541,7 +541,7 @@ nouveau_fence_channel_init(struct nouvea
26920
26921 INIT_LIST_HEAD(&chan->fence.pending);
26922 spin_lock_init(&chan->fence.lock);
26923- atomic_set(&chan->fence.last_sequence_irq, 0);
26924+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
26925 return 0;
26926 }
26927
26928diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c
26929--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-11-11 15:19:27.000000000 -0500
26930+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-11-16 18:39:07.000000000 -0500
26931@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *ch
26932 int trycnt = 0;
26933 int ret, i;
26934
26935- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
26936+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
26937 retry:
26938 if (++trycnt > 100000) {
26939 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
26940diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c
26941--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c 2011-11-11 15:19:27.000000000 -0500
26942+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c 2011-11-16 18:39:07.000000000 -0500
26943@@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switc
26944 bool can_switch;
26945
26946 spin_lock(&dev->count_lock);
26947- can_switch = (dev->open_count == 0);
26948+ can_switch = (local_read(&dev->open_count) == 0);
26949 spin_unlock(&dev->count_lock);
26950 return can_switch;
26951 }
26952diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c
26953--- linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c 2011-11-11 15:19:27.000000000 -0500
26954+++ linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c 2011-11-16 18:39:07.000000000 -0500
26955@@ -554,7 +554,7 @@ static int
26956 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
26957 u32 class, u32 mthd, u32 data)
26958 {
26959- atomic_set(&chan->fence.last_sequence_irq, data);
26960+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
26961 return 0;
26962 }
26963
26964diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c
26965--- linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c 2011-11-11 15:19:27.000000000 -0500
26966+++ linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c 2011-11-16 18:39:07.000000000 -0500
26967@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
26968
26969 /* GH: Simple idle check.
26970 */
26971- atomic_set(&dev_priv->idle_count, 0);
26972+ atomic_set_unchecked(&dev_priv->idle_count, 0);
26973
26974 /* We don't support anything other than bus-mastering ring mode,
26975 * but the ring can be in either AGP or PCI space for the ring
26976diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h
26977--- linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h 2011-11-11 15:19:27.000000000 -0500
26978+++ linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h 2011-11-16 18:39:07.000000000 -0500
26979@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
26980 int is_pci;
26981 unsigned long cce_buffers_offset;
26982
26983- atomic_t idle_count;
26984+ atomic_unchecked_t idle_count;
26985
26986 int page_flipping;
26987 int current_page;
26988 u32 crtc_offset;
26989 u32 crtc_offset_cntl;
26990
26991- atomic_t vbl_received;
26992+ atomic_unchecked_t vbl_received;
26993
26994 u32 color_fmt;
26995 unsigned int front_offset;
26996diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c
26997--- linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c 2011-11-11 15:19:27.000000000 -0500
26998+++ linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c 2011-11-16 18:39:07.000000000 -0500
26999@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
27000 if (crtc != 0)
27001 return 0;
27002
27003- return atomic_read(&dev_priv->vbl_received);
27004+ return atomic_read_unchecked(&dev_priv->vbl_received);
27005 }
27006
27007 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
27008@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
27009 /* VBLANK interrupt */
27010 if (status & R128_CRTC_VBLANK_INT) {
27011 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
27012- atomic_inc(&dev_priv->vbl_received);
27013+ atomic_inc_unchecked(&dev_priv->vbl_received);
27014 drm_handle_vblank(dev, 0);
27015 return IRQ_HANDLED;
27016 }
27017diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_state.c linux-3.1.1/drivers/gpu/drm/r128/r128_state.c
27018--- linux-3.1.1/drivers/gpu/drm/r128/r128_state.c 2011-11-11 15:19:27.000000000 -0500
27019+++ linux-3.1.1/drivers/gpu/drm/r128/r128_state.c 2011-11-16 18:39:07.000000000 -0500
27020@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
27021
27022 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
27023 {
27024- if (atomic_read(&dev_priv->idle_count) == 0)
27025+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
27026 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
27027 else
27028- atomic_set(&dev_priv->idle_count, 0);
27029+ atomic_set_unchecked(&dev_priv->idle_count, 0);
27030 }
27031
27032 #endif
27033diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/atom.c linux-3.1.1/drivers/gpu/drm/radeon/atom.c
27034--- linux-3.1.1/drivers/gpu/drm/radeon/atom.c 2011-11-11 15:19:27.000000000 -0500
27035+++ linux-3.1.1/drivers/gpu/drm/radeon/atom.c 2011-11-16 19:09:42.000000000 -0500
27036@@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct c
27037 char name[512];
27038 int i;
27039
27040+ pax_track_stack();
27041+
27042 if (!ctx)
27043 return NULL;
27044
27045diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c
27046--- linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c 2011-11-11 15:19:27.000000000 -0500
27047+++ linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c 2011-11-16 18:39:07.000000000 -0500
27048@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
27049 regex_t mask_rex;
27050 regmatch_t match[4];
27051 char buf[1024];
27052- size_t end;
27053+ long end;
27054 int len;
27055 int done = 0;
27056 int r;
27057 unsigned o;
27058 struct offset *offset;
27059 char last_reg_s[10];
27060- int last_reg;
27061+ unsigned long last_reg;
27062
27063 if (regcomp
27064 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
27065diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c
27066--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-11 15:19:27.000000000 -0500
27067+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-16 18:40:10.000000000 -0500
27068@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
27069 struct radeon_gpio_rec gpio;
27070 struct radeon_hpd hpd;
27071
27072+ pax_track_stack();
27073+
27074 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
27075 return false;
27076
27077diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c
27078--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c 2011-11-11 15:19:27.000000000 -0500
27079+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c 2011-11-16 18:39:07.000000000 -0500
27080@@ -684,7 +684,7 @@ static bool radeon_switcheroo_can_switch
27081 bool can_switch;
27082
27083 spin_lock(&dev->count_lock);
27084- can_switch = (dev->open_count == 0);
27085+ can_switch = (local_read(&dev->open_count) == 0);
27086 spin_unlock(&dev->count_lock);
27087 return can_switch;
27088 }
27089diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c
27090--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c 2011-11-11 15:19:27.000000000 -0500
27091+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c 2011-11-16 18:40:10.000000000 -0500
27092@@ -925,6 +925,8 @@ void radeon_compute_pll_legacy(struct ra
27093 uint32_t post_div;
27094 u32 pll_out_min, pll_out_max;
27095
27096+ pax_track_stack();
27097+
27098 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
27099 freq = freq * 1000;
27100
27101diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h
27102--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-11 15:19:27.000000000 -0500
27103+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-16 18:39:07.000000000 -0500
27104@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
27105
27106 /* SW interrupt */
27107 wait_queue_head_t swi_queue;
27108- atomic_t swi_emitted;
27109+ atomic_unchecked_t swi_emitted;
27110 int vblank_crtc;
27111 uint32_t irq_enable_reg;
27112 uint32_t r500_disp_irq_reg;
27113diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c
27114--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-11 15:19:27.000000000 -0500
27115+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-16 18:39:07.000000000 -0500
27116@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
27117 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
27118 return 0;
27119 }
27120- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
27121+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
27122 if (!rdev->cp.ready)
27123 /* FIXME: cp is not running assume everythings is done right
27124 * away
27125@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
27126 return r;
27127 }
27128 radeon_fence_write(rdev, 0);
27129- atomic_set(&rdev->fence_drv.seq, 0);
27130+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
27131 INIT_LIST_HEAD(&rdev->fence_drv.created);
27132 INIT_LIST_HEAD(&rdev->fence_drv.emited);
27133 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
27134diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon.h linux-3.1.1/drivers/gpu/drm/radeon/radeon.h
27135--- linux-3.1.1/drivers/gpu/drm/radeon/radeon.h 2011-11-11 15:19:27.000000000 -0500
27136+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon.h 2011-11-16 18:39:07.000000000 -0500
27137@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_d
27138 */
27139 struct radeon_fence_driver {
27140 uint32_t scratch_reg;
27141- atomic_t seq;
27142+ atomic_unchecked_t seq;
27143 uint32_t last_seq;
27144 unsigned long last_jiffies;
27145 unsigned long last_timeout;
27146@@ -962,7 +962,7 @@ struct radeon_asic {
27147 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
27148 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
27149 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
27150-};
27151+} __no_const;
27152
27153 /*
27154 * Asic structures
27155diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c
27156--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-11 15:19:27.000000000 -0500
27157+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-16 18:39:07.000000000 -0500
27158@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
27159 request = compat_alloc_user_space(sizeof(*request));
27160 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
27161 || __put_user(req32.param, &request->param)
27162- || __put_user((void __user *)(unsigned long)req32.value,
27163+ || __put_user((unsigned long)req32.value,
27164 &request->value))
27165 return -EFAULT;
27166
27167diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c
27168--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-11 15:19:27.000000000 -0500
27169+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-16 18:39:07.000000000 -0500
27170@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
27171 unsigned int ret;
27172 RING_LOCALS;
27173
27174- atomic_inc(&dev_priv->swi_emitted);
27175- ret = atomic_read(&dev_priv->swi_emitted);
27176+ atomic_inc_unchecked(&dev_priv->swi_emitted);
27177+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
27178
27179 BEGIN_RING(4);
27180 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
27181@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
27182 drm_radeon_private_t *dev_priv =
27183 (drm_radeon_private_t *) dev->dev_private;
27184
27185- atomic_set(&dev_priv->swi_emitted, 0);
27186+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
27187 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
27188
27189 dev->max_vblank_count = 0x001fffff;
27190diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c
27191--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c 2011-11-11 15:19:27.000000000 -0500
27192+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c 2011-11-16 18:39:07.000000000 -0500
27193@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
27194 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
27195 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
27196
27197- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
27198+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
27199 sarea_priv->nbox * sizeof(depth_boxes[0])))
27200 return -EFAULT;
27201
27202@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
27203 {
27204 drm_radeon_private_t *dev_priv = dev->dev_private;
27205 drm_radeon_getparam_t *param = data;
27206- int value;
27207+ int value = 0;
27208
27209 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
27210
27211diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c
27212--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-11 15:19:27.000000000 -0500
27213+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-16 18:39:07.000000000 -0500
27214@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struc
27215 }
27216 if (unlikely(ttm_vm_ops == NULL)) {
27217 ttm_vm_ops = vma->vm_ops;
27218- radeon_ttm_vm_ops = *ttm_vm_ops;
27219- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
27220+ pax_open_kernel();
27221+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
27222+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
27223+ pax_close_kernel();
27224 }
27225 vma->vm_ops = &radeon_ttm_vm_ops;
27226 return 0;
27227diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/rs690.c linux-3.1.1/drivers/gpu/drm/radeon/rs690.c
27228--- linux-3.1.1/drivers/gpu/drm/radeon/rs690.c 2011-11-11 15:19:27.000000000 -0500
27229+++ linux-3.1.1/drivers/gpu/drm/radeon/rs690.c 2011-11-16 18:39:07.000000000 -0500
27230@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
27231 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
27232 rdev->pm.sideport_bandwidth.full)
27233 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
27234- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
27235+ read_delay_latency.full = dfixed_const(800 * 1000);
27236 read_delay_latency.full = dfixed_div(read_delay_latency,
27237 rdev->pm.igp_sideport_mclk);
27238+ a.full = dfixed_const(370);
27239+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
27240 } else {
27241 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
27242 rdev->pm.k8_bandwidth.full)
27243diff -urNp linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c
27244--- linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-11-11 15:19:27.000000000 -0500
27245+++ linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-11-16 18:39:07.000000000 -0500
27246@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
27247 static int ttm_pool_mm_shrink(struct shrinker *shrink,
27248 struct shrink_control *sc)
27249 {
27250- static atomic_t start_pool = ATOMIC_INIT(0);
27251+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
27252 unsigned i;
27253- unsigned pool_offset = atomic_add_return(1, &start_pool);
27254+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
27255 struct ttm_page_pool *pool;
27256 int shrink_pages = sc->nr_to_scan;
27257
27258diff -urNp linux-3.1.1/drivers/gpu/drm/via/via_drv.h linux-3.1.1/drivers/gpu/drm/via/via_drv.h
27259--- linux-3.1.1/drivers/gpu/drm/via/via_drv.h 2011-11-11 15:19:27.000000000 -0500
27260+++ linux-3.1.1/drivers/gpu/drm/via/via_drv.h 2011-11-16 18:39:07.000000000 -0500
27261@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
27262 typedef uint32_t maskarray_t[5];
27263
27264 typedef struct drm_via_irq {
27265- atomic_t irq_received;
27266+ atomic_unchecked_t irq_received;
27267 uint32_t pending_mask;
27268 uint32_t enable_mask;
27269 wait_queue_head_t irq_queue;
27270@@ -75,7 +75,7 @@ typedef struct drm_via_private {
27271 struct timeval last_vblank;
27272 int last_vblank_valid;
27273 unsigned usec_per_vblank;
27274- atomic_t vbl_received;
27275+ atomic_unchecked_t vbl_received;
27276 drm_via_state_t hc_state;
27277 char pci_buf[VIA_PCI_BUF_SIZE];
27278 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
27279diff -urNp linux-3.1.1/drivers/gpu/drm/via/via_irq.c linux-3.1.1/drivers/gpu/drm/via/via_irq.c
27280--- linux-3.1.1/drivers/gpu/drm/via/via_irq.c 2011-11-11 15:19:27.000000000 -0500
27281+++ linux-3.1.1/drivers/gpu/drm/via/via_irq.c 2011-11-16 18:39:07.000000000 -0500
27282@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
27283 if (crtc != 0)
27284 return 0;
27285
27286- return atomic_read(&dev_priv->vbl_received);
27287+ return atomic_read_unchecked(&dev_priv->vbl_received);
27288 }
27289
27290 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
27291@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
27292
27293 status = VIA_READ(VIA_REG_INTERRUPT);
27294 if (status & VIA_IRQ_VBLANK_PENDING) {
27295- atomic_inc(&dev_priv->vbl_received);
27296- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
27297+ atomic_inc_unchecked(&dev_priv->vbl_received);
27298+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
27299 do_gettimeofday(&cur_vblank);
27300 if (dev_priv->last_vblank_valid) {
27301 dev_priv->usec_per_vblank =
27302@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
27303 dev_priv->last_vblank = cur_vblank;
27304 dev_priv->last_vblank_valid = 1;
27305 }
27306- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
27307+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
27308 DRM_DEBUG("US per vblank is: %u\n",
27309 dev_priv->usec_per_vblank);
27310 }
27311@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
27312
27313 for (i = 0; i < dev_priv->num_irqs; ++i) {
27314 if (status & cur_irq->pending_mask) {
27315- atomic_inc(&cur_irq->irq_received);
27316+ atomic_inc_unchecked(&cur_irq->irq_received);
27317 DRM_WAKEUP(&cur_irq->irq_queue);
27318 handled = 1;
27319 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
27320@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
27321 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27322 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
27323 masks[irq][4]));
27324- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
27325+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
27326 } else {
27327 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27328 (((cur_irq_sequence =
27329- atomic_read(&cur_irq->irq_received)) -
27330+ atomic_read_unchecked(&cur_irq->irq_received)) -
27331 *sequence) <= (1 << 23)));
27332 }
27333 *sequence = cur_irq_sequence;
27334@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
27335 }
27336
27337 for (i = 0; i < dev_priv->num_irqs; ++i) {
27338- atomic_set(&cur_irq->irq_received, 0);
27339+ atomic_set_unchecked(&cur_irq->irq_received, 0);
27340 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
27341 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
27342 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
27343@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
27344 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
27345 case VIA_IRQ_RELATIVE:
27346 irqwait->request.sequence +=
27347- atomic_read(&cur_irq->irq_received);
27348+ atomic_read_unchecked(&cur_irq->irq_received);
27349 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
27350 case VIA_IRQ_ABSOLUTE:
27351 break;
27352diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
27353--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-11-11 15:19:27.000000000 -0500
27354+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-11-16 18:39:07.000000000 -0500
27355@@ -240,7 +240,7 @@ struct vmw_private {
27356 * Fencing and IRQs.
27357 */
27358
27359- atomic_t fence_seq;
27360+ atomic_unchecked_t fence_seq;
27361 wait_queue_head_t fence_queue;
27362 wait_queue_head_t fifo_queue;
27363 atomic_t fence_queue_waiters;
27364diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
27365--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-11-11 15:19:27.000000000 -0500
27366+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-11-16 18:39:07.000000000 -0500
27367@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
27368 struct drm_vmw_fence_rep fence_rep;
27369 struct drm_vmw_fence_rep __user *user_fence_rep;
27370 int ret;
27371- void *user_cmd;
27372+ void __user *user_cmd;
27373 void *cmd;
27374 uint32_t sequence;
27375 struct vmw_sw_context *sw_context = &dev_priv->ctx;
27376diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
27377--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-11-11 15:19:27.000000000 -0500
27378+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-11-16 18:39:07.000000000 -0500
27379@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
27380 while (!vmw_lag_lt(queue, us)) {
27381 spin_lock(&queue->lock);
27382 if (list_empty(&queue->head))
27383- sequence = atomic_read(&dev_priv->fence_seq);
27384+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27385 else {
27386 fence = list_first_entry(&queue->head,
27387 struct vmw_fence, head);
27388diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
27389--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-11-11 15:19:27.000000000 -0500
27390+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-11-16 18:39:07.000000000 -0500
27391@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
27392 (unsigned int) min,
27393 (unsigned int) fifo->capabilities);
27394
27395- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27396+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27397 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
27398 vmw_fence_queue_init(&fifo->fence_queue);
27399 return vmw_fifo_send_fence(dev_priv, &dummy);
27400@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
27401 if (reserveable)
27402 iowrite32(bytes, fifo_mem +
27403 SVGA_FIFO_RESERVED);
27404- return fifo_mem + (next_cmd >> 2);
27405+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
27406 } else {
27407 need_bounce = true;
27408 }
27409@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27410
27411 fm = vmw_fifo_reserve(dev_priv, bytes);
27412 if (unlikely(fm == NULL)) {
27413- *sequence = atomic_read(&dev_priv->fence_seq);
27414+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27415 ret = -ENOMEM;
27416 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
27417 false, 3*HZ);
27418@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27419 }
27420
27421 do {
27422- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
27423+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
27424 } while (*sequence == 0);
27425
27426 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
27427diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
27428--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-11-11 15:19:27.000000000 -0500
27429+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-11-16 18:39:07.000000000 -0500
27430@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
27431 * emitted. Then the fence is stale and signaled.
27432 */
27433
27434- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
27435+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
27436 > VMW_FENCE_WRAP);
27437
27438 return ret;
27439@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
27440
27441 if (fifo_idle)
27442 down_read(&fifo_state->rwsem);
27443- signal_seq = atomic_read(&dev_priv->fence_seq);
27444+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
27445 ret = 0;
27446
27447 for (;;) {
27448diff -urNp linux-3.1.1/drivers/hid/hid-core.c linux-3.1.1/drivers/hid/hid-core.c
27449--- linux-3.1.1/drivers/hid/hid-core.c 2011-11-11 15:19:27.000000000 -0500
27450+++ linux-3.1.1/drivers/hid/hid-core.c 2011-11-16 18:39:07.000000000 -0500
27451@@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device
27452
27453 int hid_add_device(struct hid_device *hdev)
27454 {
27455- static atomic_t id = ATOMIC_INIT(0);
27456+ static atomic_unchecked_t id = ATOMIC_INIT(0);
27457 int ret;
27458
27459 if (WARN_ON(hdev->status & HID_STAT_ADDED))
27460@@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hd
27461 /* XXX hack, any other cleaner solution after the driver core
27462 * is converted to allow more than 20 bytes as the device name? */
27463 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
27464- hdev->vendor, hdev->product, atomic_inc_return(&id));
27465+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
27466
27467 hid_debug_register(hdev, dev_name(&hdev->dev));
27468 ret = device_add(&hdev->dev);
27469diff -urNp linux-3.1.1/drivers/hid/usbhid/hiddev.c linux-3.1.1/drivers/hid/usbhid/hiddev.c
27470--- linux-3.1.1/drivers/hid/usbhid/hiddev.c 2011-11-11 15:19:27.000000000 -0500
27471+++ linux-3.1.1/drivers/hid/usbhid/hiddev.c 2011-11-16 18:39:07.000000000 -0500
27472@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
27473 break;
27474
27475 case HIDIOCAPPLICATION:
27476- if (arg < 0 || arg >= hid->maxapplication)
27477+ if (arg >= hid->maxapplication)
27478 break;
27479
27480 for (i = 0; i < hid->maxcollection; i++)
27481diff -urNp linux-3.1.1/drivers/hwmon/acpi_power_meter.c linux-3.1.1/drivers/hwmon/acpi_power_meter.c
27482--- linux-3.1.1/drivers/hwmon/acpi_power_meter.c 2011-11-11 15:19:27.000000000 -0500
27483+++ linux-3.1.1/drivers/hwmon/acpi_power_meter.c 2011-11-16 18:39:07.000000000 -0500
27484@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
27485 return res;
27486
27487 temp /= 1000;
27488- if (temp < 0)
27489- return -EINVAL;
27490
27491 mutex_lock(&resource->lock);
27492 resource->trip[attr->index - 7] = temp;
27493diff -urNp linux-3.1.1/drivers/hwmon/sht15.c linux-3.1.1/drivers/hwmon/sht15.c
27494--- linux-3.1.1/drivers/hwmon/sht15.c 2011-11-11 15:19:27.000000000 -0500
27495+++ linux-3.1.1/drivers/hwmon/sht15.c 2011-11-16 18:39:07.000000000 -0500
27496@@ -166,7 +166,7 @@ struct sht15_data {
27497 int supply_uV;
27498 bool supply_uV_valid;
27499 struct work_struct update_supply_work;
27500- atomic_t interrupt_handled;
27501+ atomic_unchecked_t interrupt_handled;
27502 };
27503
27504 /**
27505@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
27506 return ret;
27507
27508 gpio_direction_input(data->pdata->gpio_data);
27509- atomic_set(&data->interrupt_handled, 0);
27510+ atomic_set_unchecked(&data->interrupt_handled, 0);
27511
27512 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27513 if (gpio_get_value(data->pdata->gpio_data) == 0) {
27514 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
27515 /* Only relevant if the interrupt hasn't occurred. */
27516- if (!atomic_read(&data->interrupt_handled))
27517+ if (!atomic_read_unchecked(&data->interrupt_handled))
27518 schedule_work(&data->read_work);
27519 }
27520 ret = wait_event_timeout(data->wait_queue,
27521@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
27522
27523 /* First disable the interrupt */
27524 disable_irq_nosync(irq);
27525- atomic_inc(&data->interrupt_handled);
27526+ atomic_inc_unchecked(&data->interrupt_handled);
27527 /* Then schedule a reading work struct */
27528 if (data->state != SHT15_READING_NOTHING)
27529 schedule_work(&data->read_work);
27530@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
27531 * If not, then start the interrupt again - care here as could
27532 * have gone low in meantime so verify it hasn't!
27533 */
27534- atomic_set(&data->interrupt_handled, 0);
27535+ atomic_set_unchecked(&data->interrupt_handled, 0);
27536 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27537 /* If still not occurred or another handler has been scheduled */
27538 if (gpio_get_value(data->pdata->gpio_data)
27539- || atomic_read(&data->interrupt_handled))
27540+ || atomic_read_unchecked(&data->interrupt_handled))
27541 return;
27542 }
27543
27544diff -urNp linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c
27545--- linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-11 15:19:27.000000000 -0500
27546+++ linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-16 18:39:07.000000000 -0500
27547@@ -43,7 +43,7 @@
27548 extern struct i2c_adapter amd756_smbus;
27549
27550 static struct i2c_adapter *s4882_adapter;
27551-static struct i2c_algorithm *s4882_algo;
27552+static i2c_algorithm_no_const *s4882_algo;
27553
27554 /* Wrapper access functions for multiplexed SMBus */
27555 static DEFINE_MUTEX(amd756_lock);
27556diff -urNp linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c
27557--- linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-11 15:19:27.000000000 -0500
27558+++ linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-16 18:39:07.000000000 -0500
27559@@ -41,7 +41,7 @@
27560 extern struct i2c_adapter *nforce2_smbus;
27561
27562 static struct i2c_adapter *s4985_adapter;
27563-static struct i2c_algorithm *s4985_algo;
27564+static i2c_algorithm_no_const *s4985_algo;
27565
27566 /* Wrapper access functions for multiplexed SMBus */
27567 static DEFINE_MUTEX(nforce2_lock);
27568diff -urNp linux-3.1.1/drivers/i2c/i2c-mux.c linux-3.1.1/drivers/i2c/i2c-mux.c
27569--- linux-3.1.1/drivers/i2c/i2c-mux.c 2011-11-11 15:19:27.000000000 -0500
27570+++ linux-3.1.1/drivers/i2c/i2c-mux.c 2011-11-16 18:39:07.000000000 -0500
27571@@ -28,7 +28,7 @@
27572 /* multiplexer per channel data */
27573 struct i2c_mux_priv {
27574 struct i2c_adapter adap;
27575- struct i2c_algorithm algo;
27576+ i2c_algorithm_no_const algo;
27577
27578 struct i2c_adapter *parent;
27579 void *mux_dev; /* the mux chip/device */
27580diff -urNp linux-3.1.1/drivers/ide/aec62xx.c linux-3.1.1/drivers/ide/aec62xx.c
27581--- linux-3.1.1/drivers/ide/aec62xx.c 2011-11-11 15:19:27.000000000 -0500
27582+++ linux-3.1.1/drivers/ide/aec62xx.c 2011-11-16 18:39:07.000000000 -0500
27583@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
27584 .cable_detect = atp86x_cable_detect,
27585 };
27586
27587-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
27588+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
27589 { /* 0: AEC6210 */
27590 .name = DRV_NAME,
27591 .init_chipset = init_chipset_aec62xx,
27592diff -urNp linux-3.1.1/drivers/ide/alim15x3.c linux-3.1.1/drivers/ide/alim15x3.c
27593--- linux-3.1.1/drivers/ide/alim15x3.c 2011-11-11 15:19:27.000000000 -0500
27594+++ linux-3.1.1/drivers/ide/alim15x3.c 2011-11-16 18:39:07.000000000 -0500
27595@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
27596 .dma_sff_read_status = ide_dma_sff_read_status,
27597 };
27598
27599-static const struct ide_port_info ali15x3_chipset __devinitdata = {
27600+static const struct ide_port_info ali15x3_chipset __devinitconst = {
27601 .name = DRV_NAME,
27602 .init_chipset = init_chipset_ali15x3,
27603 .init_hwif = init_hwif_ali15x3,
27604diff -urNp linux-3.1.1/drivers/ide/amd74xx.c linux-3.1.1/drivers/ide/amd74xx.c
27605--- linux-3.1.1/drivers/ide/amd74xx.c 2011-11-11 15:19:27.000000000 -0500
27606+++ linux-3.1.1/drivers/ide/amd74xx.c 2011-11-16 18:39:07.000000000 -0500
27607@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
27608 .udma_mask = udma, \
27609 }
27610
27611-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
27612+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
27613 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
27614 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
27615 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
27616diff -urNp linux-3.1.1/drivers/ide/atiixp.c linux-3.1.1/drivers/ide/atiixp.c
27617--- linux-3.1.1/drivers/ide/atiixp.c 2011-11-11 15:19:27.000000000 -0500
27618+++ linux-3.1.1/drivers/ide/atiixp.c 2011-11-16 18:39:07.000000000 -0500
27619@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
27620 .cable_detect = atiixp_cable_detect,
27621 };
27622
27623-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
27624+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
27625 { /* 0: IXP200/300/400/700 */
27626 .name = DRV_NAME,
27627 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
27628diff -urNp linux-3.1.1/drivers/ide/cmd64x.c linux-3.1.1/drivers/ide/cmd64x.c
27629--- linux-3.1.1/drivers/ide/cmd64x.c 2011-11-11 15:19:27.000000000 -0500
27630+++ linux-3.1.1/drivers/ide/cmd64x.c 2011-11-16 18:39:07.000000000 -0500
27631@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
27632 .dma_sff_read_status = ide_dma_sff_read_status,
27633 };
27634
27635-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
27636+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
27637 { /* 0: CMD643 */
27638 .name = DRV_NAME,
27639 .init_chipset = init_chipset_cmd64x,
27640diff -urNp linux-3.1.1/drivers/ide/cs5520.c linux-3.1.1/drivers/ide/cs5520.c
27641--- linux-3.1.1/drivers/ide/cs5520.c 2011-11-11 15:19:27.000000000 -0500
27642+++ linux-3.1.1/drivers/ide/cs5520.c 2011-11-16 18:39:07.000000000 -0500
27643@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
27644 .set_dma_mode = cs5520_set_dma_mode,
27645 };
27646
27647-static const struct ide_port_info cyrix_chipset __devinitdata = {
27648+static const struct ide_port_info cyrix_chipset __devinitconst = {
27649 .name = DRV_NAME,
27650 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
27651 .port_ops = &cs5520_port_ops,
27652diff -urNp linux-3.1.1/drivers/ide/cs5530.c linux-3.1.1/drivers/ide/cs5530.c
27653--- linux-3.1.1/drivers/ide/cs5530.c 2011-11-11 15:19:27.000000000 -0500
27654+++ linux-3.1.1/drivers/ide/cs5530.c 2011-11-16 18:39:07.000000000 -0500
27655@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
27656 .udma_filter = cs5530_udma_filter,
27657 };
27658
27659-static const struct ide_port_info cs5530_chipset __devinitdata = {
27660+static const struct ide_port_info cs5530_chipset __devinitconst = {
27661 .name = DRV_NAME,
27662 .init_chipset = init_chipset_cs5530,
27663 .init_hwif = init_hwif_cs5530,
27664diff -urNp linux-3.1.1/drivers/ide/cs5535.c linux-3.1.1/drivers/ide/cs5535.c
27665--- linux-3.1.1/drivers/ide/cs5535.c 2011-11-11 15:19:27.000000000 -0500
27666+++ linux-3.1.1/drivers/ide/cs5535.c 2011-11-16 18:39:07.000000000 -0500
27667@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
27668 .cable_detect = cs5535_cable_detect,
27669 };
27670
27671-static const struct ide_port_info cs5535_chipset __devinitdata = {
27672+static const struct ide_port_info cs5535_chipset __devinitconst = {
27673 .name = DRV_NAME,
27674 .port_ops = &cs5535_port_ops,
27675 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
27676diff -urNp linux-3.1.1/drivers/ide/cy82c693.c linux-3.1.1/drivers/ide/cy82c693.c
27677--- linux-3.1.1/drivers/ide/cy82c693.c 2011-11-11 15:19:27.000000000 -0500
27678+++ linux-3.1.1/drivers/ide/cy82c693.c 2011-11-16 18:39:07.000000000 -0500
27679@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c69
27680 .set_dma_mode = cy82c693_set_dma_mode,
27681 };
27682
27683-static const struct ide_port_info cy82c693_chipset __devinitdata = {
27684+static const struct ide_port_info cy82c693_chipset __devinitconst = {
27685 .name = DRV_NAME,
27686 .init_iops = init_iops_cy82c693,
27687 .port_ops = &cy82c693_port_ops,
27688diff -urNp linux-3.1.1/drivers/ide/hpt366.c linux-3.1.1/drivers/ide/hpt366.c
27689--- linux-3.1.1/drivers/ide/hpt366.c 2011-11-11 15:19:27.000000000 -0500
27690+++ linux-3.1.1/drivers/ide/hpt366.c 2011-11-16 18:39:07.000000000 -0500
27691@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
27692 }
27693 };
27694
27695-static const struct hpt_info hpt36x __devinitdata = {
27696+static const struct hpt_info hpt36x __devinitconst = {
27697 .chip_name = "HPT36x",
27698 .chip_type = HPT36x,
27699 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
27700@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
27701 .timings = &hpt36x_timings
27702 };
27703
27704-static const struct hpt_info hpt370 __devinitdata = {
27705+static const struct hpt_info hpt370 __devinitconst = {
27706 .chip_name = "HPT370",
27707 .chip_type = HPT370,
27708 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27709@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
27710 .timings = &hpt37x_timings
27711 };
27712
27713-static const struct hpt_info hpt370a __devinitdata = {
27714+static const struct hpt_info hpt370a __devinitconst = {
27715 .chip_name = "HPT370A",
27716 .chip_type = HPT370A,
27717 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27718@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
27719 .timings = &hpt37x_timings
27720 };
27721
27722-static const struct hpt_info hpt374 __devinitdata = {
27723+static const struct hpt_info hpt374 __devinitconst = {
27724 .chip_name = "HPT374",
27725 .chip_type = HPT374,
27726 .udma_mask = ATA_UDMA5,
27727@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
27728 .timings = &hpt37x_timings
27729 };
27730
27731-static const struct hpt_info hpt372 __devinitdata = {
27732+static const struct hpt_info hpt372 __devinitconst = {
27733 .chip_name = "HPT372",
27734 .chip_type = HPT372,
27735 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27736@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
27737 .timings = &hpt37x_timings
27738 };
27739
27740-static const struct hpt_info hpt372a __devinitdata = {
27741+static const struct hpt_info hpt372a __devinitconst = {
27742 .chip_name = "HPT372A",
27743 .chip_type = HPT372A,
27744 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27745@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
27746 .timings = &hpt37x_timings
27747 };
27748
27749-static const struct hpt_info hpt302 __devinitdata = {
27750+static const struct hpt_info hpt302 __devinitconst = {
27751 .chip_name = "HPT302",
27752 .chip_type = HPT302,
27753 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27754@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
27755 .timings = &hpt37x_timings
27756 };
27757
27758-static const struct hpt_info hpt371 __devinitdata = {
27759+static const struct hpt_info hpt371 __devinitconst = {
27760 .chip_name = "HPT371",
27761 .chip_type = HPT371,
27762 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27763@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
27764 .timings = &hpt37x_timings
27765 };
27766
27767-static const struct hpt_info hpt372n __devinitdata = {
27768+static const struct hpt_info hpt372n __devinitconst = {
27769 .chip_name = "HPT372N",
27770 .chip_type = HPT372N,
27771 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27772@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
27773 .timings = &hpt37x_timings
27774 };
27775
27776-static const struct hpt_info hpt302n __devinitdata = {
27777+static const struct hpt_info hpt302n __devinitconst = {
27778 .chip_name = "HPT302N",
27779 .chip_type = HPT302N,
27780 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27781@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
27782 .timings = &hpt37x_timings
27783 };
27784
27785-static const struct hpt_info hpt371n __devinitdata = {
27786+static const struct hpt_info hpt371n __devinitconst = {
27787 .chip_name = "HPT371N",
27788 .chip_type = HPT371N,
27789 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27790@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
27791 .dma_sff_read_status = ide_dma_sff_read_status,
27792 };
27793
27794-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
27795+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
27796 { /* 0: HPT36x */
27797 .name = DRV_NAME,
27798 .init_chipset = init_chipset_hpt366,
27799diff -urNp linux-3.1.1/drivers/ide/ide-cd.c linux-3.1.1/drivers/ide/ide-cd.c
27800--- linux-3.1.1/drivers/ide/ide-cd.c 2011-11-11 15:19:27.000000000 -0500
27801+++ linux-3.1.1/drivers/ide/ide-cd.c 2011-11-16 18:39:07.000000000 -0500
27802@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
27803 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
27804 if ((unsigned long)buf & alignment
27805 || blk_rq_bytes(rq) & q->dma_pad_mask
27806- || object_is_on_stack(buf))
27807+ || object_starts_on_stack(buf))
27808 drive->dma = 0;
27809 }
27810 }
27811diff -urNp linux-3.1.1/drivers/ide/ide-floppy.c linux-3.1.1/drivers/ide/ide-floppy.c
27812--- linux-3.1.1/drivers/ide/ide-floppy.c 2011-11-11 15:19:27.000000000 -0500
27813+++ linux-3.1.1/drivers/ide/ide-floppy.c 2011-11-16 18:40:10.000000000 -0500
27814@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
27815 u8 pc_buf[256], header_len, desc_cnt;
27816 int i, rc = 1, blocks, length;
27817
27818+ pax_track_stack();
27819+
27820 ide_debug_log(IDE_DBG_FUNC, "enter");
27821
27822 drive->bios_cyl = 0;
27823diff -urNp linux-3.1.1/drivers/ide/ide-pci-generic.c linux-3.1.1/drivers/ide/ide-pci-generic.c
27824--- linux-3.1.1/drivers/ide/ide-pci-generic.c 2011-11-11 15:19:27.000000000 -0500
27825+++ linux-3.1.1/drivers/ide/ide-pci-generic.c 2011-11-16 18:39:07.000000000 -0500
27826@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
27827 .udma_mask = ATA_UDMA6, \
27828 }
27829
27830-static const struct ide_port_info generic_chipsets[] __devinitdata = {
27831+static const struct ide_port_info generic_chipsets[] __devinitconst = {
27832 /* 0: Unknown */
27833 DECLARE_GENERIC_PCI_DEV(0),
27834
27835diff -urNp linux-3.1.1/drivers/ide/it8172.c linux-3.1.1/drivers/ide/it8172.c
27836--- linux-3.1.1/drivers/ide/it8172.c 2011-11-11 15:19:27.000000000 -0500
27837+++ linux-3.1.1/drivers/ide/it8172.c 2011-11-16 18:39:07.000000000 -0500
27838@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
27839 .set_dma_mode = it8172_set_dma_mode,
27840 };
27841
27842-static const struct ide_port_info it8172_port_info __devinitdata = {
27843+static const struct ide_port_info it8172_port_info __devinitconst = {
27844 .name = DRV_NAME,
27845 .port_ops = &it8172_port_ops,
27846 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
27847diff -urNp linux-3.1.1/drivers/ide/it8213.c linux-3.1.1/drivers/ide/it8213.c
27848--- linux-3.1.1/drivers/ide/it8213.c 2011-11-11 15:19:27.000000000 -0500
27849+++ linux-3.1.1/drivers/ide/it8213.c 2011-11-16 18:39:07.000000000 -0500
27850@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
27851 .cable_detect = it8213_cable_detect,
27852 };
27853
27854-static const struct ide_port_info it8213_chipset __devinitdata = {
27855+static const struct ide_port_info it8213_chipset __devinitconst = {
27856 .name = DRV_NAME,
27857 .enablebits = { {0x41, 0x80, 0x80} },
27858 .port_ops = &it8213_port_ops,
27859diff -urNp linux-3.1.1/drivers/ide/it821x.c linux-3.1.1/drivers/ide/it821x.c
27860--- linux-3.1.1/drivers/ide/it821x.c 2011-11-11 15:19:27.000000000 -0500
27861+++ linux-3.1.1/drivers/ide/it821x.c 2011-11-16 18:39:07.000000000 -0500
27862@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
27863 .cable_detect = it821x_cable_detect,
27864 };
27865
27866-static const struct ide_port_info it821x_chipset __devinitdata = {
27867+static const struct ide_port_info it821x_chipset __devinitconst = {
27868 .name = DRV_NAME,
27869 .init_chipset = init_chipset_it821x,
27870 .init_hwif = init_hwif_it821x,
27871diff -urNp linux-3.1.1/drivers/ide/jmicron.c linux-3.1.1/drivers/ide/jmicron.c
27872--- linux-3.1.1/drivers/ide/jmicron.c 2011-11-11 15:19:27.000000000 -0500
27873+++ linux-3.1.1/drivers/ide/jmicron.c 2011-11-16 18:39:07.000000000 -0500
27874@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
27875 .cable_detect = jmicron_cable_detect,
27876 };
27877
27878-static const struct ide_port_info jmicron_chipset __devinitdata = {
27879+static const struct ide_port_info jmicron_chipset __devinitconst = {
27880 .name = DRV_NAME,
27881 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
27882 .port_ops = &jmicron_port_ops,
27883diff -urNp linux-3.1.1/drivers/ide/ns87415.c linux-3.1.1/drivers/ide/ns87415.c
27884--- linux-3.1.1/drivers/ide/ns87415.c 2011-11-11 15:19:27.000000000 -0500
27885+++ linux-3.1.1/drivers/ide/ns87415.c 2011-11-16 18:39:07.000000000 -0500
27886@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
27887 .dma_sff_read_status = superio_dma_sff_read_status,
27888 };
27889
27890-static const struct ide_port_info ns87415_chipset __devinitdata = {
27891+static const struct ide_port_info ns87415_chipset __devinitconst = {
27892 .name = DRV_NAME,
27893 .init_hwif = init_hwif_ns87415,
27894 .tp_ops = &ns87415_tp_ops,
27895diff -urNp linux-3.1.1/drivers/ide/opti621.c linux-3.1.1/drivers/ide/opti621.c
27896--- linux-3.1.1/drivers/ide/opti621.c 2011-11-11 15:19:27.000000000 -0500
27897+++ linux-3.1.1/drivers/ide/opti621.c 2011-11-16 18:39:07.000000000 -0500
27898@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
27899 .set_pio_mode = opti621_set_pio_mode,
27900 };
27901
27902-static const struct ide_port_info opti621_chipset __devinitdata = {
27903+static const struct ide_port_info opti621_chipset __devinitconst = {
27904 .name = DRV_NAME,
27905 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
27906 .port_ops = &opti621_port_ops,
27907diff -urNp linux-3.1.1/drivers/ide/pdc202xx_new.c linux-3.1.1/drivers/ide/pdc202xx_new.c
27908--- linux-3.1.1/drivers/ide/pdc202xx_new.c 2011-11-11 15:19:27.000000000 -0500
27909+++ linux-3.1.1/drivers/ide/pdc202xx_new.c 2011-11-16 18:39:07.000000000 -0500
27910@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
27911 .udma_mask = udma, \
27912 }
27913
27914-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
27915+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
27916 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
27917 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
27918 };
27919diff -urNp linux-3.1.1/drivers/ide/pdc202xx_old.c linux-3.1.1/drivers/ide/pdc202xx_old.c
27920--- linux-3.1.1/drivers/ide/pdc202xx_old.c 2011-11-11 15:19:27.000000000 -0500
27921+++ linux-3.1.1/drivers/ide/pdc202xx_old.c 2011-11-16 18:39:07.000000000 -0500
27922@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
27923 .max_sectors = sectors, \
27924 }
27925
27926-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
27927+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
27928 { /* 0: PDC20246 */
27929 .name = DRV_NAME,
27930 .init_chipset = init_chipset_pdc202xx,
27931diff -urNp linux-3.1.1/drivers/ide/piix.c linux-3.1.1/drivers/ide/piix.c
27932--- linux-3.1.1/drivers/ide/piix.c 2011-11-11 15:19:27.000000000 -0500
27933+++ linux-3.1.1/drivers/ide/piix.c 2011-11-16 18:39:07.000000000 -0500
27934@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
27935 .udma_mask = udma, \
27936 }
27937
27938-static const struct ide_port_info piix_pci_info[] __devinitdata = {
27939+static const struct ide_port_info piix_pci_info[] __devinitconst = {
27940 /* 0: MPIIX */
27941 { /*
27942 * MPIIX actually has only a single IDE channel mapped to
27943diff -urNp linux-3.1.1/drivers/ide/rz1000.c linux-3.1.1/drivers/ide/rz1000.c
27944--- linux-3.1.1/drivers/ide/rz1000.c 2011-11-11 15:19:27.000000000 -0500
27945+++ linux-3.1.1/drivers/ide/rz1000.c 2011-11-16 18:39:07.000000000 -0500
27946@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
27947 }
27948 }
27949
27950-static const struct ide_port_info rz1000_chipset __devinitdata = {
27951+static const struct ide_port_info rz1000_chipset __devinitconst = {
27952 .name = DRV_NAME,
27953 .host_flags = IDE_HFLAG_NO_DMA,
27954 };
27955diff -urNp linux-3.1.1/drivers/ide/sc1200.c linux-3.1.1/drivers/ide/sc1200.c
27956--- linux-3.1.1/drivers/ide/sc1200.c 2011-11-11 15:19:27.000000000 -0500
27957+++ linux-3.1.1/drivers/ide/sc1200.c 2011-11-16 18:39:07.000000000 -0500
27958@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
27959 .dma_sff_read_status = ide_dma_sff_read_status,
27960 };
27961
27962-static const struct ide_port_info sc1200_chipset __devinitdata = {
27963+static const struct ide_port_info sc1200_chipset __devinitconst = {
27964 .name = DRV_NAME,
27965 .port_ops = &sc1200_port_ops,
27966 .dma_ops = &sc1200_dma_ops,
27967diff -urNp linux-3.1.1/drivers/ide/scc_pata.c linux-3.1.1/drivers/ide/scc_pata.c
27968--- linux-3.1.1/drivers/ide/scc_pata.c 2011-11-11 15:19:27.000000000 -0500
27969+++ linux-3.1.1/drivers/ide/scc_pata.c 2011-11-16 18:39:07.000000000 -0500
27970@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
27971 .dma_sff_read_status = scc_dma_sff_read_status,
27972 };
27973
27974-static const struct ide_port_info scc_chipset __devinitdata = {
27975+static const struct ide_port_info scc_chipset __devinitconst = {
27976 .name = "sccIDE",
27977 .init_iops = init_iops_scc,
27978 .init_dma = scc_init_dma,
27979diff -urNp linux-3.1.1/drivers/ide/serverworks.c linux-3.1.1/drivers/ide/serverworks.c
27980--- linux-3.1.1/drivers/ide/serverworks.c 2011-11-11 15:19:27.000000000 -0500
27981+++ linux-3.1.1/drivers/ide/serverworks.c 2011-11-16 18:39:07.000000000 -0500
27982@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
27983 .cable_detect = svwks_cable_detect,
27984 };
27985
27986-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
27987+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
27988 { /* 0: OSB4 */
27989 .name = DRV_NAME,
27990 .init_chipset = init_chipset_svwks,
27991diff -urNp linux-3.1.1/drivers/ide/setup-pci.c linux-3.1.1/drivers/ide/setup-pci.c
27992--- linux-3.1.1/drivers/ide/setup-pci.c 2011-11-11 15:19:27.000000000 -0500
27993+++ linux-3.1.1/drivers/ide/setup-pci.c 2011-11-16 18:40:10.000000000 -0500
27994@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
27995 int ret, i, n_ports = dev2 ? 4 : 2;
27996 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
27997
27998+ pax_track_stack();
27999+
28000 for (i = 0; i < n_ports / 2; i++) {
28001 ret = ide_setup_pci_controller(pdev[i], d, !i);
28002 if (ret < 0)
28003diff -urNp linux-3.1.1/drivers/ide/siimage.c linux-3.1.1/drivers/ide/siimage.c
28004--- linux-3.1.1/drivers/ide/siimage.c 2011-11-11 15:19:27.000000000 -0500
28005+++ linux-3.1.1/drivers/ide/siimage.c 2011-11-16 18:39:07.000000000 -0500
28006@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
28007 .udma_mask = ATA_UDMA6, \
28008 }
28009
28010-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
28011+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
28012 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
28013 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
28014 };
28015diff -urNp linux-3.1.1/drivers/ide/sis5513.c linux-3.1.1/drivers/ide/sis5513.c
28016--- linux-3.1.1/drivers/ide/sis5513.c 2011-11-11 15:19:27.000000000 -0500
28017+++ linux-3.1.1/drivers/ide/sis5513.c 2011-11-16 18:39:07.000000000 -0500
28018@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
28019 .cable_detect = sis_cable_detect,
28020 };
28021
28022-static const struct ide_port_info sis5513_chipset __devinitdata = {
28023+static const struct ide_port_info sis5513_chipset __devinitconst = {
28024 .name = DRV_NAME,
28025 .init_chipset = init_chipset_sis5513,
28026 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
28027diff -urNp linux-3.1.1/drivers/ide/sl82c105.c linux-3.1.1/drivers/ide/sl82c105.c
28028--- linux-3.1.1/drivers/ide/sl82c105.c 2011-11-11 15:19:27.000000000 -0500
28029+++ linux-3.1.1/drivers/ide/sl82c105.c 2011-11-16 18:39:07.000000000 -0500
28030@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
28031 .dma_sff_read_status = ide_dma_sff_read_status,
28032 };
28033
28034-static const struct ide_port_info sl82c105_chipset __devinitdata = {
28035+static const struct ide_port_info sl82c105_chipset __devinitconst = {
28036 .name = DRV_NAME,
28037 .init_chipset = init_chipset_sl82c105,
28038 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
28039diff -urNp linux-3.1.1/drivers/ide/slc90e66.c linux-3.1.1/drivers/ide/slc90e66.c
28040--- linux-3.1.1/drivers/ide/slc90e66.c 2011-11-11 15:19:27.000000000 -0500
28041+++ linux-3.1.1/drivers/ide/slc90e66.c 2011-11-16 18:39:07.000000000 -0500
28042@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
28043 .cable_detect = slc90e66_cable_detect,
28044 };
28045
28046-static const struct ide_port_info slc90e66_chipset __devinitdata = {
28047+static const struct ide_port_info slc90e66_chipset __devinitconst = {
28048 .name = DRV_NAME,
28049 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
28050 .port_ops = &slc90e66_port_ops,
28051diff -urNp linux-3.1.1/drivers/ide/tc86c001.c linux-3.1.1/drivers/ide/tc86c001.c
28052--- linux-3.1.1/drivers/ide/tc86c001.c 2011-11-11 15:19:27.000000000 -0500
28053+++ linux-3.1.1/drivers/ide/tc86c001.c 2011-11-16 18:39:07.000000000 -0500
28054@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001
28055 .dma_sff_read_status = ide_dma_sff_read_status,
28056 };
28057
28058-static const struct ide_port_info tc86c001_chipset __devinitdata = {
28059+static const struct ide_port_info tc86c001_chipset __devinitconst = {
28060 .name = DRV_NAME,
28061 .init_hwif = init_hwif_tc86c001,
28062 .port_ops = &tc86c001_port_ops,
28063diff -urNp linux-3.1.1/drivers/ide/triflex.c linux-3.1.1/drivers/ide/triflex.c
28064--- linux-3.1.1/drivers/ide/triflex.c 2011-11-11 15:19:27.000000000 -0500
28065+++ linux-3.1.1/drivers/ide/triflex.c 2011-11-16 18:39:07.000000000 -0500
28066@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
28067 .set_dma_mode = triflex_set_mode,
28068 };
28069
28070-static const struct ide_port_info triflex_device __devinitdata = {
28071+static const struct ide_port_info triflex_device __devinitconst = {
28072 .name = DRV_NAME,
28073 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
28074 .port_ops = &triflex_port_ops,
28075diff -urNp linux-3.1.1/drivers/ide/trm290.c linux-3.1.1/drivers/ide/trm290.c
28076--- linux-3.1.1/drivers/ide/trm290.c 2011-11-11 15:19:27.000000000 -0500
28077+++ linux-3.1.1/drivers/ide/trm290.c 2011-11-16 18:39:07.000000000 -0500
28078@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
28079 .dma_check = trm290_dma_check,
28080 };
28081
28082-static const struct ide_port_info trm290_chipset __devinitdata = {
28083+static const struct ide_port_info trm290_chipset __devinitconst = {
28084 .name = DRV_NAME,
28085 .init_hwif = init_hwif_trm290,
28086 .tp_ops = &trm290_tp_ops,
28087diff -urNp linux-3.1.1/drivers/ide/via82cxxx.c linux-3.1.1/drivers/ide/via82cxxx.c
28088--- linux-3.1.1/drivers/ide/via82cxxx.c 2011-11-11 15:19:27.000000000 -0500
28089+++ linux-3.1.1/drivers/ide/via82cxxx.c 2011-11-16 18:39:07.000000000 -0500
28090@@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
28091 .cable_detect = via82cxxx_cable_detect,
28092 };
28093
28094-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
28095+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
28096 .name = DRV_NAME,
28097 .init_chipset = init_chipset_via82cxxx,
28098 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
28099diff -urNp linux-3.1.1/drivers/infiniband/core/cm.c linux-3.1.1/drivers/infiniband/core/cm.c
28100--- linux-3.1.1/drivers/infiniband/core/cm.c 2011-11-11 15:19:27.000000000 -0500
28101+++ linux-3.1.1/drivers/infiniband/core/cm.c 2011-11-16 18:39:07.000000000 -0500
28102@@ -113,7 +113,7 @@ static char const counter_group_names[CM
28103
28104 struct cm_counter_group {
28105 struct kobject obj;
28106- atomic_long_t counter[CM_ATTR_COUNT];
28107+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
28108 };
28109
28110 struct cm_counter_attribute {
28111@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
28112 struct ib_mad_send_buf *msg = NULL;
28113 int ret;
28114
28115- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28116+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28117 counter[CM_REQ_COUNTER]);
28118
28119 /* Quick state check to discard duplicate REQs. */
28120@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
28121 if (!cm_id_priv)
28122 return;
28123
28124- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28125+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28126 counter[CM_REP_COUNTER]);
28127 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
28128 if (ret)
28129@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
28130 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
28131 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
28132 spin_unlock_irq(&cm_id_priv->lock);
28133- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28134+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28135 counter[CM_RTU_COUNTER]);
28136 goto out;
28137 }
28138@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
28139 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
28140 dreq_msg->local_comm_id);
28141 if (!cm_id_priv) {
28142- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28143+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28144 counter[CM_DREQ_COUNTER]);
28145 cm_issue_drep(work->port, work->mad_recv_wc);
28146 return -EINVAL;
28147@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
28148 case IB_CM_MRA_REP_RCVD:
28149 break;
28150 case IB_CM_TIMEWAIT:
28151- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28152+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28153 counter[CM_DREQ_COUNTER]);
28154 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28155 goto unlock;
28156@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
28157 cm_free_msg(msg);
28158 goto deref;
28159 case IB_CM_DREQ_RCVD:
28160- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28161+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28162 counter[CM_DREQ_COUNTER]);
28163 goto unlock;
28164 default:
28165@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
28166 ib_modify_mad(cm_id_priv->av.port->mad_agent,
28167 cm_id_priv->msg, timeout)) {
28168 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
28169- atomic_long_inc(&work->port->
28170+ atomic_long_inc_unchecked(&work->port->
28171 counter_group[CM_RECV_DUPLICATES].
28172 counter[CM_MRA_COUNTER]);
28173 goto out;
28174@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
28175 break;
28176 case IB_CM_MRA_REQ_RCVD:
28177 case IB_CM_MRA_REP_RCVD:
28178- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28179+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28180 counter[CM_MRA_COUNTER]);
28181 /* fall through */
28182 default:
28183@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
28184 case IB_CM_LAP_IDLE:
28185 break;
28186 case IB_CM_MRA_LAP_SENT:
28187- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28188+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28189 counter[CM_LAP_COUNTER]);
28190 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28191 goto unlock;
28192@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
28193 cm_free_msg(msg);
28194 goto deref;
28195 case IB_CM_LAP_RCVD:
28196- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28197+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28198 counter[CM_LAP_COUNTER]);
28199 goto unlock;
28200 default:
28201@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
28202 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
28203 if (cur_cm_id_priv) {
28204 spin_unlock_irq(&cm.lock);
28205- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28206+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28207 counter[CM_SIDR_REQ_COUNTER]);
28208 goto out; /* Duplicate message. */
28209 }
28210@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
28211 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
28212 msg->retries = 1;
28213
28214- atomic_long_add(1 + msg->retries,
28215+ atomic_long_add_unchecked(1 + msg->retries,
28216 &port->counter_group[CM_XMIT].counter[attr_index]);
28217 if (msg->retries)
28218- atomic_long_add(msg->retries,
28219+ atomic_long_add_unchecked(msg->retries,
28220 &port->counter_group[CM_XMIT_RETRIES].
28221 counter[attr_index]);
28222
28223@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
28224 }
28225
28226 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
28227- atomic_long_inc(&port->counter_group[CM_RECV].
28228+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
28229 counter[attr_id - CM_ATTR_ID_OFFSET]);
28230
28231 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
28232@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
28233 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
28234
28235 return sprintf(buf, "%ld\n",
28236- atomic_long_read(&group->counter[cm_attr->index]));
28237+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
28238 }
28239
28240 static const struct sysfs_ops cm_counter_ops = {
28241diff -urNp linux-3.1.1/drivers/infiniband/core/fmr_pool.c linux-3.1.1/drivers/infiniband/core/fmr_pool.c
28242--- linux-3.1.1/drivers/infiniband/core/fmr_pool.c 2011-11-11 15:19:27.000000000 -0500
28243+++ linux-3.1.1/drivers/infiniband/core/fmr_pool.c 2011-11-16 18:39:07.000000000 -0500
28244@@ -97,8 +97,8 @@ struct ib_fmr_pool {
28245
28246 struct task_struct *thread;
28247
28248- atomic_t req_ser;
28249- atomic_t flush_ser;
28250+ atomic_unchecked_t req_ser;
28251+ atomic_unchecked_t flush_ser;
28252
28253 wait_queue_head_t force_wait;
28254 };
28255@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
28256 struct ib_fmr_pool *pool = pool_ptr;
28257
28258 do {
28259- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
28260+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
28261 ib_fmr_batch_release(pool);
28262
28263- atomic_inc(&pool->flush_ser);
28264+ atomic_inc_unchecked(&pool->flush_ser);
28265 wake_up_interruptible(&pool->force_wait);
28266
28267 if (pool->flush_function)
28268@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
28269 }
28270
28271 set_current_state(TASK_INTERRUPTIBLE);
28272- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
28273+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
28274 !kthread_should_stop())
28275 schedule();
28276 __set_current_state(TASK_RUNNING);
28277@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
28278 pool->dirty_watermark = params->dirty_watermark;
28279 pool->dirty_len = 0;
28280 spin_lock_init(&pool->pool_lock);
28281- atomic_set(&pool->req_ser, 0);
28282- atomic_set(&pool->flush_ser, 0);
28283+ atomic_set_unchecked(&pool->req_ser, 0);
28284+ atomic_set_unchecked(&pool->flush_ser, 0);
28285 init_waitqueue_head(&pool->force_wait);
28286
28287 pool->thread = kthread_run(ib_fmr_cleanup_thread,
28288@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
28289 }
28290 spin_unlock_irq(&pool->pool_lock);
28291
28292- serial = atomic_inc_return(&pool->req_ser);
28293+ serial = atomic_inc_return_unchecked(&pool->req_ser);
28294 wake_up_process(pool->thread);
28295
28296 if (wait_event_interruptible(pool->force_wait,
28297- atomic_read(&pool->flush_ser) - serial >= 0))
28298+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
28299 return -EINTR;
28300
28301 return 0;
28302@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
28303 } else {
28304 list_add_tail(&fmr->list, &pool->dirty_list);
28305 if (++pool->dirty_len >= pool->dirty_watermark) {
28306- atomic_inc(&pool->req_ser);
28307+ atomic_inc_unchecked(&pool->req_ser);
28308 wake_up_process(pool->thread);
28309 }
28310 }
28311diff -urNp linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c
28312--- linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c 2011-11-11 15:19:27.000000000 -0500
28313+++ linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c 2011-11-16 18:39:07.000000000 -0500
28314@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
28315 int err;
28316 struct fw_ri_tpte tpt;
28317 u32 stag_idx;
28318- static atomic_t key;
28319+ static atomic_unchecked_t key;
28320
28321 if (c4iw_fatal_error(rdev))
28322 return -EIO;
28323@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
28324 &rdev->resource.tpt_fifo_lock);
28325 if (!stag_idx)
28326 return -ENOMEM;
28327- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
28328+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
28329 }
28330 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
28331 __func__, stag_state, type, pdid, stag_idx);
28332diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c
28333--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-11 15:19:27.000000000 -0500
28334+++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-16 18:40:10.000000000 -0500
28335@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
28336 struct infinipath_counters counters;
28337 struct ipath_devdata *dd;
28338
28339+ pax_track_stack();
28340+
28341 dd = file->f_path.dentry->d_inode->i_private;
28342 dd->ipath_f_read_counters(dd, &counters);
28343
28344diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c
28345--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c 2011-11-11 15:19:27.000000000 -0500
28346+++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c 2011-11-16 18:39:07.000000000 -0500
28347@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28348 struct ib_atomic_eth *ateth;
28349 struct ipath_ack_entry *e;
28350 u64 vaddr;
28351- atomic64_t *maddr;
28352+ atomic64_unchecked_t *maddr;
28353 u64 sdata;
28354 u32 rkey;
28355 u8 next;
28356@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28357 IB_ACCESS_REMOTE_ATOMIC)))
28358 goto nack_acc_unlck;
28359 /* Perform atomic OP and save result. */
28360- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28361+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28362 sdata = be64_to_cpu(ateth->swap_data);
28363 e = &qp->s_ack_queue[qp->r_head_ack_queue];
28364 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
28365- (u64) atomic64_add_return(sdata, maddr) - sdata :
28366+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28367 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28368 be64_to_cpu(ateth->compare_data),
28369 sdata);
28370diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c
28371--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-11-11 15:19:27.000000000 -0500
28372+++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-11-16 18:39:07.000000000 -0500
28373@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
28374 unsigned long flags;
28375 struct ib_wc wc;
28376 u64 sdata;
28377- atomic64_t *maddr;
28378+ atomic64_unchecked_t *maddr;
28379 enum ib_wc_status send_status;
28380
28381 /*
28382@@ -382,11 +382,11 @@ again:
28383 IB_ACCESS_REMOTE_ATOMIC)))
28384 goto acc_err;
28385 /* Perform atomic OP and save result. */
28386- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28387+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28388 sdata = wqe->wr.wr.atomic.compare_add;
28389 *(u64 *) sqp->s_sge.sge.vaddr =
28390 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
28391- (u64) atomic64_add_return(sdata, maddr) - sdata :
28392+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28393 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28394 sdata, wqe->wr.wr.atomic.swap);
28395 goto send_comp;
28396diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes.c linux-3.1.1/drivers/infiniband/hw/nes/nes.c
28397--- linux-3.1.1/drivers/infiniband/hw/nes/nes.c 2011-11-11 15:19:27.000000000 -0500
28398+++ linux-3.1.1/drivers/infiniband/hw/nes/nes.c 2011-11-16 18:39:07.000000000 -0500
28399@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
28400 LIST_HEAD(nes_adapter_list);
28401 static LIST_HEAD(nes_dev_list);
28402
28403-atomic_t qps_destroyed;
28404+atomic_unchecked_t qps_destroyed;
28405
28406 static unsigned int ee_flsh_adapter;
28407 static unsigned int sysfs_nonidx_addr;
28408@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
28409 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
28410 struct nes_adapter *nesadapter = nesdev->nesadapter;
28411
28412- atomic_inc(&qps_destroyed);
28413+ atomic_inc_unchecked(&qps_destroyed);
28414
28415 /* Free the control structures */
28416
28417diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c
28418--- linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c 2011-11-11 15:19:27.000000000 -0500
28419+++ linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c 2011-11-16 18:39:07.000000000 -0500
28420@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
28421 u32 cm_packets_retrans;
28422 u32 cm_packets_created;
28423 u32 cm_packets_received;
28424-atomic_t cm_listens_created;
28425-atomic_t cm_listens_destroyed;
28426+atomic_unchecked_t cm_listens_created;
28427+atomic_unchecked_t cm_listens_destroyed;
28428 u32 cm_backlog_drops;
28429-atomic_t cm_loopbacks;
28430-atomic_t cm_nodes_created;
28431-atomic_t cm_nodes_destroyed;
28432-atomic_t cm_accel_dropped_pkts;
28433-atomic_t cm_resets_recvd;
28434+atomic_unchecked_t cm_loopbacks;
28435+atomic_unchecked_t cm_nodes_created;
28436+atomic_unchecked_t cm_nodes_destroyed;
28437+atomic_unchecked_t cm_accel_dropped_pkts;
28438+atomic_unchecked_t cm_resets_recvd;
28439
28440 static inline int mini_cm_accelerated(struct nes_cm_core *,
28441 struct nes_cm_node *);
28442@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
28443
28444 static struct nes_cm_core *g_cm_core;
28445
28446-atomic_t cm_connects;
28447-atomic_t cm_accepts;
28448-atomic_t cm_disconnects;
28449-atomic_t cm_closes;
28450-atomic_t cm_connecteds;
28451-atomic_t cm_connect_reqs;
28452-atomic_t cm_rejects;
28453+atomic_unchecked_t cm_connects;
28454+atomic_unchecked_t cm_accepts;
28455+atomic_unchecked_t cm_disconnects;
28456+atomic_unchecked_t cm_closes;
28457+atomic_unchecked_t cm_connecteds;
28458+atomic_unchecked_t cm_connect_reqs;
28459+atomic_unchecked_t cm_rejects;
28460
28461
28462 /**
28463@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
28464 kfree(listener);
28465 listener = NULL;
28466 ret = 0;
28467- atomic_inc(&cm_listens_destroyed);
28468+ atomic_inc_unchecked(&cm_listens_destroyed);
28469 } else {
28470 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
28471 }
28472@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
28473 cm_node->rem_mac);
28474
28475 add_hte_node(cm_core, cm_node);
28476- atomic_inc(&cm_nodes_created);
28477+ atomic_inc_unchecked(&cm_nodes_created);
28478
28479 return cm_node;
28480 }
28481@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
28482 }
28483
28484 atomic_dec(&cm_core->node_cnt);
28485- atomic_inc(&cm_nodes_destroyed);
28486+ atomic_inc_unchecked(&cm_nodes_destroyed);
28487 nesqp = cm_node->nesqp;
28488 if (nesqp) {
28489 nesqp->cm_node = NULL;
28490@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
28491
28492 static void drop_packet(struct sk_buff *skb)
28493 {
28494- atomic_inc(&cm_accel_dropped_pkts);
28495+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28496 dev_kfree_skb_any(skb);
28497 }
28498
28499@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
28500 {
28501
28502 int reset = 0; /* whether to send reset in case of err.. */
28503- atomic_inc(&cm_resets_recvd);
28504+ atomic_inc_unchecked(&cm_resets_recvd);
28505 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
28506 " refcnt=%d\n", cm_node, cm_node->state,
28507 atomic_read(&cm_node->ref_count));
28508@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
28509 rem_ref_cm_node(cm_node->cm_core, cm_node);
28510 return NULL;
28511 }
28512- atomic_inc(&cm_loopbacks);
28513+ atomic_inc_unchecked(&cm_loopbacks);
28514 loopbackremotenode->loopbackpartner = cm_node;
28515 loopbackremotenode->tcp_cntxt.rcv_wscale =
28516 NES_CM_DEFAULT_RCV_WND_SCALE;
28517@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
28518 add_ref_cm_node(cm_node);
28519 } else if (cm_node->state == NES_CM_STATE_TSA) {
28520 rem_ref_cm_node(cm_core, cm_node);
28521- atomic_inc(&cm_accel_dropped_pkts);
28522+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28523 dev_kfree_skb_any(skb);
28524 break;
28525 }
28526@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
28527
28528 if ((cm_id) && (cm_id->event_handler)) {
28529 if (issue_disconn) {
28530- atomic_inc(&cm_disconnects);
28531+ atomic_inc_unchecked(&cm_disconnects);
28532 cm_event.event = IW_CM_EVENT_DISCONNECT;
28533 cm_event.status = disconn_status;
28534 cm_event.local_addr = cm_id->local_addr;
28535@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
28536 }
28537
28538 if (issue_close) {
28539- atomic_inc(&cm_closes);
28540+ atomic_inc_unchecked(&cm_closes);
28541 nes_disconnect(nesqp, 1);
28542
28543 cm_id->provider_data = nesqp;
28544@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
28545
28546 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
28547 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
28548- atomic_inc(&cm_accepts);
28549+ atomic_inc_unchecked(&cm_accepts);
28550
28551 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
28552 netdev_refcnt_read(nesvnic->netdev));
28553@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
28554
28555 struct nes_cm_core *cm_core;
28556
28557- atomic_inc(&cm_rejects);
28558+ atomic_inc_unchecked(&cm_rejects);
28559 cm_node = (struct nes_cm_node *) cm_id->provider_data;
28560 loopback = cm_node->loopbackpartner;
28561 cm_core = cm_node->cm_core;
28562@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
28563 ntohl(cm_id->local_addr.sin_addr.s_addr),
28564 ntohs(cm_id->local_addr.sin_port));
28565
28566- atomic_inc(&cm_connects);
28567+ atomic_inc_unchecked(&cm_connects);
28568 nesqp->active_conn = 1;
28569
28570 /* cache the cm_id in the qp */
28571@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
28572 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
28573 return err;
28574 }
28575- atomic_inc(&cm_listens_created);
28576+ atomic_inc_unchecked(&cm_listens_created);
28577 }
28578
28579 cm_id->add_ref(cm_id);
28580@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
28581 if (nesqp->destroyed) {
28582 return;
28583 }
28584- atomic_inc(&cm_connecteds);
28585+ atomic_inc_unchecked(&cm_connecteds);
28586 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
28587 " local port 0x%04X. jiffies = %lu.\n",
28588 nesqp->hwqp.qp_id,
28589@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
28590
28591 cm_id->add_ref(cm_id);
28592 ret = cm_id->event_handler(cm_id, &cm_event);
28593- atomic_inc(&cm_closes);
28594+ atomic_inc_unchecked(&cm_closes);
28595 cm_event.event = IW_CM_EVENT_CLOSE;
28596 cm_event.status = 0;
28597 cm_event.provider_data = cm_id->provider_data;
28598@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
28599 return;
28600 cm_id = cm_node->cm_id;
28601
28602- atomic_inc(&cm_connect_reqs);
28603+ atomic_inc_unchecked(&cm_connect_reqs);
28604 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28605 cm_node, cm_id, jiffies);
28606
28607@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
28608 return;
28609 cm_id = cm_node->cm_id;
28610
28611- atomic_inc(&cm_connect_reqs);
28612+ atomic_inc_unchecked(&cm_connect_reqs);
28613 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28614 cm_node, cm_id, jiffies);
28615
28616diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes.h linux-3.1.1/drivers/infiniband/hw/nes/nes.h
28617--- linux-3.1.1/drivers/infiniband/hw/nes/nes.h 2011-11-11 15:19:27.000000000 -0500
28618+++ linux-3.1.1/drivers/infiniband/hw/nes/nes.h 2011-11-16 18:39:07.000000000 -0500
28619@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
28620 extern unsigned int wqm_quanta;
28621 extern struct list_head nes_adapter_list;
28622
28623-extern atomic_t cm_connects;
28624-extern atomic_t cm_accepts;
28625-extern atomic_t cm_disconnects;
28626-extern atomic_t cm_closes;
28627-extern atomic_t cm_connecteds;
28628-extern atomic_t cm_connect_reqs;
28629-extern atomic_t cm_rejects;
28630-extern atomic_t mod_qp_timouts;
28631-extern atomic_t qps_created;
28632-extern atomic_t qps_destroyed;
28633-extern atomic_t sw_qps_destroyed;
28634+extern atomic_unchecked_t cm_connects;
28635+extern atomic_unchecked_t cm_accepts;
28636+extern atomic_unchecked_t cm_disconnects;
28637+extern atomic_unchecked_t cm_closes;
28638+extern atomic_unchecked_t cm_connecteds;
28639+extern atomic_unchecked_t cm_connect_reqs;
28640+extern atomic_unchecked_t cm_rejects;
28641+extern atomic_unchecked_t mod_qp_timouts;
28642+extern atomic_unchecked_t qps_created;
28643+extern atomic_unchecked_t qps_destroyed;
28644+extern atomic_unchecked_t sw_qps_destroyed;
28645 extern u32 mh_detected;
28646 extern u32 mh_pauses_sent;
28647 extern u32 cm_packets_sent;
28648@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
28649 extern u32 cm_packets_received;
28650 extern u32 cm_packets_dropped;
28651 extern u32 cm_packets_retrans;
28652-extern atomic_t cm_listens_created;
28653-extern atomic_t cm_listens_destroyed;
28654+extern atomic_unchecked_t cm_listens_created;
28655+extern atomic_unchecked_t cm_listens_destroyed;
28656 extern u32 cm_backlog_drops;
28657-extern atomic_t cm_loopbacks;
28658-extern atomic_t cm_nodes_created;
28659-extern atomic_t cm_nodes_destroyed;
28660-extern atomic_t cm_accel_dropped_pkts;
28661-extern atomic_t cm_resets_recvd;
28662+extern atomic_unchecked_t cm_loopbacks;
28663+extern atomic_unchecked_t cm_nodes_created;
28664+extern atomic_unchecked_t cm_nodes_destroyed;
28665+extern atomic_unchecked_t cm_accel_dropped_pkts;
28666+extern atomic_unchecked_t cm_resets_recvd;
28667
28668 extern u32 int_mod_timer_init;
28669 extern u32 int_mod_cq_depth_256;
28670diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c
28671--- linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c 2011-11-11 15:19:27.000000000 -0500
28672+++ linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c 2011-11-16 18:39:07.000000000 -0500
28673@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
28674 target_stat_values[++index] = mh_detected;
28675 target_stat_values[++index] = mh_pauses_sent;
28676 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
28677- target_stat_values[++index] = atomic_read(&cm_connects);
28678- target_stat_values[++index] = atomic_read(&cm_accepts);
28679- target_stat_values[++index] = atomic_read(&cm_disconnects);
28680- target_stat_values[++index] = atomic_read(&cm_connecteds);
28681- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
28682- target_stat_values[++index] = atomic_read(&cm_rejects);
28683- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
28684- target_stat_values[++index] = atomic_read(&qps_created);
28685- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
28686- target_stat_values[++index] = atomic_read(&qps_destroyed);
28687- target_stat_values[++index] = atomic_read(&cm_closes);
28688+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
28689+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
28690+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
28691+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
28692+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
28693+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
28694+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
28695+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
28696+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
28697+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
28698+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
28699 target_stat_values[++index] = cm_packets_sent;
28700 target_stat_values[++index] = cm_packets_bounced;
28701 target_stat_values[++index] = cm_packets_created;
28702 target_stat_values[++index] = cm_packets_received;
28703 target_stat_values[++index] = cm_packets_dropped;
28704 target_stat_values[++index] = cm_packets_retrans;
28705- target_stat_values[++index] = atomic_read(&cm_listens_created);
28706- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
28707+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
28708+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
28709 target_stat_values[++index] = cm_backlog_drops;
28710- target_stat_values[++index] = atomic_read(&cm_loopbacks);
28711- target_stat_values[++index] = atomic_read(&cm_nodes_created);
28712- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
28713- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
28714- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
28715+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
28716+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
28717+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
28718+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
28719+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
28720 target_stat_values[++index] = nesadapter->free_4kpbl;
28721 target_stat_values[++index] = nesadapter->free_256pbl;
28722 target_stat_values[++index] = int_mod_timer_init;
28723diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c
28724--- linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-11 15:19:27.000000000 -0500
28725+++ linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-16 18:39:07.000000000 -0500
28726@@ -46,9 +46,9 @@
28727
28728 #include <rdma/ib_umem.h>
28729
28730-atomic_t mod_qp_timouts;
28731-atomic_t qps_created;
28732-atomic_t sw_qps_destroyed;
28733+atomic_unchecked_t mod_qp_timouts;
28734+atomic_unchecked_t qps_created;
28735+atomic_unchecked_t sw_qps_destroyed;
28736
28737 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
28738
28739@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struc
28740 if (init_attr->create_flags)
28741 return ERR_PTR(-EINVAL);
28742
28743- atomic_inc(&qps_created);
28744+ atomic_inc_unchecked(&qps_created);
28745 switch (init_attr->qp_type) {
28746 case IB_QPT_RC:
28747 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
28748@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *
28749 struct iw_cm_event cm_event;
28750 int ret;
28751
28752- atomic_inc(&sw_qps_destroyed);
28753+ atomic_inc_unchecked(&sw_qps_destroyed);
28754 nesqp->destroyed = 1;
28755
28756 /* Blow away the connection if it exists. */
28757diff -urNp linux-3.1.1/drivers/infiniband/hw/qib/qib.h linux-3.1.1/drivers/infiniband/hw/qib/qib.h
28758--- linux-3.1.1/drivers/infiniband/hw/qib/qib.h 2011-11-11 15:19:27.000000000 -0500
28759+++ linux-3.1.1/drivers/infiniband/hw/qib/qib.h 2011-11-16 18:39:07.000000000 -0500
28760@@ -51,6 +51,7 @@
28761 #include <linux/completion.h>
28762 #include <linux/kref.h>
28763 #include <linux/sched.h>
28764+#include <linux/slab.h>
28765
28766 #include "qib_common.h"
28767 #include "qib_verbs.h"
28768diff -urNp linux-3.1.1/drivers/input/gameport/gameport.c linux-3.1.1/drivers/input/gameport/gameport.c
28769--- linux-3.1.1/drivers/input/gameport/gameport.c 2011-11-11 15:19:27.000000000 -0500
28770+++ linux-3.1.1/drivers/input/gameport/gameport.c 2011-11-16 18:39:07.000000000 -0500
28771@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
28772 */
28773 static void gameport_init_port(struct gameport *gameport)
28774 {
28775- static atomic_t gameport_no = ATOMIC_INIT(0);
28776+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
28777
28778 __module_get(THIS_MODULE);
28779
28780 mutex_init(&gameport->drv_mutex);
28781 device_initialize(&gameport->dev);
28782 dev_set_name(&gameport->dev, "gameport%lu",
28783- (unsigned long)atomic_inc_return(&gameport_no) - 1);
28784+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
28785 gameport->dev.bus = &gameport_bus;
28786 gameport->dev.release = gameport_release_port;
28787 if (gameport->parent)
28788diff -urNp linux-3.1.1/drivers/input/input.c linux-3.1.1/drivers/input/input.c
28789--- linux-3.1.1/drivers/input/input.c 2011-11-11 15:19:27.000000000 -0500
28790+++ linux-3.1.1/drivers/input/input.c 2011-11-16 18:39:07.000000000 -0500
28791@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
28792 */
28793 int input_register_device(struct input_dev *dev)
28794 {
28795- static atomic_t input_no = ATOMIC_INIT(0);
28796+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
28797 struct input_handler *handler;
28798 const char *path;
28799 int error;
28800@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
28801 dev->setkeycode = input_default_setkeycode;
28802
28803 dev_set_name(&dev->dev, "input%ld",
28804- (unsigned long) atomic_inc_return(&input_no) - 1);
28805+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
28806
28807 error = device_add(&dev->dev);
28808 if (error)
28809diff -urNp linux-3.1.1/drivers/input/joystick/sidewinder.c linux-3.1.1/drivers/input/joystick/sidewinder.c
28810--- linux-3.1.1/drivers/input/joystick/sidewinder.c 2011-11-11 15:19:27.000000000 -0500
28811+++ linux-3.1.1/drivers/input/joystick/sidewinder.c 2011-11-16 18:40:10.000000000 -0500
28812@@ -30,6 +30,7 @@
28813 #include <linux/kernel.h>
28814 #include <linux/module.h>
28815 #include <linux/slab.h>
28816+#include <linux/sched.h>
28817 #include <linux/init.h>
28818 #include <linux/input.h>
28819 #include <linux/gameport.h>
28820@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
28821 unsigned char buf[SW_LENGTH];
28822 int i;
28823
28824+ pax_track_stack();
28825+
28826 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
28827
28828 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
28829diff -urNp linux-3.1.1/drivers/input/joystick/xpad.c linux-3.1.1/drivers/input/joystick/xpad.c
28830--- linux-3.1.1/drivers/input/joystick/xpad.c 2011-11-11 15:19:27.000000000 -0500
28831+++ linux-3.1.1/drivers/input/joystick/xpad.c 2011-11-16 18:39:07.000000000 -0500
28832@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_clas
28833
28834 static int xpad_led_probe(struct usb_xpad *xpad)
28835 {
28836- static atomic_t led_seq = ATOMIC_INIT(0);
28837+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
28838 long led_no;
28839 struct xpad_led *led;
28840 struct led_classdev *led_cdev;
28841@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpa
28842 if (!led)
28843 return -ENOMEM;
28844
28845- led_no = (long)atomic_inc_return(&led_seq) - 1;
28846+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
28847
28848 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
28849 led->xpad = xpad;
28850diff -urNp linux-3.1.1/drivers/input/mousedev.c linux-3.1.1/drivers/input/mousedev.c
28851--- linux-3.1.1/drivers/input/mousedev.c 2011-11-11 15:19:27.000000000 -0500
28852+++ linux-3.1.1/drivers/input/mousedev.c 2011-11-16 18:39:07.000000000 -0500
28853@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
28854
28855 spin_unlock_irq(&client->packet_lock);
28856
28857- if (copy_to_user(buffer, data, count))
28858+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
28859 return -EFAULT;
28860
28861 return count;
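The mousedev change (and the matching copy_from_user() guards added to b1.c, icn.c and radio-cadet.c below) rejects a user-supplied count larger than the on-stack buffer before any copy happens, instead of trusting the caller to keep count in range. A compact model with memcpy() standing in for copy_to_user(); the function and buffer names are illustrative only:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t packet_read(char *user_buf, size_t count)
{
        unsigned char data[8];                  /* fixed-size packet buffer */

        memset(data, 0, sizeof(data));
        if (count > sizeof(data))               /* new guard: refuse oversized reads */
                return -EFAULT;
        memcpy(user_buf, data, count);          /* copy_to_user() in the driver */
        return (ssize_t)count;
}

int main(void)
{
        char buf[8];

        printf("read 8   -> %zd\n", packet_read(buf, sizeof(buf)));
        printf("read 512 -> %zd\n", packet_read(buf, 512));    /* rejected with -EFAULT */
        return 0;
}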
28862diff -urNp linux-3.1.1/drivers/input/serio/serio.c linux-3.1.1/drivers/input/serio/serio.c
28863--- linux-3.1.1/drivers/input/serio/serio.c 2011-11-11 15:19:27.000000000 -0500
28864+++ linux-3.1.1/drivers/input/serio/serio.c 2011-11-16 18:39:07.000000000 -0500
28865@@ -497,7 +497,7 @@ static void serio_release_port(struct de
28866 */
28867 static void serio_init_port(struct serio *serio)
28868 {
28869- static atomic_t serio_no = ATOMIC_INIT(0);
28870+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
28871
28872 __module_get(THIS_MODULE);
28873
28874@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
28875 mutex_init(&serio->drv_mutex);
28876 device_initialize(&serio->dev);
28877 dev_set_name(&serio->dev, "serio%ld",
28878- (long)atomic_inc_return(&serio_no) - 1);
28879+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
28880 serio->dev.bus = &serio_bus;
28881 serio->dev.release = serio_release_port;
28882 serio->dev.groups = serio_device_attr_groups;
28883diff -urNp linux-3.1.1/drivers/isdn/capi/capi.c linux-3.1.1/drivers/isdn/capi/capi.c
28884--- linux-3.1.1/drivers/isdn/capi/capi.c 2011-11-11 15:19:27.000000000 -0500
28885+++ linux-3.1.1/drivers/isdn/capi/capi.c 2011-11-16 18:39:07.000000000 -0500
28886@@ -83,8 +83,8 @@ struct capiminor {
28887
28888 struct capi20_appl *ap;
28889 u32 ncci;
28890- atomic_t datahandle;
28891- atomic_t msgid;
28892+ atomic_unchecked_t datahandle;
28893+ atomic_unchecked_t msgid;
28894
28895 struct tty_port port;
28896 int ttyinstop;
28897@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
28898 capimsg_setu16(s, 2, mp->ap->applid);
28899 capimsg_setu8 (s, 4, CAPI_DATA_B3);
28900 capimsg_setu8 (s, 5, CAPI_RESP);
28901- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
28902+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
28903 capimsg_setu32(s, 8, mp->ncci);
28904 capimsg_setu16(s, 12, datahandle);
28905 }
28906@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
28907 mp->outbytes -= len;
28908 spin_unlock_bh(&mp->outlock);
28909
28910- datahandle = atomic_inc_return(&mp->datahandle);
28911+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
28912 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
28913 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28914 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28915 capimsg_setu16(skb->data, 2, mp->ap->applid);
28916 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
28917 capimsg_setu8 (skb->data, 5, CAPI_REQ);
28918- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
28919+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
28920 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
28921 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
28922 capimsg_setu16(skb->data, 16, len); /* Data length */
28923diff -urNp linux-3.1.1/drivers/isdn/gigaset/common.c linux-3.1.1/drivers/isdn/gigaset/common.c
28924--- linux-3.1.1/drivers/isdn/gigaset/common.c 2011-11-11 15:19:27.000000000 -0500
28925+++ linux-3.1.1/drivers/isdn/gigaset/common.c 2011-11-16 18:39:07.000000000 -0500
28926@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
28927 cs->commands_pending = 0;
28928 cs->cur_at_seq = 0;
28929 cs->gotfwver = -1;
28930- cs->open_count = 0;
28931+ local_set(&cs->open_count, 0);
28932 cs->dev = NULL;
28933 cs->tty = NULL;
28934 cs->tty_dev = NULL;
28935diff -urNp linux-3.1.1/drivers/isdn/gigaset/gigaset.h linux-3.1.1/drivers/isdn/gigaset/gigaset.h
28936--- linux-3.1.1/drivers/isdn/gigaset/gigaset.h 2011-11-11 15:19:27.000000000 -0500
28937+++ linux-3.1.1/drivers/isdn/gigaset/gigaset.h 2011-11-16 18:39:07.000000000 -0500
28938@@ -35,6 +35,7 @@
28939 #include <linux/tty_driver.h>
28940 #include <linux/list.h>
28941 #include <linux/atomic.h>
28942+#include <asm/local.h>
28943
28944 #define GIG_VERSION {0, 5, 0, 0}
28945 #define GIG_COMPAT {0, 4, 0, 0}
28946@@ -433,7 +434,7 @@ struct cardstate {
28947 spinlock_t cmdlock;
28948 unsigned curlen, cmdbytes;
28949
28950- unsigned open_count;
28951+ local_t open_count;
28952 struct tty_struct *tty;
28953 struct tasklet_struct if_wake_tasklet;
28954 unsigned control_state;
28955diff -urNp linux-3.1.1/drivers/isdn/gigaset/interface.c linux-3.1.1/drivers/isdn/gigaset/interface.c
28956--- linux-3.1.1/drivers/isdn/gigaset/interface.c 2011-11-11 15:19:27.000000000 -0500
28957+++ linux-3.1.1/drivers/isdn/gigaset/interface.c 2011-11-16 18:39:07.000000000 -0500
28958@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
28959 }
28960 tty->driver_data = cs;
28961
28962- ++cs->open_count;
28963-
28964- if (cs->open_count == 1) {
28965+ if (local_inc_return(&cs->open_count) == 1) {
28966 spin_lock_irqsave(&cs->lock, flags);
28967 cs->tty = tty;
28968 spin_unlock_irqrestore(&cs->lock, flags);
28969@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
28970
28971 if (!cs->connected)
28972 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28973- else if (!cs->open_count)
28974+ else if (!local_read(&cs->open_count))
28975 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28976 else {
28977- if (!--cs->open_count) {
28978+ if (!local_dec_return(&cs->open_count)) {
28979 spin_lock_irqsave(&cs->lock, flags);
28980 cs->tty = NULL;
28981 spin_unlock_irqrestore(&cs->lock, flags);
28982@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
28983 if (!cs->connected) {
28984 gig_dbg(DEBUG_IF, "not connected");
28985 retval = -ENODEV;
28986- } else if (!cs->open_count)
28987+ } else if (!local_read(&cs->open_count))
28988 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28989 else {
28990 retval = 0;
28991@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
28992 retval = -ENODEV;
28993 goto done;
28994 }
28995- if (!cs->open_count) {
28996+ if (!local_read(&cs->open_count)) {
28997 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28998 retval = -ENODEV;
28999 goto done;
29000@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
29001 if (!cs->connected) {
29002 gig_dbg(DEBUG_IF, "not connected");
29003 retval = -ENODEV;
29004- } else if (!cs->open_count)
29005+ } else if (!local_read(&cs->open_count))
29006 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29007 else if (cs->mstate != MS_LOCKED) {
29008 dev_warn(cs->dev, "can't write to unlocked device\n");
29009@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
29010
29011 if (!cs->connected)
29012 gig_dbg(DEBUG_IF, "not connected");
29013- else if (!cs->open_count)
29014+ else if (!local_read(&cs->open_count))
29015 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29016 else if (cs->mstate != MS_LOCKED)
29017 dev_warn(cs->dev, "can't write to unlocked device\n");
29018@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
29019
29020 if (!cs->connected)
29021 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29022- else if (!cs->open_count)
29023+ else if (!local_read(&cs->open_count))
29024 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29025 else
29026 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29027@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
29028
29029 if (!cs->connected)
29030 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29031- else if (!cs->open_count)
29032+ else if (!local_read(&cs->open_count))
29033 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29034 else
29035 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29036@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
29037 goto out;
29038 }
29039
29040- if (!cs->open_count) {
29041+ if (!local_read(&cs->open_count)) {
29042 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29043 goto out;
29044 }
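In the gigaset driver the plain unsigned open_count becomes a local_t, and every access goes through local_read()/local_inc_return()/local_dec_return(), so the open/close accounting can no longer be torn by concurrent callers; the separate ++ and == 1 test in if_open() collapses into one atomic increment-and-test. A userspace sketch of the same open/close logic using C11 atomics in place of local_t; names mirror the patch but are not the kernel API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count = 0;

static void if_open(void)
{
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)  /* local_inc_return(...) == 1 */
                puts("first open: bind tty");
}

static void if_close(void)
{
        if (atomic_fetch_sub(&open_count, 1) - 1 == 0)  /* !local_dec_return(...) */
                puts("last close: unbind tty");
}

int main(void)
{
        if_open();
        if_close();
        return 0;
}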
29045diff -urNp linux-3.1.1/drivers/isdn/hardware/avm/b1.c linux-3.1.1/drivers/isdn/hardware/avm/b1.c
29046--- linux-3.1.1/drivers/isdn/hardware/avm/b1.c 2011-11-11 15:19:27.000000000 -0500
29047+++ linux-3.1.1/drivers/isdn/hardware/avm/b1.c 2011-11-16 18:39:07.000000000 -0500
29048@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
29049 }
29050 if (left) {
29051 if (t4file->user) {
29052- if (copy_from_user(buf, dp, left))
29053+ if (left > sizeof buf || copy_from_user(buf, dp, left))
29054 return -EFAULT;
29055 } else {
29056 memcpy(buf, dp, left);
29057@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
29058 }
29059 if (left) {
29060 if (config->user) {
29061- if (copy_from_user(buf, dp, left))
29062+ if (left > sizeof buf || copy_from_user(buf, dp, left))
29063 return -EFAULT;
29064 } else {
29065 memcpy(buf, dp, left);
29066diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c
29067--- linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-11 15:19:27.000000000 -0500
29068+++ linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-16 18:40:10.000000000 -0500
29069@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
29070 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
29071 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
29072
29073+ pax_track_stack();
29074
29075 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
29076 {
29077diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c
29078--- linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c 2011-11-11 15:19:27.000000000 -0500
29079+++ linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c 2011-11-16 18:40:10.000000000 -0500
29080@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
29081 IDI_SYNC_REQ req;
29082 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29083
29084+ pax_track_stack();
29085+
29086 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29087
29088 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29089diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c
29090--- linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-11 15:19:27.000000000 -0500
29091+++ linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-16 18:40:10.000000000 -0500
29092@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
29093 IDI_SYNC_REQ req;
29094 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29095
29096+ pax_track_stack();
29097+
29098 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29099
29100 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29101diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c
29102--- linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-11 15:19:27.000000000 -0500
29103+++ linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-16 18:40:10.000000000 -0500
29104@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
29105 IDI_SYNC_REQ req;
29106 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29107
29108+ pax_track_stack();
29109+
29110 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29111
29112 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29113diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h
29114--- linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h 2011-11-11 15:19:27.000000000 -0500
29115+++ linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h 2011-11-16 18:39:07.000000000 -0500
29116@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
29117 } diva_didd_add_adapter_t;
29118 typedef struct _diva_didd_remove_adapter {
29119 IDI_CALL p_request;
29120-} diva_didd_remove_adapter_t;
29121+} __no_const diva_didd_remove_adapter_t;
29122 typedef struct _diva_didd_read_adapter_array {
29123 void * buffer;
29124 dword length;
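diva_didd_remove_adapter_t here, and the structures in xdi_adapter.h, dvb_demux.h, cxusb.c and dw2102.c later on, gain __no_const because PaX's constify plugin otherwise turns structures made up only of function pointers into read-only data, and these particular structures are filled in at run time. A small sketch of the trade-off the plugin automates; __no_const itself is a plugin-provided attribute and appears here only as a comment:

#include <stdio.h>

struct ops {
        void (*handler)(void);
};

static void real_handler(void) { puts("handled"); }

/* fully known at build time: safe to constify, can live in read-only memory */
static const struct ops static_ops = {
        .handler = real_handler,
};

/* assembled at run time: this is the case that needs __no_const */
static struct ops runtime_ops;

int main(void)
{
        runtime_ops.handler = real_handler;     /* run-time assignment forbids const */
        static_ops.handler();
        runtime_ops.handler();
        return 0;
}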
29125diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c
29126--- linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c 2011-11-11 15:19:27.000000000 -0500
29127+++ linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c 2011-11-16 18:40:10.000000000 -0500
29128@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
29129 IDI_SYNC_REQ req;
29130 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29131
29132+ pax_track_stack();
29133+
29134 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29135
29136 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29137diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/message.c linux-3.1.1/drivers/isdn/hardware/eicon/message.c
29138--- linux-3.1.1/drivers/isdn/hardware/eicon/message.c 2011-11-11 15:19:27.000000000 -0500
29139+++ linux-3.1.1/drivers/isdn/hardware/eicon/message.c 2011-11-16 18:40:10.000000000 -0500
29140@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
29141 dword d;
29142 word w;
29143
29144+ pax_track_stack();
29145+
29146 a = plci->adapter;
29147 Id = ((word)plci->Id<<8)|a->Id;
29148 PUT_WORD(&SS_Ind[4],0x0000);
29149@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
29150 word j, n, w;
29151 dword d;
29152
29153+ pax_track_stack();
29154+
29155
29156 for(i=0;i<8;i++) bp_parms[i].length = 0;
29157 for(i=0;i<2;i++) global_config[i].length = 0;
29158@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
29159 const byte llc3[] = {4,3,2,2,6,6,0};
29160 const byte header[] = {0,2,3,3,0,0,0};
29161
29162+ pax_track_stack();
29163+
29164 for(i=0;i<8;i++) bp_parms[i].length = 0;
29165 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
29166 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
29167@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
29168 word appl_number_group_type[MAX_APPL];
29169 PLCI *auxplci;
29170
29171+ pax_track_stack();
29172+
29173 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
29174
29175 if(!a->group_optimization_enabled)
29176diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c
29177--- linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-11 15:19:27.000000000 -0500
29178+++ linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-16 18:40:10.000000000 -0500
29179@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
29180 IDI_SYNC_REQ req;
29181 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29182
29183+ pax_track_stack();
29184+
29185 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29186
29187 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29188diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h
29189--- linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-11 15:19:27.000000000 -0500
29190+++ linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-16 18:39:07.000000000 -0500
29191@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
29192 typedef struct _diva_os_idi_adapter_interface {
29193 diva_init_card_proc_t cleanup_adapter_proc;
29194 diva_cmd_card_proc_t cmd_proc;
29195-} diva_os_idi_adapter_interface_t;
29196+} __no_const diva_os_idi_adapter_interface_t;
29197
29198 typedef struct _diva_os_xdi_adapter {
29199 struct list_head link;
29200diff -urNp linux-3.1.1/drivers/isdn/i4l/isdn_common.c linux-3.1.1/drivers/isdn/i4l/isdn_common.c
29201--- linux-3.1.1/drivers/isdn/i4l/isdn_common.c 2011-11-11 15:19:27.000000000 -0500
29202+++ linux-3.1.1/drivers/isdn/i4l/isdn_common.c 2011-11-16 18:40:10.000000000 -0500
29203@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
29204 } iocpar;
29205 void __user *argp = (void __user *)arg;
29206
29207+ pax_track_stack();
29208+
29209 #define name iocpar.name
29210 #define bname iocpar.bname
29211 #define iocts iocpar.iocts
29212diff -urNp linux-3.1.1/drivers/isdn/icn/icn.c linux-3.1.1/drivers/isdn/icn/icn.c
29213--- linux-3.1.1/drivers/isdn/icn/icn.c 2011-11-11 15:19:27.000000000 -0500
29214+++ linux-3.1.1/drivers/isdn/icn/icn.c 2011-11-16 18:39:07.000000000 -0500
29215@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
29216 if (count > len)
29217 count = len;
29218 if (user) {
29219- if (copy_from_user(msg, buf, count))
29220+ if (count > sizeof msg || copy_from_user(msg, buf, count))
29221 return -EFAULT;
29222 } else
29223 memcpy(msg, buf, count);
29224diff -urNp linux-3.1.1/drivers/lguest/core.c linux-3.1.1/drivers/lguest/core.c
29225--- linux-3.1.1/drivers/lguest/core.c 2011-11-11 15:19:27.000000000 -0500
29226+++ linux-3.1.1/drivers/lguest/core.c 2011-11-16 18:39:07.000000000 -0500
29227@@ -92,9 +92,17 @@ static __init int map_switcher(void)
29228 * it's worked so far. The end address needs +1 because __get_vm_area
29229 * allocates an extra guard page, so we need space for that.
29230 */
29231+
29232+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29233+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
29234+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
29235+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
29236+#else
29237 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
29238 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
29239 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
29240+#endif
29241+
29242 if (!switcher_vma) {
29243 err = -ENOMEM;
29244 printk("lguest: could not map switcher pages high\n");
29245@@ -119,7 +127,7 @@ static __init int map_switcher(void)
29246 * Now the Switcher is mapped at the right address, we can't fail!
29247 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
29248 */
29249- memcpy(switcher_vma->addr, start_switcher_text,
29250+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
29251 end_switcher_text - start_switcher_text);
29252
29253 printk(KERN_INFO "lguest: mapped switcher at %p\n",
29254diff -urNp linux-3.1.1/drivers/lguest/x86/core.c linux-3.1.1/drivers/lguest/x86/core.c
29255--- linux-3.1.1/drivers/lguest/x86/core.c 2011-11-11 15:19:27.000000000 -0500
29256+++ linux-3.1.1/drivers/lguest/x86/core.c 2011-11-16 18:39:07.000000000 -0500
29257@@ -59,7 +59,7 @@ static struct {
29258 /* Offset from where switcher.S was compiled to where we've copied it */
29259 static unsigned long switcher_offset(void)
29260 {
29261- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
29262+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
29263 }
29264
29265 /* This cpu's struct lguest_pages. */
29266@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
29267 * These copies are pretty cheap, so we do them unconditionally: */
29268 /* Save the current Host top-level page directory.
29269 */
29270+
29271+#ifdef CONFIG_PAX_PER_CPU_PGD
29272+ pages->state.host_cr3 = read_cr3();
29273+#else
29274 pages->state.host_cr3 = __pa(current->mm->pgd);
29275+#endif
29276+
29277 /*
29278 * Set up the Guest's page tables to see this CPU's pages (and no
29279 * other CPU's pages).
29280@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
29281 * compiled-in switcher code and the high-mapped copy we just made.
29282 */
29283 for (i = 0; i < IDT_ENTRIES; i++)
29284- default_idt_entries[i] += switcher_offset();
29285+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
29286
29287 /*
29288 * Set up the Switcher's per-cpu areas.
29289@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
29290 * it will be undisturbed when we switch. To change %cs and jump we
29291 * need this structure to feed to Intel's "lcall" instruction.
29292 */
29293- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
29294+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
29295 lguest_entry.segment = LGUEST_CS;
29296
29297 /*
29298diff -urNp linux-3.1.1/drivers/lguest/x86/switcher_32.S linux-3.1.1/drivers/lguest/x86/switcher_32.S
29299--- linux-3.1.1/drivers/lguest/x86/switcher_32.S 2011-11-11 15:19:27.000000000 -0500
29300+++ linux-3.1.1/drivers/lguest/x86/switcher_32.S 2011-11-16 18:39:07.000000000 -0500
29301@@ -87,6 +87,7 @@
29302 #include <asm/page.h>
29303 #include <asm/segment.h>
29304 #include <asm/lguest.h>
29305+#include <asm/processor-flags.h>
29306
29307 // We mark the start of the code to copy
29308 // It's placed in .text tho it's never run here
29309@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
29310 // Changes type when we load it: damn Intel!
29311 // For after we switch over our page tables
29312 // That entry will be read-only: we'd crash.
29313+
29314+#ifdef CONFIG_PAX_KERNEXEC
29315+ mov %cr0, %edx
29316+ xor $X86_CR0_WP, %edx
29317+ mov %edx, %cr0
29318+#endif
29319+
29320 movl $(GDT_ENTRY_TSS*8), %edx
29321 ltr %dx
29322
29323@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
29324 // Let's clear it again for our return.
29325 // The GDT descriptor of the Host
29326 // Points to the table after two "size" bytes
29327- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
29328+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
29329 // Clear "used" from type field (byte 5, bit 2)
29330- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
29331+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
29332+
29333+#ifdef CONFIG_PAX_KERNEXEC
29334+ mov %cr0, %eax
29335+ xor $X86_CR0_WP, %eax
29336+ mov %eax, %cr0
29337+#endif
29338
29339 // Once our page table's switched, the Guest is live!
29340 // The Host fades as we run this final step.
29341@@ -295,13 +309,12 @@ deliver_to_host:
29342 // I consulted gcc, and it gave
29343 // These instructions, which I gladly credit:
29344 leal (%edx,%ebx,8), %eax
29345- movzwl (%eax),%edx
29346- movl 4(%eax), %eax
29347- xorw %ax, %ax
29348- orl %eax, %edx
29349+ movl 4(%eax), %edx
29350+ movw (%eax), %dx
29351 // Now the address of the handler's in %edx
29352 // We call it now: its "iret" drops us home.
29353- jmp *%edx
29354+ ljmp $__KERNEL_CS, $1f
29355+1: jmp *%edx
29356
29357 // Every interrupt can come to us here
29358 // But we must truly tell each apart.
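Under CONFIG_PAX_KERNEXEC the Switcher's descriptor tables sit in memory the kernel maps read-only, so the patch brackets the TSS "busy"-bit clear with a CR0.WP toggle and routes text addresses through ktla_ktva(). The following userspace analogue uses mprotect() to model the same "drop write protection, patch one byte, restore it" sequence; it is a model of the idea, not ring-0 code:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned char *gdt = mmap(NULL, (size_t)pagesz, PROT_READ,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (gdt == MAP_FAILED)
                return 1;

        /* "mov %cr0; xor $X86_CR0_WP; mov %cr0": drop write protection */
        if (mprotect(gdt, (size_t)pagesz, PROT_READ | PROT_WRITE))
                return 1;
        gdt[5] &= 0xFD;                 /* clear the "busy" type bit, as the switcher does */
        /* second toggle: restore write protection */
        if (mprotect(gdt, (size_t)pagesz, PROT_READ))
                return 1;

        printf("descriptor byte 5 is now 0x%02x\n", gdt[5]);
        return 0;
}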
29359diff -urNp linux-3.1.1/drivers/macintosh/macio_asic.c linux-3.1.1/drivers/macintosh/macio_asic.c
29360--- linux-3.1.1/drivers/macintosh/macio_asic.c 2011-11-11 15:19:27.000000000 -0500
29361+++ linux-3.1.1/drivers/macintosh/macio_asic.c 2011-11-16 18:39:07.000000000 -0500
29362@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
29363 * MacIO is matched against any Apple ID, it's probe() function
29364 * will then decide wether it applies or not
29365 */
29366-static const struct pci_device_id __devinitdata pci_ids [] = { {
29367+static const struct pci_device_id __devinitconst pci_ids [] = { {
29368 .vendor = PCI_VENDOR_ID_APPLE,
29369 .device = PCI_ANY_ID,
29370 .subvendor = PCI_ANY_ID,
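pci_device_id tables that are already const get re-annotated from __devinitdata to __devinitconst here and in the ddbridge, ngene and cx88 hunks below, so the initializer data actually lands in a read-only init section instead of a writable one. A toy translation unit showing the placement difference; the section names and macros merely imitate the kernel annotations:

struct pci_id { unsigned int vendor, device; };

#define devinit_data  __attribute__((section(".devinit.data")))    /* like __devinitdata  */
#define devinit_const __attribute__((section(".devinit.rodata")))  /* like __devinitconst */

/* const object, but placed among writable init data: qualifier and placement disagree */
static const struct pci_id ids_writable_section[] devinit_data = {
        { 0x106b, 0xffff },
};

/* const object in a genuinely read-only section: matches the patch */
static const struct pci_id ids_readonly_section[] devinit_const = {
        { 0x106b, 0xffff },
};

int main(void)
{
        /* both tables read identically; only the section they end up in differs */
        return ids_writable_section[0].vendor != ids_readonly_section[0].vendor;
}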
29371diff -urNp linux-3.1.1/drivers/md/dm.c linux-3.1.1/drivers/md/dm.c
29372--- linux-3.1.1/drivers/md/dm.c 2011-11-11 15:19:27.000000000 -0500
29373+++ linux-3.1.1/drivers/md/dm.c 2011-11-16 18:39:07.000000000 -0500
29374@@ -165,9 +165,9 @@ struct mapped_device {
29375 /*
29376 * Event handling.
29377 */
29378- atomic_t event_nr;
29379+ atomic_unchecked_t event_nr;
29380 wait_queue_head_t eventq;
29381- atomic_t uevent_seq;
29382+ atomic_unchecked_t uevent_seq;
29383 struct list_head uevent_list;
29384 spinlock_t uevent_lock; /* Protect access to uevent_list */
29385
29386@@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(i
29387 rwlock_init(&md->map_lock);
29388 atomic_set(&md->holders, 1);
29389 atomic_set(&md->open_count, 0);
29390- atomic_set(&md->event_nr, 0);
29391- atomic_set(&md->uevent_seq, 0);
29392+ atomic_set_unchecked(&md->event_nr, 0);
29393+ atomic_set_unchecked(&md->uevent_seq, 0);
29394 INIT_LIST_HEAD(&md->uevent_list);
29395 spin_lock_init(&md->uevent_lock);
29396
29397@@ -1978,7 +1978,7 @@ static void event_callback(void *context
29398
29399 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
29400
29401- atomic_inc(&md->event_nr);
29402+ atomic_inc_unchecked(&md->event_nr);
29403 wake_up(&md->eventq);
29404 }
29405
29406@@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_devi
29407
29408 uint32_t dm_next_uevent_seq(struct mapped_device *md)
29409 {
29410- return atomic_add_return(1, &md->uevent_seq);
29411+ return atomic_add_return_unchecked(1, &md->uevent_seq);
29412 }
29413
29414 uint32_t dm_get_event_nr(struct mapped_device *md)
29415 {
29416- return atomic_read(&md->event_nr);
29417+ return atomic_read_unchecked(&md->event_nr);
29418 }
29419
29420 int dm_wait_event(struct mapped_device *md, int event_nr)
29421 {
29422 return wait_event_interruptible(md->eventq,
29423- (event_nr != atomic_read(&md->event_nr)));
29424+ (event_nr != atomic_read_unchecked(&md->event_nr)));
29425 }
29426
29427 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
29428diff -urNp linux-3.1.1/drivers/md/dm-ioctl.c linux-3.1.1/drivers/md/dm-ioctl.c
29429--- linux-3.1.1/drivers/md/dm-ioctl.c 2011-11-11 15:19:27.000000000 -0500
29430+++ linux-3.1.1/drivers/md/dm-ioctl.c 2011-11-16 18:39:07.000000000 -0500
29431@@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, str
29432 cmd == DM_LIST_VERSIONS_CMD)
29433 return 0;
29434
29435- if ((cmd == DM_DEV_CREATE_CMD)) {
29436+ if (cmd == DM_DEV_CREATE_CMD) {
29437 if (!*param->name) {
29438 DMWARN("name not supplied when creating device");
29439 return -EINVAL;
29440diff -urNp linux-3.1.1/drivers/md/dm-raid1.c linux-3.1.1/drivers/md/dm-raid1.c
29441--- linux-3.1.1/drivers/md/dm-raid1.c 2011-11-11 15:19:27.000000000 -0500
29442+++ linux-3.1.1/drivers/md/dm-raid1.c 2011-11-16 18:39:07.000000000 -0500
29443@@ -40,7 +40,7 @@ enum dm_raid1_error {
29444
29445 struct mirror {
29446 struct mirror_set *ms;
29447- atomic_t error_count;
29448+ atomic_unchecked_t error_count;
29449 unsigned long error_type;
29450 struct dm_dev *dev;
29451 sector_t offset;
29452@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
29453 struct mirror *m;
29454
29455 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
29456- if (!atomic_read(&m->error_count))
29457+ if (!atomic_read_unchecked(&m->error_count))
29458 return m;
29459
29460 return NULL;
29461@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
29462 * simple way to tell if a device has encountered
29463 * errors.
29464 */
29465- atomic_inc(&m->error_count);
29466+ atomic_inc_unchecked(&m->error_count);
29467
29468 if (test_and_set_bit(error_type, &m->error_type))
29469 return;
29470@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
29471 struct mirror *m = get_default_mirror(ms);
29472
29473 do {
29474- if (likely(!atomic_read(&m->error_count)))
29475+ if (likely(!atomic_read_unchecked(&m->error_count)))
29476 return m;
29477
29478 if (m-- == ms->mirror)
29479@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
29480 {
29481 struct mirror *default_mirror = get_default_mirror(m->ms);
29482
29483- return !atomic_read(&default_mirror->error_count);
29484+ return !atomic_read_unchecked(&default_mirror->error_count);
29485 }
29486
29487 static int mirror_available(struct mirror_set *ms, struct bio *bio)
29488@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
29489 */
29490 if (likely(region_in_sync(ms, region, 1)))
29491 m = choose_mirror(ms, bio->bi_sector);
29492- else if (m && atomic_read(&m->error_count))
29493+ else if (m && atomic_read_unchecked(&m->error_count))
29494 m = NULL;
29495
29496 if (likely(m))
29497@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
29498 }
29499
29500 ms->mirror[mirror].ms = ms;
29501- atomic_set(&(ms->mirror[mirror].error_count), 0);
29502+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
29503 ms->mirror[mirror].error_type = 0;
29504 ms->mirror[mirror].offset = offset;
29505
29506@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
29507 */
29508 static char device_status_char(struct mirror *m)
29509 {
29510- if (!atomic_read(&(m->error_count)))
29511+ if (!atomic_read_unchecked(&(m->error_count)))
29512 return 'A';
29513
29514 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
29515diff -urNp linux-3.1.1/drivers/md/dm-stripe.c linux-3.1.1/drivers/md/dm-stripe.c
29516--- linux-3.1.1/drivers/md/dm-stripe.c 2011-11-11 15:19:27.000000000 -0500
29517+++ linux-3.1.1/drivers/md/dm-stripe.c 2011-11-16 18:39:07.000000000 -0500
29518@@ -20,7 +20,7 @@ struct stripe {
29519 struct dm_dev *dev;
29520 sector_t physical_start;
29521
29522- atomic_t error_count;
29523+ atomic_unchecked_t error_count;
29524 };
29525
29526 struct stripe_c {
29527@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
29528 kfree(sc);
29529 return r;
29530 }
29531- atomic_set(&(sc->stripe[i].error_count), 0);
29532+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
29533 }
29534
29535 ti->private = sc;
29536@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
29537 DMEMIT("%d ", sc->stripes);
29538 for (i = 0; i < sc->stripes; i++) {
29539 DMEMIT("%s ", sc->stripe[i].dev->name);
29540- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
29541+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
29542 'D' : 'A';
29543 }
29544 buffer[i] = '\0';
29545@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
29546 */
29547 for (i = 0; i < sc->stripes; i++)
29548 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
29549- atomic_inc(&(sc->stripe[i].error_count));
29550- if (atomic_read(&(sc->stripe[i].error_count)) <
29551+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
29552+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
29553 DM_IO_ERROR_THRESHOLD)
29554 schedule_work(&sc->trigger_event);
29555 }
29556diff -urNp linux-3.1.1/drivers/md/dm-table.c linux-3.1.1/drivers/md/dm-table.c
29557--- linux-3.1.1/drivers/md/dm-table.c 2011-11-11 15:19:27.000000000 -0500
29558+++ linux-3.1.1/drivers/md/dm-table.c 2011-11-16 18:39:07.000000000 -0500
29559@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct
29560 if (!dev_size)
29561 return 0;
29562
29563- if ((start >= dev_size) || (start + len > dev_size)) {
29564+ if ((start >= dev_size) || (len > dev_size - start)) {
29565 DMWARN("%s: %s too small for target: "
29566 "start=%llu, len=%llu, dev_size=%llu",
29567 dm_device_name(ti->table->md), bdevname(bdev, b),
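The device_area_is_invalid() change is an integer-overflow fix rather than a hardening annotation: with 64-bit sector counts, start + len can wrap around and sneak past the old comparison, while len > dev_size - start is safe because start < dev_size is already established by the first clause. A standalone demonstration with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

static int area_invalid_old(uint64_t start, uint64_t len, uint64_t dev_size)
{
        return (start >= dev_size) || (start + len > dev_size);    /* sum can wrap */
}

static int area_invalid_new(uint64_t start, uint64_t len, uint64_t dev_size)
{
        return (start >= dev_size) || (len > dev_size - start);    /* no overflow */
}

int main(void)
{
        uint64_t start = 1, len = UINT64_MAX, dev_size = 1024;

        printf("old check: %s\n",
               area_invalid_old(start, len, dev_size) ? "rejected" : "accepted");
        printf("new check: %s\n",
               area_invalid_new(start, len, dev_size) ? "rejected" : "accepted");
        return 0;
}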
29568diff -urNp linux-3.1.1/drivers/md/md.c linux-3.1.1/drivers/md/md.c
29569--- linux-3.1.1/drivers/md/md.c 2011-11-11 15:19:27.000000000 -0500
29570+++ linux-3.1.1/drivers/md/md.c 2011-11-16 18:39:07.000000000 -0500
29571@@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
29572 * start build, activate spare
29573 */
29574 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
29575-static atomic_t md_event_count;
29576+static atomic_unchecked_t md_event_count;
29577 void md_new_event(mddev_t *mddev)
29578 {
29579- atomic_inc(&md_event_count);
29580+ atomic_inc_unchecked(&md_event_count);
29581 wake_up(&md_event_waiters);
29582 }
29583 EXPORT_SYMBOL_GPL(md_new_event);
29584@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
29585 */
29586 static void md_new_event_inintr(mddev_t *mddev)
29587 {
29588- atomic_inc(&md_event_count);
29589+ atomic_inc_unchecked(&md_event_count);
29590 wake_up(&md_event_waiters);
29591 }
29592
29593@@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev
29594
29595 rdev->preferred_minor = 0xffff;
29596 rdev->data_offset = le64_to_cpu(sb->data_offset);
29597- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29598+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29599
29600 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
29601 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
29602@@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev,
29603 else
29604 sb->resync_offset = cpu_to_le64(0);
29605
29606- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
29607+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
29608
29609 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
29610 sb->size = cpu_to_le64(mddev->dev_sectors);
29611@@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
29612 static ssize_t
29613 errors_show(mdk_rdev_t *rdev, char *page)
29614 {
29615- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
29616+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
29617 }
29618
29619 static ssize_t
29620@@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const cha
29621 char *e;
29622 unsigned long n = simple_strtoul(buf, &e, 10);
29623 if (*buf && (*e == 0 || *e == '\n')) {
29624- atomic_set(&rdev->corrected_errors, n);
29625+ atomic_set_unchecked(&rdev->corrected_errors, n);
29626 return len;
29627 }
29628 return -EINVAL;
29629@@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
29630 rdev->sb_loaded = 0;
29631 rdev->bb_page = NULL;
29632 atomic_set(&rdev->nr_pending, 0);
29633- atomic_set(&rdev->read_errors, 0);
29634- atomic_set(&rdev->corrected_errors, 0);
29635+ atomic_set_unchecked(&rdev->read_errors, 0);
29636+ atomic_set_unchecked(&rdev->corrected_errors, 0);
29637
29638 INIT_LIST_HEAD(&rdev->same_set);
29639 init_waitqueue_head(&rdev->blocked_wait);
29640@@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *
29641
29642 spin_unlock(&pers_lock);
29643 seq_printf(seq, "\n");
29644- seq->poll_event = atomic_read(&md_event_count);
29645+ seq->poll_event = atomic_read_unchecked(&md_event_count);
29646 return 0;
29647 }
29648 if (v == (void*)2) {
29649@@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *
29650 chunk_kb ? "KB" : "B");
29651 if (bitmap->file) {
29652 seq_printf(seq, ", file: ");
29653- seq_path(seq, &bitmap->file->f_path, " \t\n");
29654+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
29655 }
29656
29657 seq_printf(seq, "\n");
29658@@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *ino
29659 return error;
29660
29661 seq = file->private_data;
29662- seq->poll_event = atomic_read(&md_event_count);
29663+ seq->poll_event = atomic_read_unchecked(&md_event_count);
29664 return error;
29665 }
29666
29667@@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct f
29668 /* always allow read */
29669 mask = POLLIN | POLLRDNORM;
29670
29671- if (seq->poll_event != atomic_read(&md_event_count))
29672+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
29673 mask |= POLLERR | POLLPRI;
29674 return mask;
29675 }
29676@@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev,
29677 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
29678 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
29679 (int)part_stat_read(&disk->part0, sectors[1]) -
29680- atomic_read(&disk->sync_io);
29681+ atomic_read_unchecked(&disk->sync_io);
29682 /* sync IO will cause sync_io to increase before the disk_stats
29683 * as sync_io is counted when a request starts, and
29684 * disk_stats is counted when it completes.
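The md_seq_show() hunk widens the escape set passed to seq_path() from " \t\n" to " \t\n\\": if the backslash itself is never escaped, a bitmap file whose name contains literal backslash sequences can imitate the escaping and mislead anything parsing /proc/mdstat. A minimal model of the mangling step, assumed to follow the three-digit octal escapes the kernel's seq_file code produces:

#include <stdio.h>
#include <string.h>

static void emit_escaped(const char *name, const char *esc)
{
        const char *p;

        for (p = name; *p; p++) {
                if (strchr(esc, *p))
                        printf("\\%03o", (unsigned char)*p);    /* octal escape */
                else
                        putchar(*p);
        }
        putchar('\n');
}

int main(void)
{
        const char *tricky = "bitmap\\040with space";   /* contains a literal backslash */

        emit_escaped(tricky, " \t\n");      /* old set: the backslash passes through */
        emit_escaped(tricky, " \t\n\\");    /* new set: the backslash is escaped too */
        return 0;
}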
29685diff -urNp linux-3.1.1/drivers/md/md.h linux-3.1.1/drivers/md/md.h
29686--- linux-3.1.1/drivers/md/md.h 2011-11-11 15:19:27.000000000 -0500
29687+++ linux-3.1.1/drivers/md/md.h 2011-11-16 18:39:07.000000000 -0500
29688@@ -124,13 +124,13 @@ struct mdk_rdev_s
29689 * only maintained for arrays that
29690 * support hot removal
29691 */
29692- atomic_t read_errors; /* number of consecutive read errors that
29693+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
29694 * we have tried to ignore.
29695 */
29696 struct timespec last_read_error; /* monotonic time since our
29697 * last read error
29698 */
29699- atomic_t corrected_errors; /* number of corrected read errors,
29700+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
29701 * for reporting to userspace and storing
29702 * in superblock.
29703 */
29704@@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_
29705
29706 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
29707 {
29708- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29709+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29710 }
29711
29712 struct mdk_personality
29713diff -urNp linux-3.1.1/drivers/md/raid10.c linux-3.1.1/drivers/md/raid10.c
29714--- linux-3.1.1/drivers/md/raid10.c 2011-11-11 15:19:27.000000000 -0500
29715+++ linux-3.1.1/drivers/md/raid10.c 2011-11-16 18:39:07.000000000 -0500
29716@@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bi
29717 /* The write handler will notice the lack of
29718 * R10BIO_Uptodate and record any errors etc
29719 */
29720- atomic_add(r10_bio->sectors,
29721+ atomic_add_unchecked(r10_bio->sectors,
29722 &conf->mirrors[d].rdev->corrected_errors);
29723
29724 /* for reconstruct, we always reschedule after a read.
29725@@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mdde
29726 {
29727 struct timespec cur_time_mon;
29728 unsigned long hours_since_last;
29729- unsigned int read_errors = atomic_read(&rdev->read_errors);
29730+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
29731
29732 ktime_get_ts(&cur_time_mon);
29733
29734@@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mdde
29735 * overflowing the shift of read_errors by hours_since_last.
29736 */
29737 if (hours_since_last >= 8 * sizeof(read_errors))
29738- atomic_set(&rdev->read_errors, 0);
29739+ atomic_set_unchecked(&rdev->read_errors, 0);
29740 else
29741- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
29742+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
29743 }
29744
29745 static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
29746@@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf,
29747 return;
29748
29749 check_decay_read_errors(mddev, rdev);
29750- atomic_inc(&rdev->read_errors);
29751- if (atomic_read(&rdev->read_errors) > max_read_errors) {
29752+ atomic_inc_unchecked(&rdev->read_errors);
29753+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
29754 char b[BDEVNAME_SIZE];
29755 bdevname(rdev->bdev, b);
29756
29757@@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf,
29758 "md/raid10:%s: %s: Raid device exceeded "
29759 "read_error threshold [cur %d:max %d]\n",
29760 mdname(mddev), b,
29761- atomic_read(&rdev->read_errors), max_read_errors);
29762+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
29763 printk(KERN_NOTICE
29764 "md/raid10:%s: %s: Failing raid device\n",
29765 mdname(mddev), b);
29766@@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf,
29767 (unsigned long long)(
29768 sect + rdev->data_offset),
29769 bdevname(rdev->bdev, b));
29770- atomic_add(s, &rdev->corrected_errors);
29771+ atomic_add_unchecked(s, &rdev->corrected_errors);
29772 }
29773
29774 rdev_dec_pending(rdev, mddev);
29775diff -urNp linux-3.1.1/drivers/md/raid1.c linux-3.1.1/drivers/md/raid1.c
29776--- linux-3.1.1/drivers/md/raid1.c 2011-11-11 15:19:27.000000000 -0500
29777+++ linux-3.1.1/drivers/md/raid1.c 2011-11-16 18:39:07.000000000 -0500
29778@@ -1541,7 +1541,7 @@ static int fix_sync_read_error(r1bio_t *
29779 if (r1_sync_page_io(rdev, sect, s,
29780 bio->bi_io_vec[idx].bv_page,
29781 READ) != 0)
29782- atomic_add(s, &rdev->corrected_errors);
29783+ atomic_add_unchecked(s, &rdev->corrected_errors);
29784 }
29785 sectors -= s;
29786 sect += s;
29787@@ -1754,7 +1754,7 @@ static void fix_read_error(conf_t *conf,
29788 test_bit(In_sync, &rdev->flags)) {
29789 if (r1_sync_page_io(rdev, sect, s,
29790 conf->tmppage, READ)) {
29791- atomic_add(s, &rdev->corrected_errors);
29792+ atomic_add_unchecked(s, &rdev->corrected_errors);
29793 printk(KERN_INFO
29794 "md/raid1:%s: read error corrected "
29795 "(%d sectors at %llu on %s)\n",
29796diff -urNp linux-3.1.1/drivers/md/raid5.c linux-3.1.1/drivers/md/raid5.c
29797--- linux-3.1.1/drivers/md/raid5.c 2011-11-11 15:19:27.000000000 -0500
29798+++ linux-3.1.1/drivers/md/raid5.c 2011-11-16 18:40:10.000000000 -0500
29799@@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struc
29800 (unsigned long long)(sh->sector
29801 + rdev->data_offset),
29802 bdevname(rdev->bdev, b));
29803- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
29804+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
29805 clear_bit(R5_ReadError, &sh->dev[i].flags);
29806 clear_bit(R5_ReWrite, &sh->dev[i].flags);
29807 }
29808- if (atomic_read(&conf->disks[i].rdev->read_errors))
29809- atomic_set(&conf->disks[i].rdev->read_errors, 0);
29810+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
29811+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
29812 } else {
29813 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
29814 int retry = 0;
29815 rdev = conf->disks[i].rdev;
29816
29817 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
29818- atomic_inc(&rdev->read_errors);
29819+ atomic_inc_unchecked(&rdev->read_errors);
29820 if (conf->mddev->degraded >= conf->max_degraded)
29821 printk_ratelimited(
29822 KERN_WARNING
29823@@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struc
29824 (unsigned long long)(sh->sector
29825 + rdev->data_offset),
29826 bdn);
29827- else if (atomic_read(&rdev->read_errors)
29828+ else if (atomic_read_unchecked(&rdev->read_errors)
29829 > conf->max_nr_stripes)
29830 printk(KERN_WARNING
29831 "md/raid:%s: Too many read errors, failing device %s.\n",
29832@@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct s
29833 sector_t r_sector;
29834 struct stripe_head sh2;
29835
29836+ pax_track_stack();
29837
29838 chunk_offset = sector_div(new_sector, sectors_per_chunk);
29839 stripe = new_sector;
29840diff -urNp linux-3.1.1/drivers/media/common/saa7146_hlp.c linux-3.1.1/drivers/media/common/saa7146_hlp.c
29841--- linux-3.1.1/drivers/media/common/saa7146_hlp.c 2011-11-11 15:19:27.000000000 -0500
29842+++ linux-3.1.1/drivers/media/common/saa7146_hlp.c 2011-11-16 18:40:10.000000000 -0500
29843@@ -353,6 +353,8 @@ static void calculate_clipping_registers
29844
29845 int x[32], y[32], w[32], h[32];
29846
29847+ pax_track_stack();
29848+
29849 /* clear out memory */
29850 memset(&line_list[0], 0x00, sizeof(u32)*32);
29851 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
29852diff -urNp linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c
29853--- linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c 2011-11-11 15:19:27.000000000 -0500
29854+++ linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c 2011-11-16 18:39:07.000000000 -0500
29855@@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = {
29856 .subvendor = _subvend, .subdevice = _subdev, \
29857 .driver_data = (unsigned long)&_driverdata }
29858
29859-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
29860+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
29861 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
29862 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
29863 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
29864diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
29865--- linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-11 15:19:27.000000000 -0500
29866+++ linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-16 18:40:10.000000000 -0500
29867@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
29868 u8 buf[HOST_LINK_BUF_SIZE];
29869 int i;
29870
29871+ pax_track_stack();
29872+
29873 dprintk("%s\n", __func__);
29874
29875 /* check if we have space for a link buf in the rx_buffer */
29876@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
29877 unsigned long timeout;
29878 int written;
29879
29880+ pax_track_stack();
29881+
29882 dprintk("%s\n", __func__);
29883
29884 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
29885diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h
29886--- linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-11 15:19:27.000000000 -0500
29887+++ linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-17 18:34:32.000000000 -0500
29888@@ -73,7 +73,7 @@ struct dvb_demux_feed {
29889 union {
29890 dmx_ts_cb ts;
29891 dmx_section_cb sec;
29892- } cb;
29893+ } __no_const cb;
29894
29895 struct dvb_demux *demux;
29896 void *priv;
29897diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c
29898--- linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-11 15:19:27.000000000 -0500
29899+++ linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-16 18:39:07.000000000 -0500
29900@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
29901 const struct dvb_device *template, void *priv, int type)
29902 {
29903 struct dvb_device *dvbdev;
29904- struct file_operations *dvbdevfops;
29905+ file_operations_no_const *dvbdevfops;
29906 struct device *clsdev;
29907 int minor;
29908 int id;
29909diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c
29910--- linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-11 15:19:27.000000000 -0500
29911+++ linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-16 18:39:07.000000000 -0500
29912@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
29913 struct dib0700_adapter_state {
29914 int (*set_param_save) (struct dvb_frontend *,
29915 struct dvb_frontend_parameters *);
29916-};
29917+} __no_const;
29918
29919 static int dib7070_set_param_override(struct dvb_frontend *fe,
29920 struct dvb_frontend_parameters *fep)
29921diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c
29922--- linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-11 15:19:27.000000000 -0500
29923+++ linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-16 18:40:10.000000000 -0500
29924@@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb
29925 if (!buf)
29926 return -ENOMEM;
29927
29928+ pax_track_stack();
29929+
29930 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
29931 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
29932 hx.addr, hx.len, hx.chk);
29933diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c
29934--- linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c 2011-11-11 15:19:27.000000000 -0500
29935+++ linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c 2011-11-16 18:39:07.000000000 -0500
29936@@ -95,7 +95,7 @@ struct su3000_state {
29937
29938 struct s6x0_state {
29939 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
29940-};
29941+} __no_const;
29942
29943 /* debug */
29944 static int dvb_usb_dw2102_debug;
29945diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c
29946--- linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c 2011-11-11 15:19:27.000000000 -0500
29947+++ linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c 2011-11-16 18:40:10.000000000 -0500
29948@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
29949 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
29950 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
29951
29952+ pax_track_stack();
29953
29954 data[0] = 0x8a;
29955 len_in = 1;
29956@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
29957 int ret = 0, len_in;
29958 u8 data[512] = {0};
29959
29960+ pax_track_stack();
29961+
29962 data[0] = 0x0a;
29963 len_in = 1;
29964 info("FRM Firmware Cold Reset");
29965diff -urNp linux-3.1.1/drivers/media/dvb/frontends/dib3000.h linux-3.1.1/drivers/media/dvb/frontends/dib3000.h
29966--- linux-3.1.1/drivers/media/dvb/frontends/dib3000.h 2011-11-11 15:19:27.000000000 -0500
29967+++ linux-3.1.1/drivers/media/dvb/frontends/dib3000.h 2011-11-17 18:38:05.000000000 -0500
29968@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
29969 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
29970 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
29971 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
29972-};
29973+} __no_const;
29974
29975 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
29976 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29977diff -urNp linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c
29978--- linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c 2011-11-11 15:19:27.000000000 -0500
29979+++ linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c 2011-11-16 18:40:10.000000000 -0500
29980@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
29981 int ret = -1;
29982 int sync;
29983
29984+ pax_track_stack();
29985+
29986 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
29987
29988 fcp = 3000;
29989diff -urNp linux-3.1.1/drivers/media/dvb/frontends/or51211.c linux-3.1.1/drivers/media/dvb/frontends/or51211.c
29990--- linux-3.1.1/drivers/media/dvb/frontends/or51211.c 2011-11-11 15:19:27.000000000 -0500
29991+++ linux-3.1.1/drivers/media/dvb/frontends/or51211.c 2011-11-16 18:40:10.000000000 -0500
29992@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
29993 u8 tudata[585];
29994 int i;
29995
29996+ pax_track_stack();
29997+
29998 dprintk("Firmware is %zd bytes\n",fw->size);
29999
30000 /* Get eprom data */
30001diff -urNp linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c
30002--- linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c 2011-11-11 15:19:27.000000000 -0500
30003+++ linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c 2011-11-16 18:39:07.000000000 -0500
30004@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780
30005
30006 /****************************************************************************/
30007
30008-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
30009+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
30010 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
30011 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
30012 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
30013diff -urNp linux-3.1.1/drivers/media/radio/radio-cadet.c linux-3.1.1/drivers/media/radio/radio-cadet.c
30014--- linux-3.1.1/drivers/media/radio/radio-cadet.c 2011-11-11 15:19:27.000000000 -0500
30015+++ linux-3.1.1/drivers/media/radio/radio-cadet.c 2011-11-16 18:39:07.000000000 -0500
30016@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *f
30017 unsigned char readbuf[RDS_BUFFER];
30018 int i = 0;
30019
30020+ if (count > RDS_BUFFER)
30021+ return -EFAULT;
30022 mutex_lock(&dev->lock);
30023 if (dev->rdsstat == 0) {
30024 dev->rdsstat = 1;
30025diff -urNp linux-3.1.1/drivers/media/video/au0828/au0828.h linux-3.1.1/drivers/media/video/au0828/au0828.h
30026--- linux-3.1.1/drivers/media/video/au0828/au0828.h 2011-11-11 15:19:27.000000000 -0500
30027+++ linux-3.1.1/drivers/media/video/au0828/au0828.h 2011-11-16 18:39:07.000000000 -0500
30028@@ -191,7 +191,7 @@ struct au0828_dev {
30029
30030 /* I2C */
30031 struct i2c_adapter i2c_adap;
30032- struct i2c_algorithm i2c_algo;
30033+ i2c_algorithm_no_const i2c_algo;
30034 struct i2c_client i2c_client;
30035 u32 i2c_rc;
30036
30037diff -urNp linux-3.1.1/drivers/media/video/cx18/cx18-driver.c linux-3.1.1/drivers/media/video/cx18/cx18-driver.c
30038--- linux-3.1.1/drivers/media/video/cx18/cx18-driver.c 2011-11-11 15:19:27.000000000 -0500
30039+++ linux-3.1.1/drivers/media/video/cx18/cx18-driver.c 2011-11-16 18:40:10.000000000 -0500
30040@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30041 struct i2c_client c;
30042 u8 eedata[256];
30043
30044+ pax_track_stack();
30045+
30046 memset(&c, 0, sizeof(c));
30047 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30048 c.adapter = &cx->i2c_adap[0];
30049diff -urNp linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c
30050--- linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c 2011-11-11 15:19:27.000000000 -0500
30051+++ linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c 2011-11-16 18:40:10.000000000 -0500
30052@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
30053 bool handle = false;
30054 struct ir_raw_event ir_core_event[64];
30055
30056+ pax_track_stack();
30057+
30058 do {
30059 num = 0;
30060 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
30061diff -urNp linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c
30062--- linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c 2011-11-11 15:19:27.000000000 -0500
30063+++ linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c 2011-11-16 18:39:07.000000000 -0500
30064@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_
30065 * Only boards with eeprom and byte 1 at eeprom=1 have it
30066 */
30067
30068-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
30069+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
30070 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30071 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30072 {0, }
30073diff -urNp linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30074--- linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-11 15:19:27.000000000 -0500
30075+++ linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-16 18:40:10.000000000 -0500
30076@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30077 u8 *eeprom;
30078 struct tveeprom tvdata;
30079
30080+ pax_track_stack();
30081+
30082 memset(&tvdata,0,sizeof(tvdata));
30083
30084 eeprom = pvr2_eeprom_fetch(hdw);
30085diff -urNp linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
30086--- linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-11 15:19:27.000000000 -0500
30087+++ linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-16 18:39:07.000000000 -0500
30088@@ -196,7 +196,7 @@ struct pvr2_hdw {
30089
30090 /* I2C stuff */
30091 struct i2c_adapter i2c_adap;
30092- struct i2c_algorithm i2c_algo;
30093+ i2c_algorithm_no_const i2c_algo;
30094 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
30095 int i2c_cx25840_hack_state;
30096 int i2c_linked;
30097diff -urNp linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c
30098--- linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c 2011-11-11 15:19:27.000000000 -0500
30099+++ linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c 2011-11-16 18:40:10.000000000 -0500
30100@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
30101 unsigned char localPAT[256];
30102 unsigned char localPMT[256];
30103
30104+ pax_track_stack();
30105+
30106 /* Set video format - must be done first as it resets other settings */
30107 set_reg8(client, 0x41, h->video_format);
30108
30109diff -urNp linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c
30110--- linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-11 15:19:27.000000000 -0500
30111+++ linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-16 18:40:10.000000000 -0500
30112@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
30113 u8 tmp[512];
30114 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30115
30116+ pax_track_stack();
30117+
30118 /* While any outstand message on the bus exists... */
30119 do {
30120
30121@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
30122 u8 tmp[512];
30123 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30124
30125+ pax_track_stack();
30126+
30127 while (loop) {
30128
30129 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
30130diff -urNp linux-3.1.1/drivers/media/video/timblogiw.c linux-3.1.1/drivers/media/video/timblogiw.c
30131--- linux-3.1.1/drivers/media/video/timblogiw.c 2011-11-11 15:19:27.000000000 -0500
30132+++ linux-3.1.1/drivers/media/video/timblogiw.c 2011-11-17 18:36:32.000000000 -0500
30133@@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *f
30134
30135 /* Platform device functions */
30136
30137-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
30138+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
30139 .vidioc_querycap = timblogiw_querycap,
30140 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
30141 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
30142@@ -766,7 +766,7 @@ static __devinitconst struct v4l2_ioctl_
30143 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
30144 };
30145
30146-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
30147+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
30148 .owner = THIS_MODULE,
30149 .open = timblogiw_open,
30150 .release = timblogiw_close,
30151diff -urNp linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c
30152--- linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c 2011-11-11 15:19:27.000000000 -0500
30153+++ linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c 2011-11-16 18:40:10.000000000 -0500
30154@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
30155 unsigned char rv, gv, bv;
30156 static unsigned char *Y, *U, *V;
30157
30158+ pax_track_stack();
30159+
30160 frame = usbvision->cur_frame;
30161 image_size = frame->frmwidth * frame->frmheight;
30162 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
30163diff -urNp linux-3.1.1/drivers/media/video/videobuf-dma-sg.c linux-3.1.1/drivers/media/video/videobuf-dma-sg.c
30164--- linux-3.1.1/drivers/media/video/videobuf-dma-sg.c 2011-11-11 15:19:27.000000000 -0500
30165+++ linux-3.1.1/drivers/media/video/videobuf-dma-sg.c 2011-11-16 18:40:10.000000000 -0500
30166@@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size)
30167 {
30168 struct videobuf_queue q;
30169
30170+ pax_track_stack();
30171+
30172 /* Required to make generic handler to call __videobuf_alloc */
30173 q.int_ops = &sg_ops;
30174
30175diff -urNp linux-3.1.1/drivers/message/fusion/mptbase.c linux-3.1.1/drivers/message/fusion/mptbase.c
30176--- linux-3.1.1/drivers/message/fusion/mptbase.c 2011-11-11 15:19:27.000000000 -0500
30177+++ linux-3.1.1/drivers/message/fusion/mptbase.c 2011-11-16 18:40:10.000000000 -0500
30178@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
30179 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
30180 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
30181
30182+#ifdef CONFIG_GRKERNSEC_HIDESYM
30183+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
30184+#else
30185 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30186 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
30187+#endif
30188+
30189 /*
30190 * Rounding UP to nearest 4-kB boundary here...
30191 */
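
The mptbase hunk above prints NULL in place of the real request-frame addresses when GRKERNSEC_HIDESYM is enabled, so the /proc file no longer leaks kernel pointers; mainline later gained the %pK specifier for a runtime-controlled version of the same idea. A minimal userspace sketch of the compile-time switch (names and format are illustrative):

/* Sketch only: print a real pointer when "hide symbols" is off,
 * a harmless placeholder otherwise. */
#include <stdio.h>

#ifdef HIDESYM
# define SHOWN_PTR(p) ((void *)NULL)
#else
# define SHOWN_PTR(p) ((void *)(p))
#endif

int main(void)
{
	int frames[16];

	printf("  RequestFrames @ 0x%p\n", SHOWN_PTR(frames));
	return 0;
}
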
30192diff -urNp linux-3.1.1/drivers/message/fusion/mptsas.c linux-3.1.1/drivers/message/fusion/mptsas.c
30193--- linux-3.1.1/drivers/message/fusion/mptsas.c 2011-11-11 15:19:27.000000000 -0500
30194+++ linux-3.1.1/drivers/message/fusion/mptsas.c 2011-11-16 18:39:07.000000000 -0500
30195@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
30196 return 0;
30197 }
30198
30199+static inline void
30200+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30201+{
30202+ if (phy_info->port_details) {
30203+ phy_info->port_details->rphy = rphy;
30204+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30205+ ioc->name, rphy));
30206+ }
30207+
30208+ if (rphy) {
30209+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30210+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30211+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30212+ ioc->name, rphy, rphy->dev.release));
30213+ }
30214+}
30215+
30216 /* no mutex */
30217 static void
30218 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
30219@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
30220 return NULL;
30221 }
30222
30223-static inline void
30224-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30225-{
30226- if (phy_info->port_details) {
30227- phy_info->port_details->rphy = rphy;
30228- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30229- ioc->name, rphy));
30230- }
30231-
30232- if (rphy) {
30233- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30234- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30235- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30236- ioc->name, rphy, rphy->dev.release));
30237- }
30238-}
30239-
30240 static inline struct sas_port *
30241 mptsas_get_port(struct mptsas_phyinfo *phy_info)
30242 {
30243diff -urNp linux-3.1.1/drivers/message/fusion/mptscsih.c linux-3.1.1/drivers/message/fusion/mptscsih.c
30244--- linux-3.1.1/drivers/message/fusion/mptscsih.c 2011-11-11 15:19:27.000000000 -0500
30245+++ linux-3.1.1/drivers/message/fusion/mptscsih.c 2011-11-16 18:39:07.000000000 -0500
30246@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
30247
30248 h = shost_priv(SChost);
30249
30250- if (h) {
30251- if (h->info_kbuf == NULL)
30252- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
30253- return h->info_kbuf;
30254- h->info_kbuf[0] = '\0';
30255+ if (!h)
30256+ return NULL;
30257
30258- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
30259- h->info_kbuf[size-1] = '\0';
30260- }
30261+ if (h->info_kbuf == NULL)
30262+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
30263+ return h->info_kbuf;
30264+ h->info_kbuf[0] = '\0';
30265+
30266+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
30267+ h->info_kbuf[size-1] = '\0';
30268
30269 return h->info_kbuf;
30270 }
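
The mptscsih_info() hunk flattens the nested if (h) { ... } block into an early return, removing one indentation level without changing behaviour. A sketch of the same guard-clause rewrite on an invented stand-in structure:

/* Illustrative stand-in, not the MPT driver code. */
#include <stdio.h>
#include <stdlib.h>

struct host { char *info_kbuf; const char *summary; };

static const char *host_info(struct host *h)
{
	if (!h)					/* early return replaces   */
		return NULL;			/* the outer if (h) { ... } */

	if (h->info_kbuf == NULL)
		if ((h->info_kbuf = malloc(0x1000)) == NULL)
			return h->info_kbuf;
	h->info_kbuf[0] = '\0';

	snprintf(h->info_kbuf, 0x1000, "%s", h->summary);
	return h->info_kbuf;
}

int main(void)
{
	struct host h = { NULL, "adapter summary" };
	const char *s = host_info(&h);

	puts(s ? s : "(no host)");
	free(h.info_kbuf);
	return 0;
}
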
30271diff -urNp linux-3.1.1/drivers/message/i2o/i2o_config.c linux-3.1.1/drivers/message/i2o/i2o_config.c
30272--- linux-3.1.1/drivers/message/i2o/i2o_config.c 2011-11-11 15:19:27.000000000 -0500
30273+++ linux-3.1.1/drivers/message/i2o/i2o_config.c 2011-11-16 18:40:10.000000000 -0500
30274@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
30275 struct i2o_message *msg;
30276 unsigned int iop;
30277
30278+ pax_track_stack();
30279+
30280 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
30281 return -EFAULT;
30282
30283diff -urNp linux-3.1.1/drivers/message/i2o/i2o_proc.c linux-3.1.1/drivers/message/i2o/i2o_proc.c
30284--- linux-3.1.1/drivers/message/i2o/i2o_proc.c 2011-11-11 15:19:27.000000000 -0500
30285+++ linux-3.1.1/drivers/message/i2o/i2o_proc.c 2011-11-16 18:39:07.000000000 -0500
30286@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
30287 "Array Controller Device"
30288 };
30289
30290-static char *chtostr(u8 * chars, int n)
30291-{
30292- char tmp[256];
30293- tmp[0] = 0;
30294- return strncat(tmp, (char *)chars, n);
30295-}
30296-
30297 static int i2o_report_query_status(struct seq_file *seq, int block_status,
30298 char *group)
30299 {
30300@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
30301
30302 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
30303 seq_printf(seq, "%-#8x", ddm_table.module_id);
30304- seq_printf(seq, "%-29s",
30305- chtostr(ddm_table.module_name_version, 28));
30306+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
30307 seq_printf(seq, "%9d ", ddm_table.data_size);
30308 seq_printf(seq, "%8d", ddm_table.code_size);
30309
30310@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
30311
30312 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
30313 seq_printf(seq, "%-#8x", dst->module_id);
30314- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
30315- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
30316+ seq_printf(seq, "%-.28s", dst->module_name_version);
30317+ seq_printf(seq, "%-.8s", dst->date);
30318 seq_printf(seq, "%8d ", dst->module_size);
30319 seq_printf(seq, "%8d ", dst->mpb_size);
30320 seq_printf(seq, "0x%04x", dst->module_flags);
30321@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
30322 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
30323 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
30324 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
30325- seq_printf(seq, "Vendor info : %s\n",
30326- chtostr((u8 *) (work32 + 2), 16));
30327- seq_printf(seq, "Product info : %s\n",
30328- chtostr((u8 *) (work32 + 6), 16));
30329- seq_printf(seq, "Description : %s\n",
30330- chtostr((u8 *) (work32 + 10), 16));
30331- seq_printf(seq, "Product rev. : %s\n",
30332- chtostr((u8 *) (work32 + 14), 8));
30333+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
30334+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
30335+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
30336+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
30337
30338 seq_printf(seq, "Serial number : ");
30339 print_serial_number(seq, (u8 *) (work32 + 16),
30340@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
30341 }
30342
30343 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
30344- seq_printf(seq, "Module name : %s\n",
30345- chtostr(result.module_name, 24));
30346- seq_printf(seq, "Module revision : %s\n",
30347- chtostr(result.module_rev, 8));
30348+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
30349+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
30350
30351 seq_printf(seq, "Serial number : ");
30352 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
30353@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
30354 return 0;
30355 }
30356
30357- seq_printf(seq, "Device name : %s\n",
30358- chtostr(result.device_name, 64));
30359- seq_printf(seq, "Service name : %s\n",
30360- chtostr(result.service_name, 64));
30361- seq_printf(seq, "Physical name : %s\n",
30362- chtostr(result.physical_location, 64));
30363- seq_printf(seq, "Instance number : %s\n",
30364- chtostr(result.instance_number, 4));
30365+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
30366+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
30367+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
30368+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
30369
30370 return 0;
30371 }
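
The removed chtostr() helper returned a pointer into a local 256-byte buffer (invalid once the function returns) and could overrun it for long inputs; the replacement relies on printf precision ("%.28s" and friends), which stops after the given number of bytes or at the first NUL, whichever comes first. A small userspace demonstration with invented field contents:

/* Demonstrates printing fixed-width, possibly non-NUL-terminated fields
 * with printf precision instead of copying through a temporary buffer. */
#include <stdio.h>

int main(void)
{
	/* 8 bytes, deliberately not NUL-terminated */
	unsigned char date[8] = { '2', '0', '1', '1', '1', '1', '1', '6' };
	char name[28] = "example-module v1.0";	/* NUL inside the field */

	printf("Module name    : %.28s\n", name);	  /* stops at the NUL */
	printf("Date           : %.8s\n", (char *)date);  /* stops after 8   */
	return 0;
}
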
30372diff -urNp linux-3.1.1/drivers/message/i2o/iop.c linux-3.1.1/drivers/message/i2o/iop.c
30373--- linux-3.1.1/drivers/message/i2o/iop.c 2011-11-11 15:19:27.000000000 -0500
30374+++ linux-3.1.1/drivers/message/i2o/iop.c 2011-11-16 18:39:07.000000000 -0500
30375@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
30376
30377 spin_lock_irqsave(&c->context_list_lock, flags);
30378
30379- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
30380- atomic_inc(&c->context_list_counter);
30381+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
30382+ atomic_inc_unchecked(&c->context_list_counter);
30383
30384- entry->context = atomic_read(&c->context_list_counter);
30385+ entry->context = atomic_read_unchecked(&c->context_list_counter);
30386
30387 list_add(&entry->list, &c->context_list);
30388
30389@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
30390
30391 #if BITS_PER_LONG == 64
30392 spin_lock_init(&c->context_list_lock);
30393- atomic_set(&c->context_list_counter, 0);
30394+ atomic_set_unchecked(&c->context_list_counter, 0);
30395 INIT_LIST_HEAD(&c->context_list);
30396 #endif
30397
30398diff -urNp linux-3.1.1/drivers/mfd/ab3100-core.c linux-3.1.1/drivers/mfd/ab3100-core.c
30399--- linux-3.1.1/drivers/mfd/ab3100-core.c 2011-11-11 15:19:27.000000000 -0500
30400+++ linux-3.1.1/drivers/mfd/ab3100-core.c 2011-11-16 18:39:07.000000000 -0500
30401@@ -809,7 +809,7 @@ struct ab_family_id {
30402 char *name;
30403 };
30404
30405-static const struct ab_family_id ids[] __devinitdata = {
30406+static const struct ab_family_id ids[] __devinitconst = {
30407 /* AB3100 */
30408 {
30409 .id = 0xc0,
30410diff -urNp linux-3.1.1/drivers/mfd/abx500-core.c linux-3.1.1/drivers/mfd/abx500-core.c
30411--- linux-3.1.1/drivers/mfd/abx500-core.c 2011-11-11 15:19:27.000000000 -0500
30412+++ linux-3.1.1/drivers/mfd/abx500-core.c 2011-11-16 18:39:07.000000000 -0500
30413@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
30414
30415 struct abx500_device_entry {
30416 struct list_head list;
30417- struct abx500_ops ops;
30418+ abx500_ops_no_const ops;
30419 struct device *dev;
30420 };
30421
30422diff -urNp linux-3.1.1/drivers/mfd/janz-cmodio.c linux-3.1.1/drivers/mfd/janz-cmodio.c
30423--- linux-3.1.1/drivers/mfd/janz-cmodio.c 2011-11-11 15:19:27.000000000 -0500
30424+++ linux-3.1.1/drivers/mfd/janz-cmodio.c 2011-11-16 18:39:07.000000000 -0500
30425@@ -13,6 +13,7 @@
30426
30427 #include <linux/kernel.h>
30428 #include <linux/module.h>
30429+#include <linux/slab.h>
30430 #include <linux/init.h>
30431 #include <linux/pci.h>
30432 #include <linux/interrupt.h>
30433diff -urNp linux-3.1.1/drivers/mfd/wm8350-i2c.c linux-3.1.1/drivers/mfd/wm8350-i2c.c
30434--- linux-3.1.1/drivers/mfd/wm8350-i2c.c 2011-11-11 15:19:27.000000000 -0500
30435+++ linux-3.1.1/drivers/mfd/wm8350-i2c.c 2011-11-16 18:40:10.000000000 -0500
30436@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
30437 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
30438 int ret;
30439
30440+ pax_track_stack();
30441+
30442 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
30443 return -EINVAL;
30444
30445diff -urNp linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c
30446--- linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c 2011-11-11 15:19:27.000000000 -0500
30447+++ linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c 2011-11-16 18:39:07.000000000 -0500
30448@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(in
30449 * the lid is closed. This leads to interrupts as soon as a little move
30450 * is done.
30451 */
30452- atomic_inc(&lis3_dev.count);
30453+ atomic_inc_unchecked(&lis3_dev.count);
30454
30455 wake_up_interruptible(&lis3_dev.misc_wait);
30456 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30457@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct in
30458 if (lis3_dev.pm_dev)
30459 pm_runtime_get_sync(lis3_dev.pm_dev);
30460
30461- atomic_set(&lis3_dev.count, 0);
30462+ atomic_set_unchecked(&lis3_dev.count, 0);
30463 return 0;
30464 }
30465
30466@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struc
30467 add_wait_queue(&lis3_dev.misc_wait, &wait);
30468 while (true) {
30469 set_current_state(TASK_INTERRUPTIBLE);
30470- data = atomic_xchg(&lis3_dev.count, 0);
30471+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30472 if (data)
30473 break;
30474
30475@@ -585,7 +585,7 @@ out:
30476 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30477 {
30478 poll_wait(file, &lis3_dev.misc_wait, wait);
30479- if (atomic_read(&lis3_dev.count))
30480+ if (atomic_read_unchecked(&lis3_dev.count))
30481 return POLLIN | POLLRDNORM;
30482 return 0;
30483 }
30484diff -urNp linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h
30485--- linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h 2011-11-11 15:19:27.000000000 -0500
30486+++ linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h 2011-11-16 18:39:07.000000000 -0500
30487@@ -265,7 +265,7 @@ struct lis3lv02d {
30488 struct input_polled_dev *idev; /* input device */
30489 struct platform_device *pdev; /* platform device */
30490 struct regulator_bulk_data regulators[2];
30491- atomic_t count; /* interrupt count after last read */
30492+ atomic_unchecked_t count; /* interrupt count after last read */
30493 union axis_conversion ac; /* hw -> logical axis */
30494 int mapped_btns[3];
30495
30496diff -urNp linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c
30497--- linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c 2011-11-11 15:19:27.000000000 -0500
30498+++ linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c 2011-11-16 18:39:07.000000000 -0500
30499@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
30500 unsigned long nsec;
30501
30502 nsec = CLKS2NSEC(clks);
30503- atomic_long_inc(&mcs_op_statistics[op].count);
30504- atomic_long_add(nsec, &mcs_op_statistics[op].total);
30505+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
30506+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
30507 if (mcs_op_statistics[op].max < nsec)
30508 mcs_op_statistics[op].max = nsec;
30509 }
30510diff -urNp linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c
30511--- linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c 2011-11-11 15:19:27.000000000 -0500
30512+++ linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c 2011-11-16 18:39:07.000000000 -0500
30513@@ -32,9 +32,9 @@
30514
30515 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
30516
30517-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
30518+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
30519 {
30520- unsigned long val = atomic_long_read(v);
30521+ unsigned long val = atomic_long_read_unchecked(v);
30522
30523 seq_printf(s, "%16lu %s\n", val, id);
30524 }
30525@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
30526
30527 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
30528 for (op = 0; op < mcsop_last; op++) {
30529- count = atomic_long_read(&mcs_op_statistics[op].count);
30530- total = atomic_long_read(&mcs_op_statistics[op].total);
30531+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
30532+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
30533 max = mcs_op_statistics[op].max;
30534 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
30535 count ? total / count : 0, max);
30536diff -urNp linux-3.1.1/drivers/misc/sgi-gru/grutables.h linux-3.1.1/drivers/misc/sgi-gru/grutables.h
30537--- linux-3.1.1/drivers/misc/sgi-gru/grutables.h 2011-11-11 15:19:27.000000000 -0500
30538+++ linux-3.1.1/drivers/misc/sgi-gru/grutables.h 2011-11-16 18:39:07.000000000 -0500
30539@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
30540 * GRU statistics.
30541 */
30542 struct gru_stats_s {
30543- atomic_long_t vdata_alloc;
30544- atomic_long_t vdata_free;
30545- atomic_long_t gts_alloc;
30546- atomic_long_t gts_free;
30547- atomic_long_t gms_alloc;
30548- atomic_long_t gms_free;
30549- atomic_long_t gts_double_allocate;
30550- atomic_long_t assign_context;
30551- atomic_long_t assign_context_failed;
30552- atomic_long_t free_context;
30553- atomic_long_t load_user_context;
30554- atomic_long_t load_kernel_context;
30555- atomic_long_t lock_kernel_context;
30556- atomic_long_t unlock_kernel_context;
30557- atomic_long_t steal_user_context;
30558- atomic_long_t steal_kernel_context;
30559- atomic_long_t steal_context_failed;
30560- atomic_long_t nopfn;
30561- atomic_long_t asid_new;
30562- atomic_long_t asid_next;
30563- atomic_long_t asid_wrap;
30564- atomic_long_t asid_reuse;
30565- atomic_long_t intr;
30566- atomic_long_t intr_cbr;
30567- atomic_long_t intr_tfh;
30568- atomic_long_t intr_spurious;
30569- atomic_long_t intr_mm_lock_failed;
30570- atomic_long_t call_os;
30571- atomic_long_t call_os_wait_queue;
30572- atomic_long_t user_flush_tlb;
30573- atomic_long_t user_unload_context;
30574- atomic_long_t user_exception;
30575- atomic_long_t set_context_option;
30576- atomic_long_t check_context_retarget_intr;
30577- atomic_long_t check_context_unload;
30578- atomic_long_t tlb_dropin;
30579- atomic_long_t tlb_preload_page;
30580- atomic_long_t tlb_dropin_fail_no_asid;
30581- atomic_long_t tlb_dropin_fail_upm;
30582- atomic_long_t tlb_dropin_fail_invalid;
30583- atomic_long_t tlb_dropin_fail_range_active;
30584- atomic_long_t tlb_dropin_fail_idle;
30585- atomic_long_t tlb_dropin_fail_fmm;
30586- atomic_long_t tlb_dropin_fail_no_exception;
30587- atomic_long_t tfh_stale_on_fault;
30588- atomic_long_t mmu_invalidate_range;
30589- atomic_long_t mmu_invalidate_page;
30590- atomic_long_t flush_tlb;
30591- atomic_long_t flush_tlb_gru;
30592- atomic_long_t flush_tlb_gru_tgh;
30593- atomic_long_t flush_tlb_gru_zero_asid;
30594-
30595- atomic_long_t copy_gpa;
30596- atomic_long_t read_gpa;
30597-
30598- atomic_long_t mesq_receive;
30599- atomic_long_t mesq_receive_none;
30600- atomic_long_t mesq_send;
30601- atomic_long_t mesq_send_failed;
30602- atomic_long_t mesq_noop;
30603- atomic_long_t mesq_send_unexpected_error;
30604- atomic_long_t mesq_send_lb_overflow;
30605- atomic_long_t mesq_send_qlimit_reached;
30606- atomic_long_t mesq_send_amo_nacked;
30607- atomic_long_t mesq_send_put_nacked;
30608- atomic_long_t mesq_page_overflow;
30609- atomic_long_t mesq_qf_locked;
30610- atomic_long_t mesq_qf_noop_not_full;
30611- atomic_long_t mesq_qf_switch_head_failed;
30612- atomic_long_t mesq_qf_unexpected_error;
30613- atomic_long_t mesq_noop_unexpected_error;
30614- atomic_long_t mesq_noop_lb_overflow;
30615- atomic_long_t mesq_noop_qlimit_reached;
30616- atomic_long_t mesq_noop_amo_nacked;
30617- atomic_long_t mesq_noop_put_nacked;
30618- atomic_long_t mesq_noop_page_overflow;
30619+ atomic_long_unchecked_t vdata_alloc;
30620+ atomic_long_unchecked_t vdata_free;
30621+ atomic_long_unchecked_t gts_alloc;
30622+ atomic_long_unchecked_t gts_free;
30623+ atomic_long_unchecked_t gms_alloc;
30624+ atomic_long_unchecked_t gms_free;
30625+ atomic_long_unchecked_t gts_double_allocate;
30626+ atomic_long_unchecked_t assign_context;
30627+ atomic_long_unchecked_t assign_context_failed;
30628+ atomic_long_unchecked_t free_context;
30629+ atomic_long_unchecked_t load_user_context;
30630+ atomic_long_unchecked_t load_kernel_context;
30631+ atomic_long_unchecked_t lock_kernel_context;
30632+ atomic_long_unchecked_t unlock_kernel_context;
30633+ atomic_long_unchecked_t steal_user_context;
30634+ atomic_long_unchecked_t steal_kernel_context;
30635+ atomic_long_unchecked_t steal_context_failed;
30636+ atomic_long_unchecked_t nopfn;
30637+ atomic_long_unchecked_t asid_new;
30638+ atomic_long_unchecked_t asid_next;
30639+ atomic_long_unchecked_t asid_wrap;
30640+ atomic_long_unchecked_t asid_reuse;
30641+ atomic_long_unchecked_t intr;
30642+ atomic_long_unchecked_t intr_cbr;
30643+ atomic_long_unchecked_t intr_tfh;
30644+ atomic_long_unchecked_t intr_spurious;
30645+ atomic_long_unchecked_t intr_mm_lock_failed;
30646+ atomic_long_unchecked_t call_os;
30647+ atomic_long_unchecked_t call_os_wait_queue;
30648+ atomic_long_unchecked_t user_flush_tlb;
30649+ atomic_long_unchecked_t user_unload_context;
30650+ atomic_long_unchecked_t user_exception;
30651+ atomic_long_unchecked_t set_context_option;
30652+ atomic_long_unchecked_t check_context_retarget_intr;
30653+ atomic_long_unchecked_t check_context_unload;
30654+ atomic_long_unchecked_t tlb_dropin;
30655+ atomic_long_unchecked_t tlb_preload_page;
30656+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
30657+ atomic_long_unchecked_t tlb_dropin_fail_upm;
30658+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
30659+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
30660+ atomic_long_unchecked_t tlb_dropin_fail_idle;
30661+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
30662+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
30663+ atomic_long_unchecked_t tfh_stale_on_fault;
30664+ atomic_long_unchecked_t mmu_invalidate_range;
30665+ atomic_long_unchecked_t mmu_invalidate_page;
30666+ atomic_long_unchecked_t flush_tlb;
30667+ atomic_long_unchecked_t flush_tlb_gru;
30668+ atomic_long_unchecked_t flush_tlb_gru_tgh;
30669+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
30670+
30671+ atomic_long_unchecked_t copy_gpa;
30672+ atomic_long_unchecked_t read_gpa;
30673+
30674+ atomic_long_unchecked_t mesq_receive;
30675+ atomic_long_unchecked_t mesq_receive_none;
30676+ atomic_long_unchecked_t mesq_send;
30677+ atomic_long_unchecked_t mesq_send_failed;
30678+ atomic_long_unchecked_t mesq_noop;
30679+ atomic_long_unchecked_t mesq_send_unexpected_error;
30680+ atomic_long_unchecked_t mesq_send_lb_overflow;
30681+ atomic_long_unchecked_t mesq_send_qlimit_reached;
30682+ atomic_long_unchecked_t mesq_send_amo_nacked;
30683+ atomic_long_unchecked_t mesq_send_put_nacked;
30684+ atomic_long_unchecked_t mesq_page_overflow;
30685+ atomic_long_unchecked_t mesq_qf_locked;
30686+ atomic_long_unchecked_t mesq_qf_noop_not_full;
30687+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
30688+ atomic_long_unchecked_t mesq_qf_unexpected_error;
30689+ atomic_long_unchecked_t mesq_noop_unexpected_error;
30690+ atomic_long_unchecked_t mesq_noop_lb_overflow;
30691+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
30692+ atomic_long_unchecked_t mesq_noop_amo_nacked;
30693+ atomic_long_unchecked_t mesq_noop_put_nacked;
30694+ atomic_long_unchecked_t mesq_noop_page_overflow;
30695
30696 };
30697
30698@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
30699 tghop_invalidate, mcsop_last};
30700
30701 struct mcs_op_statistic {
30702- atomic_long_t count;
30703- atomic_long_t total;
30704+ atomic_long_unchecked_t count;
30705+ atomic_long_unchecked_t total;
30706 unsigned long max;
30707 };
30708
30709@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
30710
30711 #define STAT(id) do { \
30712 if (gru_options & OPT_STATS) \
30713- atomic_long_inc(&gru_stats.id); \
30714+ atomic_long_inc_unchecked(&gru_stats.id); \
30715 } while (0)
30716
30717 #ifdef CONFIG_SGI_GRU_DEBUG
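
PaX's REFCOUNT feature instruments the ordinary atomic types to trap on overflow; counters that are pure statistics, like the gru_stats fields above, are switched to the *_unchecked variants so they keep plain wrapping semantics and do not trigger false positives. A userspace sketch of the checked/unchecked split, assuming only C11 atomics (the names echo the patch, but the overflow check here is a simple assertion rather than the PaX machinery):

#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_long counter; } atomic_long_checked_t;
typedef struct { atomic_long counter; } atomic_long_unchecked_t;

static void atomic_long_inc_checked(atomic_long_checked_t *v)
{
	long old = atomic_fetch_add(&v->counter, 1);

	assert(old != LONG_MAX);	/* refcount-style overflow trap */
}

static void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
{
	atomic_fetch_add(&v->counter, 1);	/* statistics: wrapping is fine */
}

int main(void)
{
	atomic_long_checked_t refs = { 0 };
	atomic_long_unchecked_t stat = { 0 };

	atomic_long_inc_checked(&refs);
	atomic_long_inc_unchecked(&stat);
	printf("refs=%ld stats=%ld\n",
	       atomic_load(&refs.counter), atomic_load(&stat.counter));
	return 0;
}
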
30718diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xpc.h linux-3.1.1/drivers/misc/sgi-xp/xpc.h
30719--- linux-3.1.1/drivers/misc/sgi-xp/xpc.h 2011-11-11 15:19:27.000000000 -0500
30720+++ linux-3.1.1/drivers/misc/sgi-xp/xpc.h 2011-11-16 18:39:07.000000000 -0500
30721@@ -835,6 +835,7 @@ struct xpc_arch_operations {
30722 void (*received_payload) (struct xpc_channel *, void *);
30723 void (*notify_senders_of_disconnect) (struct xpc_channel *);
30724 };
30725+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
30726
30727 /* struct xpc_partition act_state values (for XPC HB) */
30728
30729@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
30730 /* found in xpc_main.c */
30731 extern struct device *xpc_part;
30732 extern struct device *xpc_chan;
30733-extern struct xpc_arch_operations xpc_arch_ops;
30734+extern xpc_arch_operations_no_const xpc_arch_ops;
30735 extern int xpc_disengage_timelimit;
30736 extern int xpc_disengage_timedout;
30737 extern int xpc_activate_IRQ_rcvd;
30738diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c
30739--- linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c 2011-11-11 15:19:27.000000000 -0500
30740+++ linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c 2011-11-16 18:39:07.000000000 -0500
30741@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
30742 .notifier_call = xpc_system_die,
30743 };
30744
30745-struct xpc_arch_operations xpc_arch_ops;
30746+xpc_arch_operations_no_const xpc_arch_ops;
30747
30748 /*
30749 * Timer function to enforce the timelimit on the partition disengage.
30750diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xp.h linux-3.1.1/drivers/misc/sgi-xp/xp.h
30751--- linux-3.1.1/drivers/misc/sgi-xp/xp.h 2011-11-11 15:19:27.000000000 -0500
30752+++ linux-3.1.1/drivers/misc/sgi-xp/xp.h 2011-11-16 18:39:07.000000000 -0500
30753@@ -289,7 +289,7 @@ struct xpc_interface {
30754 xpc_notify_func, void *);
30755 void (*received) (short, int, void *);
30756 enum xp_retval (*partid_to_nasids) (short, void *);
30757-};
30758+} __no_const;
30759
30760 extern struct xpc_interface xpc_interface;
30761
30762diff -urNp linux-3.1.1/drivers/mmc/host/sdhci-pci.c linux-3.1.1/drivers/mmc/host/sdhci-pci.c
30763--- linux-3.1.1/drivers/mmc/host/sdhci-pci.c 2011-11-11 15:19:27.000000000 -0500
30764+++ linux-3.1.1/drivers/mmc/host/sdhci-pci.c 2011-11-16 18:39:07.000000000 -0500
30765@@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhc
30766 .probe = via_probe,
30767 };
30768
30769-static const struct pci_device_id pci_ids[] __devinitdata = {
30770+static const struct pci_device_id pci_ids[] __devinitconst = {
30771 {
30772 .vendor = PCI_VENDOR_ID_RICOH,
30773 .device = PCI_DEVICE_ID_RICOH_R5C822,
30774diff -urNp linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c
30775--- linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-11 15:19:27.000000000 -0500
30776+++ linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-16 18:40:10.000000000 -0500
30777@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
30778 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
30779 unsigned long timeo = jiffies + HZ;
30780
30781+ pax_track_stack();
30782+
30783 /* Prevent setting state FL_SYNCING for chip in suspended state. */
30784 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
30785 goto sleep;
30786@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
30787 unsigned long initial_adr;
30788 int initial_len = len;
30789
30790+ pax_track_stack();
30791+
30792 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
30793 adr += chip->start;
30794 initial_adr = adr;
30795@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
30796 int retries = 3;
30797 int ret;
30798
30799+ pax_track_stack();
30800+
30801 adr += chip->start;
30802
30803 retry:
30804diff -urNp linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c
30805--- linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-11 15:19:27.000000000 -0500
30806+++ linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-16 18:40:10.000000000 -0500
30807@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
30808 unsigned long cmd_addr;
30809 struct cfi_private *cfi = map->fldrv_priv;
30810
30811+ pax_track_stack();
30812+
30813 adr += chip->start;
30814
30815 /* Ensure cmd read/writes are aligned. */
30816@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
30817 DECLARE_WAITQUEUE(wait, current);
30818 int wbufsize, z;
30819
30820+ pax_track_stack();
30821+
30822 /* M58LW064A requires bus alignment for buffer wriets -- saw */
30823 if (adr & (map_bankwidth(map)-1))
30824 return -EINVAL;
30825@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
30826 DECLARE_WAITQUEUE(wait, current);
30827 int ret = 0;
30828
30829+ pax_track_stack();
30830+
30831 adr += chip->start;
30832
30833 /* Let's determine this according to the interleave only once */
30834@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
30835 unsigned long timeo = jiffies + HZ;
30836 DECLARE_WAITQUEUE(wait, current);
30837
30838+ pax_track_stack();
30839+
30840 adr += chip->start;
30841
30842 /* Let's determine this according to the interleave only once */
30843@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
30844 unsigned long timeo = jiffies + HZ;
30845 DECLARE_WAITQUEUE(wait, current);
30846
30847+ pax_track_stack();
30848+
30849 adr += chip->start;
30850
30851 /* Let's determine this according to the interleave only once */
30852diff -urNp linux-3.1.1/drivers/mtd/devices/doc2000.c linux-3.1.1/drivers/mtd/devices/doc2000.c
30853--- linux-3.1.1/drivers/mtd/devices/doc2000.c 2011-11-11 15:19:27.000000000 -0500
30854+++ linux-3.1.1/drivers/mtd/devices/doc2000.c 2011-11-16 18:39:07.000000000 -0500
30855@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
30856
30857 /* The ECC will not be calculated correctly if less than 512 is written */
30858 /* DBB-
30859- if (len != 0x200 && eccbuf)
30860+ if (len != 0x200)
30861 printk(KERN_WARNING
30862 "ECC needs a full sector write (adr: %lx size %lx)\n",
30863 (long) to, (long) len);
30864diff -urNp linux-3.1.1/drivers/mtd/devices/doc2001.c linux-3.1.1/drivers/mtd/devices/doc2001.c
30865--- linux-3.1.1/drivers/mtd/devices/doc2001.c 2011-11-11 15:19:27.000000000 -0500
30866+++ linux-3.1.1/drivers/mtd/devices/doc2001.c 2011-11-16 18:39:07.000000000 -0500
30867@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
30868 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
30869
30870 /* Don't allow read past end of device */
30871- if (from >= this->totlen)
30872+ if (from >= this->totlen || !len)
30873 return -EINVAL;
30874
30875 /* Don't allow a single read to cross a 512-byte block boundary */
30876diff -urNp linux-3.1.1/drivers/mtd/ftl.c linux-3.1.1/drivers/mtd/ftl.c
30877--- linux-3.1.1/drivers/mtd/ftl.c 2011-11-11 15:19:27.000000000 -0500
30878+++ linux-3.1.1/drivers/mtd/ftl.c 2011-11-16 18:40:10.000000000 -0500
30879@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
30880 loff_t offset;
30881 uint16_t srcunitswap = cpu_to_le16(srcunit);
30882
30883+ pax_track_stack();
30884+
30885 eun = &part->EUNInfo[srcunit];
30886 xfer = &part->XferInfo[xferunit];
30887 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
30888diff -urNp linux-3.1.1/drivers/mtd/inftlcore.c linux-3.1.1/drivers/mtd/inftlcore.c
30889--- linux-3.1.1/drivers/mtd/inftlcore.c 2011-11-11 15:19:27.000000000 -0500
30890+++ linux-3.1.1/drivers/mtd/inftlcore.c 2011-11-16 18:40:10.000000000 -0500
30891@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
30892 struct inftl_oob oob;
30893 size_t retlen;
30894
30895+ pax_track_stack();
30896+
30897 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
30898 "pending=%d)\n", inftl, thisVUC, pendingblock);
30899
30900diff -urNp linux-3.1.1/drivers/mtd/inftlmount.c linux-3.1.1/drivers/mtd/inftlmount.c
30901--- linux-3.1.1/drivers/mtd/inftlmount.c 2011-11-11 15:19:27.000000000 -0500
30902+++ linux-3.1.1/drivers/mtd/inftlmount.c 2011-11-16 18:40:10.000000000 -0500
30903@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
30904 struct INFTLPartition *ip;
30905 size_t retlen;
30906
30907+ pax_track_stack();
30908+
30909 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
30910
30911 /*
30912diff -urNp linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c
30913--- linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c 2011-11-11 15:19:27.000000000 -0500
30914+++ linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c 2011-11-16 18:40:10.000000000 -0500
30915@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
30916 {
30917 map_word pfow_val[4];
30918
30919+ pax_track_stack();
30920+
30921 /* Check identification string */
30922 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
30923 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
30924diff -urNp linux-3.1.1/drivers/mtd/mtdchar.c linux-3.1.1/drivers/mtd/mtdchar.c
30925--- linux-3.1.1/drivers/mtd/mtdchar.c 2011-11-11 15:19:27.000000000 -0500
30926+++ linux-3.1.1/drivers/mtd/mtdchar.c 2011-11-16 18:40:10.000000000 -0500
30927@@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file,
30928 u_long size;
30929 struct mtd_info_user info;
30930
30931+ pax_track_stack();
30932+
30933 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
30934
30935 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
30936diff -urNp linux-3.1.1/drivers/mtd/nand/denali.c linux-3.1.1/drivers/mtd/nand/denali.c
30937--- linux-3.1.1/drivers/mtd/nand/denali.c 2011-11-11 15:19:27.000000000 -0500
30938+++ linux-3.1.1/drivers/mtd/nand/denali.c 2011-11-16 18:39:07.000000000 -0500
30939@@ -26,6 +26,7 @@
30940 #include <linux/pci.h>
30941 #include <linux/mtd/mtd.h>
30942 #include <linux/module.h>
30943+#include <linux/slab.h>
30944
30945 #include "denali.h"
30946
30947diff -urNp linux-3.1.1/drivers/mtd/nftlcore.c linux-3.1.1/drivers/mtd/nftlcore.c
30948--- linux-3.1.1/drivers/mtd/nftlcore.c 2011-11-11 15:19:27.000000000 -0500
30949+++ linux-3.1.1/drivers/mtd/nftlcore.c 2011-11-16 18:40:10.000000000 -0500
30950@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
30951 int inplace = 1;
30952 size_t retlen;
30953
30954+ pax_track_stack();
30955+
30956 memset(BlockMap, 0xff, sizeof(BlockMap));
30957 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
30958
30959diff -urNp linux-3.1.1/drivers/mtd/nftlmount.c linux-3.1.1/drivers/mtd/nftlmount.c
30960--- linux-3.1.1/drivers/mtd/nftlmount.c 2011-11-11 15:19:27.000000000 -0500
30961+++ linux-3.1.1/drivers/mtd/nftlmount.c 2011-11-16 18:40:10.000000000 -0500
30962@@ -24,6 +24,7 @@
30963 #include <asm/errno.h>
30964 #include <linux/delay.h>
30965 #include <linux/slab.h>
30966+#include <linux/sched.h>
30967 #include <linux/mtd/mtd.h>
30968 #include <linux/mtd/nand.h>
30969 #include <linux/mtd/nftl.h>
30970@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
30971 struct mtd_info *mtd = nftl->mbd.mtd;
30972 unsigned int i;
30973
30974+ pax_track_stack();
30975+
30976 /* Assume logical EraseSize == physical erasesize for starting the scan.
30977 We'll sort it out later if we find a MediaHeader which says otherwise */
30978 /* Actually, we won't. The new DiskOnChip driver has already scanned
30979diff -urNp linux-3.1.1/drivers/mtd/ubi/build.c linux-3.1.1/drivers/mtd/ubi/build.c
30980--- linux-3.1.1/drivers/mtd/ubi/build.c 2011-11-11 15:19:27.000000000 -0500
30981+++ linux-3.1.1/drivers/mtd/ubi/build.c 2011-11-16 18:39:07.000000000 -0500
30982@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
30983 static int __init bytes_str_to_int(const char *str)
30984 {
30985 char *endp;
30986- unsigned long result;
30987+ unsigned long result, scale = 1;
30988
30989 result = simple_strtoul(str, &endp, 0);
30990 if (str == endp || result >= INT_MAX) {
30991@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const
30992
30993 switch (*endp) {
30994 case 'G':
30995- result *= 1024;
30996+ scale *= 1024;
30997 case 'M':
30998- result *= 1024;
30999+ scale *= 1024;
31000 case 'K':
31001- result *= 1024;
31002+ scale *= 1024;
31003 if (endp[1] == 'i' && endp[2] == 'B')
31004 endp += 2;
31005 case '\0':
31006@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const
31007 return -EINVAL;
31008 }
31009
31010- return result;
31011+ if ((intoverflow_t)result*scale >= INT_MAX) {
31012+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31013+ str);
31014+ return -EINVAL;
31015+ }
31016+
31017+ return result*scale;
31018 }
31019
31020 /**
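
The bytes_str_to_int() hunk fixes an integer overflow: the original scaled the parsed value in place for the K/M/G suffixes and only range-checked the unscaled number, so a value like "4G" could wrap once multiplied. The patched version accumulates the scale separately and re-checks the product against INT_MAX in a wider type before returning. A userspace sketch of the corrected logic, using unsigned long long where the patch uses intoverflow_t:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result, scale = 1;

	errno = 0;
	result = strtoul(str, &endp, 0);
	if (str == endp || errno || result >= INT_MAX)
		return -EINVAL;

	switch (*endp) {
	case 'G':
		scale *= 1024;		/* fall through */
	case 'M':
		scale *= 1024;		/* fall through */
	case 'K':
		scale *= 1024;
		break;
	case '\0':
		break;
	default:
		return -EINVAL;
	}

	if ((unsigned long long)result * scale >= INT_MAX)
		return -EINVAL;		/* caught only after scaling */

	return (int)(result * scale);
}

int main(void)
{
	printf("%d\n", bytes_str_to_int("512K"));	/*  524288 */
	printf("%d\n", bytes_str_to_int("4G"));		/* -EINVAL, not a wrapped value */
	return 0;
}
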
31021diff -urNp linux-3.1.1/drivers/net/atlx/atl2.c linux-3.1.1/drivers/net/atlx/atl2.c
31022--- linux-3.1.1/drivers/net/atlx/atl2.c 2011-11-11 15:19:27.000000000 -0500
31023+++ linux-3.1.1/drivers/net/atlx/atl2.c 2011-11-16 18:39:07.000000000 -0500
31024@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw
31025 */
31026
31027 #define ATL2_PARAM(X, desc) \
31028- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31029+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31030 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
31031 MODULE_PARM_DESC(X, desc);
31032 #else
31033diff -urNp linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c
31034--- linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c 2011-11-11 15:19:27.000000000 -0500
31035+++ linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c 2011-11-16 18:39:07.000000000 -0500
31036@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
31037 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
31038 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
31039
31040-static struct bfa_ioc_hwif nw_hwif_ct;
31041+static struct bfa_ioc_hwif nw_hwif_ct = {
31042+ .ioc_pll_init = bfa_ioc_ct_pll_init,
31043+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
31044+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
31045+ .ioc_reg_init = bfa_ioc_ct_reg_init,
31046+ .ioc_map_port = bfa_ioc_ct_map_port,
31047+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
31048+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
31049+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
31050+ .ioc_sync_start = bfa_ioc_ct_sync_start,
31051+ .ioc_sync_join = bfa_ioc_ct_sync_join,
31052+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
31053+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
31054+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
31055+};
31056
31057 /**
31058 * Called from bfa_ioc_attach() to map asic specific calls.
31059@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
31060 void
31061 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
31062 {
31063- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
31064- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
31065- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
31066- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
31067- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
31068- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
31069- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
31070- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
31071- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
31072- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
31073- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
31074- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
31075- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
31076-
31077 ioc->ioc_hwif = &nw_hwif_ct;
31078 }
31079
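
Binding the nw_hwif_ct methods with a designated initializer, instead of assigning each member inside bfa_nw_ioc_set_ct_hwif(), means the ops table is filled at build time and never needs to be writable afterwards, which is what makes the constification possible. A short sketch of the same transformation on an invented two-method ops table:

#include <stdio.h>

struct ioc_hwif {
	int  (*pll_init)(void);
	void (*map_port)(int port);
};

static int ct_pll_init(void)      { puts("pll init");  return 0; }
static void ct_map_port(int port) { printf("map port %d\n", port); }

/* bound once, so it can be const / live in a read-only section */
static const struct ioc_hwif nw_hwif_ct = {
	.pll_init = ct_pll_init,
	.map_port = ct_map_port,
};

int main(void)
{
	const struct ioc_hwif *hwif = &nw_hwif_ct;

	hwif->pll_init();
	hwif->map_port(0);
	return 0;
}
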
31080diff -urNp linux-3.1.1/drivers/net/bna/bnad.c linux-3.1.1/drivers/net/bna/bnad.c
31081--- linux-3.1.1/drivers/net/bna/bnad.c 2011-11-11 15:19:27.000000000 -0500
31082+++ linux-3.1.1/drivers/net/bna/bnad.c 2011-11-16 18:39:07.000000000 -0500
31083@@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31084 struct bna_intr_info *intr_info =
31085 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
31086 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
31087- struct bna_tx_event_cbfn tx_cbfn;
31088+ static struct bna_tx_event_cbfn tx_cbfn = {
31089+ /* Initialize the tx event handlers */
31090+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
31091+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
31092+ .tx_stall_cbfn = bnad_cb_tx_stall,
31093+ .tx_resume_cbfn = bnad_cb_tx_resume,
31094+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
31095+ };
31096 struct bna_tx *tx;
31097 unsigned long flags;
31098
31099@@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31100 tx_config->txq_depth = bnad->txq_depth;
31101 tx_config->tx_type = BNA_TX_T_REGULAR;
31102
31103- /* Initialize the tx event handlers */
31104- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
31105- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
31106- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
31107- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
31108- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
31109-
31110 /* Get BNA's resource requirement for one tx object */
31111 spin_lock_irqsave(&bnad->bna_lock, flags);
31112 bna_tx_res_req(bnad->num_txq_per_tx,
31113@@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
31114 struct bna_intr_info *intr_info =
31115 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
31116 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
31117- struct bna_rx_event_cbfn rx_cbfn;
31118+ static struct bna_rx_event_cbfn rx_cbfn = {
31119+ /* Initialize the Rx event handlers */
31120+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
31121+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
31122+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
31123+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
31124+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
31125+ .rx_post_cbfn = bnad_cb_rx_post
31126+ };
31127 struct bna_rx *rx;
31128 unsigned long flags;
31129
31130 /* Initialize the Rx object configuration */
31131 bnad_init_rx_config(bnad, rx_config);
31132
31133- /* Initialize the Rx event handlers */
31134- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
31135- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
31136- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
31137- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
31138- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
31139- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
31140-
31141 /* Get BNA's resource requirement for one Rx object */
31142 spin_lock_irqsave(&bnad->bna_lock, flags);
31143 bna_rx_res_req(rx_config, res_info);
31144diff -urNp linux-3.1.1/drivers/net/bnx2.c linux-3.1.1/drivers/net/bnx2.c
31145--- linux-3.1.1/drivers/net/bnx2.c 2011-11-11 15:19:27.000000000 -0500
31146+++ linux-3.1.1/drivers/net/bnx2.c 2011-11-16 18:40:11.000000000 -0500
31147@@ -5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31148 int rc = 0;
31149 u32 magic, csum;
31150
31151+ pax_track_stack();
31152+
31153 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31154 goto test_nvram_done;
31155
31156diff -urNp linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c
31157--- linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c 2011-11-11 15:19:27.000000000 -0500
31158+++ linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c 2011-11-16 18:40:11.000000000 -0500
31159@@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x
31160 int i, rc;
31161 u32 magic, crc;
31162
31163+ pax_track_stack();
31164+
31165 if (BP_NOMCP(bp))
31166 return 0;
31167
31168diff -urNp linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h
31169--- linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h 2011-11-11 15:19:27.000000000 -0500
31170+++ linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h 2011-11-16 18:39:07.000000000 -0500
31171@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
31172
31173 int (*wait_comp)(struct bnx2x *bp,
31174 struct bnx2x_rx_mode_ramrod_params *p);
31175-};
31176+} __no_const;
31177
31178 /********************** Set multicast group ***********************************/
31179
31180diff -urNp linux-3.1.1/drivers/net/cxgb3/l2t.h linux-3.1.1/drivers/net/cxgb3/l2t.h
31181--- linux-3.1.1/drivers/net/cxgb3/l2t.h 2011-11-11 15:19:27.000000000 -0500
31182+++ linux-3.1.1/drivers/net/cxgb3/l2t.h 2011-11-16 18:39:07.000000000 -0500
31183@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
31184 */
31185 struct l2t_skb_cb {
31186 arp_failure_handler_func arp_failure_handler;
31187-};
31188+} __no_const;
31189
31190 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
31191
31192diff -urNp linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c
31193--- linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c 2011-11-11 15:19:27.000000000 -0500
31194+++ linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c 2011-11-16 18:40:22.000000000 -0500
31195@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
31196 unsigned int nchan = adap->params.nports;
31197 struct msix_entry entries[MAX_INGQ + 1];
31198
31199+ pax_track_stack();
31200+
31201 for (i = 0; i < ARRAY_SIZE(entries); ++i)
31202 entries[i].entry = i;
31203
31204diff -urNp linux-3.1.1/drivers/net/cxgb4/t4_hw.c linux-3.1.1/drivers/net/cxgb4/t4_hw.c
31205--- linux-3.1.1/drivers/net/cxgb4/t4_hw.c 2011-11-11 15:19:27.000000000 -0500
31206+++ linux-3.1.1/drivers/net/cxgb4/t4_hw.c 2011-11-16 18:40:22.000000000 -0500
31207@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
31208 u8 vpd[VPD_LEN], csum;
31209 unsigned int vpdr_len, kw_offset, id_len;
31210
31211+ pax_track_stack();
31212+
31213 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
31214 if (ret < 0)
31215 return ret;
31216diff -urNp linux-3.1.1/drivers/net/e1000e/82571.c linux-3.1.1/drivers/net/e1000e/82571.c
31217--- linux-3.1.1/drivers/net/e1000e/82571.c 2011-11-11 15:19:27.000000000 -0500
31218+++ linux-3.1.1/drivers/net/e1000e/82571.c 2011-11-16 18:39:07.000000000 -0500
31219@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
31220 {
31221 struct e1000_hw *hw = &adapter->hw;
31222 struct e1000_mac_info *mac = &hw->mac;
31223- struct e1000_mac_operations *func = &mac->ops;
31224+ e1000_mac_operations_no_const *func = &mac->ops;
31225 u32 swsm = 0;
31226 u32 swsm2 = 0;
31227 bool force_clear_smbi = false;
31228diff -urNp linux-3.1.1/drivers/net/e1000e/es2lan.c linux-3.1.1/drivers/net/e1000e/es2lan.c
31229--- linux-3.1.1/drivers/net/e1000e/es2lan.c 2011-11-11 15:19:27.000000000 -0500
31230+++ linux-3.1.1/drivers/net/e1000e/es2lan.c 2011-11-16 18:39:07.000000000 -0500
31231@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
31232 {
31233 struct e1000_hw *hw = &adapter->hw;
31234 struct e1000_mac_info *mac = &hw->mac;
31235- struct e1000_mac_operations *func = &mac->ops;
31236+ e1000_mac_operations_no_const *func = &mac->ops;
31237
31238 /* Set media type */
31239 switch (adapter->pdev->device) {
31240diff -urNp linux-3.1.1/drivers/net/e1000e/hw.h linux-3.1.1/drivers/net/e1000e/hw.h
31241--- linux-3.1.1/drivers/net/e1000e/hw.h 2011-11-11 15:19:27.000000000 -0500
31242+++ linux-3.1.1/drivers/net/e1000e/hw.h 2011-11-16 18:39:07.000000000 -0500
31243@@ -778,6 +778,7 @@ struct e1000_mac_operations {
31244 void (*write_vfta)(struct e1000_hw *, u32, u32);
31245 s32 (*read_mac_addr)(struct e1000_hw *);
31246 };
31247+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31248
31249 /*
31250 * When to use various PHY register access functions:
31251@@ -818,6 +819,7 @@ struct e1000_phy_operations {
31252 void (*power_up)(struct e1000_hw *);
31253 void (*power_down)(struct e1000_hw *);
31254 };
31255+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31256
31257 /* Function pointers for the NVM. */
31258 struct e1000_nvm_operations {
31259@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
31260 s32 (*validate)(struct e1000_hw *);
31261 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
31262 };
31263+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31264
31265 struct e1000_mac_info {
31266- struct e1000_mac_operations ops;
31267+ e1000_mac_operations_no_const ops;
31268 u8 addr[ETH_ALEN];
31269 u8 perm_addr[ETH_ALEN];
31270
31271@@ -872,7 +875,7 @@ struct e1000_mac_info {
31272 };
31273
31274 struct e1000_phy_info {
31275- struct e1000_phy_operations ops;
31276+ e1000_phy_operations_no_const ops;
31277
31278 enum e1000_phy_type type;
31279
31280@@ -906,7 +909,7 @@ struct e1000_phy_info {
31281 };
31282
31283 struct e1000_nvm_info {
31284- struct e1000_nvm_operations ops;
31285+ e1000_nvm_operations_no_const ops;
31286
31287 enum e1000_nvm_type type;
31288 enum e1000_nvm_override override;
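
The *_no_const typedefs introduced for the e1000e (and, below, igb/igbvf/ixgbe/ixgbevf) ops structures exist because the constification work treats structures made only of function pointers as const by default; drivers that fill an embedded copy at probe time, as e1000_init_mac_params_82571() does with mac->ops, keep a writable variant via the __no_const attribute, which as best understood is interpreted by the constify compiler plugin. A userspace sketch of the pattern with the attribute stubbed out and invented names:

/* Illustrative only: without the constify plugin, __no_const reduces to
 * nothing, so the typedef is simply the plain (writable) struct type,
 * while shared method tables stay const. */
#include <stdio.h>

#ifndef __no_const
# define __no_const			/* plugin attribute, stubbed out here */
#endif

struct mac_operations {
	int (*read_mac_addr)(unsigned char *addr);
};
typedef struct mac_operations __no_const mac_operations_no_const;

static int generic_read_mac_addr(unsigned char *addr)
{
	addr[0] = 0x02;			/* locally administered dummy MAC */
	return 0;
}

/* shared and never modified: can be const */
static const struct mac_operations base_ops = {
	.read_mac_addr = generic_read_mac_addr,
};

struct mac_info {
	mac_operations_no_const ops;	/* per-device copy, patched at probe */
};

int main(void)
{
	struct mac_info mac;
	unsigned char addr[6] = { 0 };

	mac.ops = base_ops;		/* start from the shared table */
	mac.ops.read_mac_addr(addr);	/* per-device override remains possible */
	printf("%02x\n", addr[0]);
	return 0;
}
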
31289diff -urNp linux-3.1.1/drivers/net/fealnx.c linux-3.1.1/drivers/net/fealnx.c
31290--- linux-3.1.1/drivers/net/fealnx.c 2011-11-11 15:19:27.000000000 -0500
31291+++ linux-3.1.1/drivers/net/fealnx.c 2011-11-16 18:39:07.000000000 -0500
31292@@ -150,7 +150,7 @@ struct chip_info {
31293 int flags;
31294 };
31295
31296-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
31297+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
31298 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
31299 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
31300 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
31301diff -urNp linux-3.1.1/drivers/net/hamradio/6pack.c linux-3.1.1/drivers/net/hamradio/6pack.c
31302--- linux-3.1.1/drivers/net/hamradio/6pack.c 2011-11-11 15:19:27.000000000 -0500
31303+++ linux-3.1.1/drivers/net/hamradio/6pack.c 2011-11-16 18:40:22.000000000 -0500
31304@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
31305 unsigned char buf[512];
31306 int count1;
31307
31308+ pax_track_stack();
31309+
31310 if (!count)
31311 return;
31312
31313diff -urNp linux-3.1.1/drivers/net/igb/e1000_hw.h linux-3.1.1/drivers/net/igb/e1000_hw.h
31314--- linux-3.1.1/drivers/net/igb/e1000_hw.h 2011-11-11 15:19:27.000000000 -0500
31315+++ linux-3.1.1/drivers/net/igb/e1000_hw.h 2011-11-16 18:39:07.000000000 -0500
31316@@ -314,6 +314,7 @@ struct e1000_mac_operations {
31317 s32 (*read_mac_addr)(struct e1000_hw *);
31318 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
31319 };
31320+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31321
31322 struct e1000_phy_operations {
31323 s32 (*acquire)(struct e1000_hw *);
31324@@ -330,6 +331,7 @@ struct e1000_phy_operations {
31325 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31326 s32 (*write_reg)(struct e1000_hw *, u32, u16);
31327 };
31328+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31329
31330 struct e1000_nvm_operations {
31331 s32 (*acquire)(struct e1000_hw *);
31332@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
31333 s32 (*update)(struct e1000_hw *);
31334 s32 (*validate)(struct e1000_hw *);
31335 };
31336+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31337
31338 struct e1000_info {
31339 s32 (*get_invariants)(struct e1000_hw *);
31340@@ -350,7 +353,7 @@ struct e1000_info {
31341 extern const struct e1000_info e1000_82575_info;
31342
31343 struct e1000_mac_info {
31344- struct e1000_mac_operations ops;
31345+ e1000_mac_operations_no_const ops;
31346
31347 u8 addr[6];
31348 u8 perm_addr[6];
31349@@ -388,7 +391,7 @@ struct e1000_mac_info {
31350 };
31351
31352 struct e1000_phy_info {
31353- struct e1000_phy_operations ops;
31354+ e1000_phy_operations_no_const ops;
31355
31356 enum e1000_phy_type type;
31357
31358@@ -423,7 +426,7 @@ struct e1000_phy_info {
31359 };
31360
31361 struct e1000_nvm_info {
31362- struct e1000_nvm_operations ops;
31363+ e1000_nvm_operations_no_const ops;
31364 enum e1000_nvm_type type;
31365 enum e1000_nvm_override override;
31366
31367@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
31368 s32 (*check_for_ack)(struct e1000_hw *, u16);
31369 s32 (*check_for_rst)(struct e1000_hw *, u16);
31370 };
31371+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31372
31373 struct e1000_mbx_stats {
31374 u32 msgs_tx;
31375@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
31376 };
31377
31378 struct e1000_mbx_info {
31379- struct e1000_mbx_operations ops;
31380+ e1000_mbx_operations_no_const ops;
31381 struct e1000_mbx_stats stats;
31382 u32 timeout;
31383 u32 usec_delay;
31384diff -urNp linux-3.1.1/drivers/net/igbvf/vf.h linux-3.1.1/drivers/net/igbvf/vf.h
31385--- linux-3.1.1/drivers/net/igbvf/vf.h 2011-11-11 15:19:27.000000000 -0500
31386+++ linux-3.1.1/drivers/net/igbvf/vf.h 2011-11-16 18:39:07.000000000 -0500
31387@@ -189,9 +189,10 @@ struct e1000_mac_operations {
31388 s32 (*read_mac_addr)(struct e1000_hw *);
31389 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
31390 };
31391+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31392
31393 struct e1000_mac_info {
31394- struct e1000_mac_operations ops;
31395+ e1000_mac_operations_no_const ops;
31396 u8 addr[6];
31397 u8 perm_addr[6];
31398
31399@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
31400 s32 (*check_for_ack)(struct e1000_hw *);
31401 s32 (*check_for_rst)(struct e1000_hw *);
31402 };
31403+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31404
31405 struct e1000_mbx_stats {
31406 u32 msgs_tx;
31407@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
31408 };
31409
31410 struct e1000_mbx_info {
31411- struct e1000_mbx_operations ops;
31412+ e1000_mbx_operations_no_const ops;
31413 struct e1000_mbx_stats stats;
31414 u32 timeout;
31415 u32 usec_delay;
31416diff -urNp linux-3.1.1/drivers/net/ixgb/ixgb_main.c linux-3.1.1/drivers/net/ixgb/ixgb_main.c
31417--- linux-3.1.1/drivers/net/ixgb/ixgb_main.c 2011-11-11 15:19:27.000000000 -0500
31418+++ linux-3.1.1/drivers/net/ixgb/ixgb_main.c 2011-11-16 18:40:22.000000000 -0500
31419@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
31420 u32 rctl;
31421 int i;
31422
31423+ pax_track_stack();
31424+
31425 /* Check for Promiscuous and All Multicast modes */
31426
31427 rctl = IXGB_READ_REG(hw, RCTL);
31428diff -urNp linux-3.1.1/drivers/net/ixgb/ixgb_param.c linux-3.1.1/drivers/net/ixgb/ixgb_param.c
31429--- linux-3.1.1/drivers/net/ixgb/ixgb_param.c 2011-11-11 15:19:27.000000000 -0500
31430+++ linux-3.1.1/drivers/net/ixgb/ixgb_param.c 2011-11-16 18:40:22.000000000 -0500
31431@@ -261,6 +261,9 @@ void __devinit
31432 ixgb_check_options(struct ixgb_adapter *adapter)
31433 {
31434 int bd = adapter->bd_number;
31435+
31436+ pax_track_stack();
31437+
31438 if (bd >= IXGB_MAX_NIC) {
31439 pr_notice("Warning: no configuration for board #%i\n", bd);
31440 pr_notice("Using defaults for all values\n");
31441diff -urNp linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h
31442--- linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h 2011-11-11 15:19:27.000000000 -0500
31443+++ linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h 2011-11-16 18:39:07.000000000 -0500
31444@@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations {
31445 s32 (*update_checksum)(struct ixgbe_hw *);
31446 u16 (*calc_checksum)(struct ixgbe_hw *);
31447 };
31448+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
31449
31450 struct ixgbe_mac_operations {
31451 s32 (*init_hw)(struct ixgbe_hw *);
31452@@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations {
31453 /* Manageability interface */
31454 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
31455 };
31456+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31457
31458 struct ixgbe_phy_operations {
31459 s32 (*identify)(struct ixgbe_hw *);
31460@@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations {
31461 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
31462 s32 (*check_overtemp)(struct ixgbe_hw *);
31463 };
31464+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
31465
31466 struct ixgbe_eeprom_info {
31467- struct ixgbe_eeprom_operations ops;
31468+ ixgbe_eeprom_operations_no_const ops;
31469 enum ixgbe_eeprom_type type;
31470 u32 semaphore_delay;
31471 u16 word_size;
31472@@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info {
31473
31474 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
31475 struct ixgbe_mac_info {
31476- struct ixgbe_mac_operations ops;
31477+ ixgbe_mac_operations_no_const ops;
31478 enum ixgbe_mac_type type;
31479 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31480 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31481@@ -2762,7 +2765,7 @@ struct ixgbe_mac_info {
31482 };
31483
31484 struct ixgbe_phy_info {
31485- struct ixgbe_phy_operations ops;
31486+ ixgbe_phy_operations_no_const ops;
31487 struct mdio_if_info mdio;
31488 enum ixgbe_phy_type type;
31489 u32 id;
31490@@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations {
31491 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
31492 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
31493 };
31494+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31495
31496 struct ixgbe_mbx_stats {
31497 u32 msgs_tx;
31498@@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats {
31499 };
31500
31501 struct ixgbe_mbx_info {
31502- struct ixgbe_mbx_operations ops;
31503+ ixgbe_mbx_operations_no_const ops;
31504 struct ixgbe_mbx_stats stats;
31505 u32 timeout;
31506 u32 usec_delay;
31507diff -urNp linux-3.1.1/drivers/net/ixgbevf/vf.h linux-3.1.1/drivers/net/ixgbevf/vf.h
31508--- linux-3.1.1/drivers/net/ixgbevf/vf.h 2011-11-11 15:19:27.000000000 -0500
31509+++ linux-3.1.1/drivers/net/ixgbevf/vf.h 2011-11-16 18:39:07.000000000 -0500
31510@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
31511 s32 (*clear_vfta)(struct ixgbe_hw *);
31512 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
31513 };
31514+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31515
31516 enum ixgbe_mac_type {
31517 ixgbe_mac_unknown = 0,
31518@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
31519 };
31520
31521 struct ixgbe_mac_info {
31522- struct ixgbe_mac_operations ops;
31523+ ixgbe_mac_operations_no_const ops;
31524 u8 addr[6];
31525 u8 perm_addr[6];
31526
31527@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
31528 s32 (*check_for_ack)(struct ixgbe_hw *);
31529 s32 (*check_for_rst)(struct ixgbe_hw *);
31530 };
31531+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31532
31533 struct ixgbe_mbx_stats {
31534 u32 msgs_tx;
31535@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
31536 };
31537
31538 struct ixgbe_mbx_info {
31539- struct ixgbe_mbx_operations ops;
31540+ ixgbe_mbx_operations_no_const ops;
31541 struct ixgbe_mbx_stats stats;
31542 u32 timeout;
31543 u32 udelay;
31544diff -urNp linux-3.1.1/drivers/net/ksz884x.c linux-3.1.1/drivers/net/ksz884x.c
31545--- linux-3.1.1/drivers/net/ksz884x.c 2011-11-11 15:19:27.000000000 -0500
31546+++ linux-3.1.1/drivers/net/ksz884x.c 2011-11-16 18:40:22.000000000 -0500
31547@@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(str
31548 int rc;
31549 u64 counter[TOTAL_PORT_COUNTER_NUM];
31550
31551+ pax_track_stack();
31552+
31553 mutex_lock(&hw_priv->lock);
31554 n = SWITCH_PORT_NUM;
31555 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
31556diff -urNp linux-3.1.1/drivers/net/mlx4/main.c linux-3.1.1/drivers/net/mlx4/main.c
31557--- linux-3.1.1/drivers/net/mlx4/main.c 2011-11-11 15:19:27.000000000 -0500
31558+++ linux-3.1.1/drivers/net/mlx4/main.c 2011-11-16 18:40:22.000000000 -0500
31559@@ -40,6 +40,7 @@
31560 #include <linux/dma-mapping.h>
31561 #include <linux/slab.h>
31562 #include <linux/io-mapping.h>
31563+#include <linux/sched.h>
31564
31565 #include <linux/mlx4/device.h>
31566 #include <linux/mlx4/doorbell.h>
31567@@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev
31568 u64 icm_size;
31569 int err;
31570
31571+ pax_track_stack();
31572+
31573 err = mlx4_QUERY_FW(dev);
31574 if (err) {
31575 if (err == -EACCES)
31576diff -urNp linux-3.1.1/drivers/net/niu.c linux-3.1.1/drivers/net/niu.c
31577--- linux-3.1.1/drivers/net/niu.c 2011-11-11 15:19:27.000000000 -0500
31578+++ linux-3.1.1/drivers/net/niu.c 2011-11-16 18:40:22.000000000 -0500
31579@@ -9061,6 +9061,8 @@ static void __devinit niu_try_msix(struc
31580 int i, num_irqs, err;
31581 u8 first_ldg;
31582
31583+ pax_track_stack();
31584+
31585 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
31586 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
31587 ldg_num_map[i] = first_ldg + i;
31588diff -urNp linux-3.1.1/drivers/net/pcnet32.c linux-3.1.1/drivers/net/pcnet32.c
31589--- linux-3.1.1/drivers/net/pcnet32.c 2011-11-11 15:19:27.000000000 -0500
31590+++ linux-3.1.1/drivers/net/pcnet32.c 2011-11-16 18:39:07.000000000 -0500
31591@@ -270,7 +270,7 @@ struct pcnet32_private {
31592 struct sk_buff **rx_skbuff;
31593 dma_addr_t *tx_dma_addr;
31594 dma_addr_t *rx_dma_addr;
31595- struct pcnet32_access a;
31596+ struct pcnet32_access *a;
31597 spinlock_t lock; /* Guard lock */
31598 unsigned int cur_rx, cur_tx; /* The next free ring entry */
31599 unsigned int rx_ring_size; /* current rx ring size */
31600@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
31601 u16 val;
31602
31603 netif_wake_queue(dev);
31604- val = lp->a.read_csr(ioaddr, CSR3);
31605+ val = lp->a->read_csr(ioaddr, CSR3);
31606 val &= 0x00ff;
31607- lp->a.write_csr(ioaddr, CSR3, val);
31608+ lp->a->write_csr(ioaddr, CSR3, val);
31609 napi_enable(&lp->napi);
31610 }
31611
31612@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
31613 r = mii_link_ok(&lp->mii_if);
31614 } else if (lp->chip_version >= PCNET32_79C970A) {
31615 ulong ioaddr = dev->base_addr; /* card base I/O address */
31616- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31617+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31618 } else { /* can not detect link on really old chips */
31619 r = 1;
31620 }
31621@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
31622 pcnet32_netif_stop(dev);
31623
31624 spin_lock_irqsave(&lp->lock, flags);
31625- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31626+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31627
31628 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
31629
31630@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
31631 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
31632 {
31633 struct pcnet32_private *lp = netdev_priv(dev);
31634- struct pcnet32_access *a = &lp->a; /* access to registers */
31635+ struct pcnet32_access *a = lp->a; /* access to registers */
31636 ulong ioaddr = dev->base_addr; /* card base I/O address */
31637 struct sk_buff *skb; /* sk buff */
31638 int x, i; /* counters */
31639@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
31640 pcnet32_netif_stop(dev);
31641
31642 spin_lock_irqsave(&lp->lock, flags);
31643- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31644+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31645
31646 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
31647
31648 /* Reset the PCNET32 */
31649- lp->a.reset(ioaddr);
31650- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31651+ lp->a->reset(ioaddr);
31652+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31653
31654 /* switch pcnet32 to 32bit mode */
31655- lp->a.write_bcr(ioaddr, 20, 2);
31656+ lp->a->write_bcr(ioaddr, 20, 2);
31657
31658 /* purge & init rings but don't actually restart */
31659 pcnet32_restart(dev, 0x0000);
31660
31661- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31662+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31663
31664 /* Initialize Transmit buffers. */
31665 size = data_len + 15;
31666@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
31667
31668 /* set int loopback in CSR15 */
31669 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
31670- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
31671+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
31672
31673 teststatus = cpu_to_le16(0x8000);
31674- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31675+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31676
31677 /* Check status of descriptors */
31678 for (x = 0; x < numbuffs; x++) {
31679@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
31680 }
31681 }
31682
31683- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31684+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31685 wmb();
31686 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
31687 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
31688@@ -1015,7 +1015,7 @@ clean_up:
31689 pcnet32_restart(dev, CSR0_NORMAL);
31690 } else {
31691 pcnet32_purge_rx_ring(dev);
31692- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31693+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31694 }
31695 spin_unlock_irqrestore(&lp->lock, flags);
31696
31697@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
31698 enum ethtool_phys_id_state state)
31699 {
31700 struct pcnet32_private *lp = netdev_priv(dev);
31701- struct pcnet32_access *a = &lp->a;
31702+ struct pcnet32_access *a = lp->a;
31703 ulong ioaddr = dev->base_addr;
31704 unsigned long flags;
31705 int i;
31706@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
31707 {
31708 int csr5;
31709 struct pcnet32_private *lp = netdev_priv(dev);
31710- struct pcnet32_access *a = &lp->a;
31711+ struct pcnet32_access *a = lp->a;
31712 ulong ioaddr = dev->base_addr;
31713 int ticks;
31714
31715@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
31716 spin_lock_irqsave(&lp->lock, flags);
31717 if (pcnet32_tx(dev)) {
31718 /* reset the chip to clear the error condition, then restart */
31719- lp->a.reset(ioaddr);
31720- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31721+ lp->a->reset(ioaddr);
31722+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31723 pcnet32_restart(dev, CSR0_START);
31724 netif_wake_queue(dev);
31725 }
31726@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
31727 __napi_complete(napi);
31728
31729 /* clear interrupt masks */
31730- val = lp->a.read_csr(ioaddr, CSR3);
31731+ val = lp->a->read_csr(ioaddr, CSR3);
31732 val &= 0x00ff;
31733- lp->a.write_csr(ioaddr, CSR3, val);
31734+ lp->a->write_csr(ioaddr, CSR3, val);
31735
31736 /* Set interrupt enable. */
31737- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
31738+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
31739
31740 spin_unlock_irqrestore(&lp->lock, flags);
31741 }
31742@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
31743 int i, csr0;
31744 u16 *buff = ptr;
31745 struct pcnet32_private *lp = netdev_priv(dev);
31746- struct pcnet32_access *a = &lp->a;
31747+ struct pcnet32_access *a = lp->a;
31748 ulong ioaddr = dev->base_addr;
31749 unsigned long flags;
31750
31751@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
31752 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
31753 if (lp->phymask & (1 << j)) {
31754 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
31755- lp->a.write_bcr(ioaddr, 33,
31756+ lp->a->write_bcr(ioaddr, 33,
31757 (j << 5) | i);
31758- *buff++ = lp->a.read_bcr(ioaddr, 34);
31759+ *buff++ = lp->a->read_bcr(ioaddr, 34);
31760 }
31761 }
31762 }
31763@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31764 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
31765 lp->options |= PCNET32_PORT_FD;
31766
31767- lp->a = *a;
31768+ lp->a = a;
31769
31770 /* prior to register_netdev, dev->name is not yet correct */
31771 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
31772@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31773 if (lp->mii) {
31774 /* lp->phycount and lp->phymask are set to 0 by memset above */
31775
31776- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31777+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31778 /* scan for PHYs */
31779 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31780 unsigned short id1, id2;
31781@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31782 pr_info("Found PHY %04x:%04x at address %d\n",
31783 id1, id2, i);
31784 }
31785- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31786+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31787 if (lp->phycount > 1)
31788 lp->options |= PCNET32_PORT_MII;
31789 }
31790@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
31791 }
31792
31793 /* Reset the PCNET32 */
31794- lp->a.reset(ioaddr);
31795+ lp->a->reset(ioaddr);
31796
31797 /* switch pcnet32 to 32bit mode */
31798- lp->a.write_bcr(ioaddr, 20, 2);
31799+ lp->a->write_bcr(ioaddr, 20, 2);
31800
31801 netif_printk(lp, ifup, KERN_DEBUG, dev,
31802 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
31803@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
31804 (u32) (lp->init_dma_addr));
31805
31806 /* set/reset autoselect bit */
31807- val = lp->a.read_bcr(ioaddr, 2) & ~2;
31808+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
31809 if (lp->options & PCNET32_PORT_ASEL)
31810 val |= 2;
31811- lp->a.write_bcr(ioaddr, 2, val);
31812+ lp->a->write_bcr(ioaddr, 2, val);
31813
31814 /* handle full duplex setting */
31815 if (lp->mii_if.full_duplex) {
31816- val = lp->a.read_bcr(ioaddr, 9) & ~3;
31817+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
31818 if (lp->options & PCNET32_PORT_FD) {
31819 val |= 1;
31820 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
31821@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
31822 if (lp->chip_version == 0x2627)
31823 val |= 3;
31824 }
31825- lp->a.write_bcr(ioaddr, 9, val);
31826+ lp->a->write_bcr(ioaddr, 9, val);
31827 }
31828
31829 /* set/reset GPSI bit in test register */
31830- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
31831+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
31832 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
31833 val |= 0x10;
31834- lp->a.write_csr(ioaddr, 124, val);
31835+ lp->a->write_csr(ioaddr, 124, val);
31836
31837 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
31838 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
31839@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
31840 * duplex, and/or enable auto negotiation, and clear DANAS
31841 */
31842 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
31843- lp->a.write_bcr(ioaddr, 32,
31844- lp->a.read_bcr(ioaddr, 32) | 0x0080);
31845+ lp->a->write_bcr(ioaddr, 32,
31846+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
31847 /* disable Auto Negotiation, set 10Mpbs, HD */
31848- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
31849+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
31850 if (lp->options & PCNET32_PORT_FD)
31851 val |= 0x10;
31852 if (lp->options & PCNET32_PORT_100)
31853 val |= 0x08;
31854- lp->a.write_bcr(ioaddr, 32, val);
31855+ lp->a->write_bcr(ioaddr, 32, val);
31856 } else {
31857 if (lp->options & PCNET32_PORT_ASEL) {
31858- lp->a.write_bcr(ioaddr, 32,
31859- lp->a.read_bcr(ioaddr,
31860+ lp->a->write_bcr(ioaddr, 32,
31861+ lp->a->read_bcr(ioaddr,
31862 32) | 0x0080);
31863 /* enable auto negotiate, setup, disable fd */
31864- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
31865+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
31866 val |= 0x20;
31867- lp->a.write_bcr(ioaddr, 32, val);
31868+ lp->a->write_bcr(ioaddr, 32, val);
31869 }
31870 }
31871 } else {
31872@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
31873 * There is really no good other way to handle multiple PHYs
31874 * other than turning off all automatics
31875 */
31876- val = lp->a.read_bcr(ioaddr, 2);
31877- lp->a.write_bcr(ioaddr, 2, val & ~2);
31878- val = lp->a.read_bcr(ioaddr, 32);
31879- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31880+ val = lp->a->read_bcr(ioaddr, 2);
31881+ lp->a->write_bcr(ioaddr, 2, val & ~2);
31882+ val = lp->a->read_bcr(ioaddr, 32);
31883+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31884
31885 if (!(lp->options & PCNET32_PORT_ASEL)) {
31886 /* setup ecmd */
31887@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
31888 ethtool_cmd_speed_set(&ecmd,
31889 (lp->options & PCNET32_PORT_100) ?
31890 SPEED_100 : SPEED_10);
31891- bcr9 = lp->a.read_bcr(ioaddr, 9);
31892+ bcr9 = lp->a->read_bcr(ioaddr, 9);
31893
31894 if (lp->options & PCNET32_PORT_FD) {
31895 ecmd.duplex = DUPLEX_FULL;
31896@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
31897 ecmd.duplex = DUPLEX_HALF;
31898 bcr9 |= ~(1 << 0);
31899 }
31900- lp->a.write_bcr(ioaddr, 9, bcr9);
31901+ lp->a->write_bcr(ioaddr, 9, bcr9);
31902 }
31903
31904 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31905@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
31906
31907 #ifdef DO_DXSUFLO
31908 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
31909- val = lp->a.read_csr(ioaddr, CSR3);
31910+ val = lp->a->read_csr(ioaddr, CSR3);
31911 val |= 0x40;
31912- lp->a.write_csr(ioaddr, CSR3, val);
31913+ lp->a->write_csr(ioaddr, CSR3, val);
31914 }
31915 #endif
31916
31917@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
31918 napi_enable(&lp->napi);
31919
31920 /* Re-initialize the PCNET32, and start it when done. */
31921- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31922- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31923+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31924+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31925
31926- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31927- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31928+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31929+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31930
31931 netif_start_queue(dev);
31932
31933@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
31934
31935 i = 0;
31936 while (i++ < 100)
31937- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31938+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31939 break;
31940 /*
31941 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
31942 * reports that doing so triggers a bug in the '974.
31943 */
31944- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
31945+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
31946
31947 netif_printk(lp, ifup, KERN_DEBUG, dev,
31948 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
31949 i,
31950 (u32) (lp->init_dma_addr),
31951- lp->a.read_csr(ioaddr, CSR0));
31952+ lp->a->read_csr(ioaddr, CSR0));
31953
31954 spin_unlock_irqrestore(&lp->lock, flags);
31955
31956@@ -2218,7 +2218,7 @@ err_free_ring:
31957 * Switch back to 16bit mode to avoid problems with dumb
31958 * DOS packet driver after a warm reboot
31959 */
31960- lp->a.write_bcr(ioaddr, 20, 4);
31961+ lp->a->write_bcr(ioaddr, 20, 4);
31962
31963 err_free_irq:
31964 spin_unlock_irqrestore(&lp->lock, flags);
31965@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
31966
31967 /* wait for stop */
31968 for (i = 0; i < 100; i++)
31969- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
31970+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
31971 break;
31972
31973 if (i >= 100)
31974@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
31975 return;
31976
31977 /* ReInit Ring */
31978- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31979+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31980 i = 0;
31981 while (i++ < 1000)
31982- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31983+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31984 break;
31985
31986- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
31987+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
31988 }
31989
31990 static void pcnet32_tx_timeout(struct net_device *dev)
31991@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
31992 /* Transmitter timeout, serious problems. */
31993 if (pcnet32_debug & NETIF_MSG_DRV)
31994 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
31995- dev->name, lp->a.read_csr(ioaddr, CSR0));
31996- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31997+ dev->name, lp->a->read_csr(ioaddr, CSR0));
31998+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31999 dev->stats.tx_errors++;
32000 if (netif_msg_tx_err(lp)) {
32001 int i;
32002@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32003
32004 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
32005 "%s() called, csr0 %4.4x\n",
32006- __func__, lp->a.read_csr(ioaddr, CSR0));
32007+ __func__, lp->a->read_csr(ioaddr, CSR0));
32008
32009 /* Default status -- will not enable Successful-TxDone
32010 * interrupt when that option is available to us.
32011@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32012 dev->stats.tx_bytes += skb->len;
32013
32014 /* Trigger an immediate send poll. */
32015- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32016+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32017
32018 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
32019 lp->tx_full = 1;
32020@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
32021
32022 spin_lock(&lp->lock);
32023
32024- csr0 = lp->a.read_csr(ioaddr, CSR0);
32025+ csr0 = lp->a->read_csr(ioaddr, CSR0);
32026 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
32027 if (csr0 == 0xffff)
32028 break; /* PCMCIA remove happened */
32029 /* Acknowledge all of the current interrupt sources ASAP. */
32030- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32031+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32032
32033 netif_printk(lp, intr, KERN_DEBUG, dev,
32034 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
32035- csr0, lp->a.read_csr(ioaddr, CSR0));
32036+ csr0, lp->a->read_csr(ioaddr, CSR0));
32037
32038 /* Log misc errors. */
32039 if (csr0 & 0x4000)
32040@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
32041 if (napi_schedule_prep(&lp->napi)) {
32042 u16 val;
32043 /* set interrupt masks */
32044- val = lp->a.read_csr(ioaddr, CSR3);
32045+ val = lp->a->read_csr(ioaddr, CSR3);
32046 val |= 0x5f00;
32047- lp->a.write_csr(ioaddr, CSR3, val);
32048+ lp->a->write_csr(ioaddr, CSR3, val);
32049
32050 __napi_schedule(&lp->napi);
32051 break;
32052 }
32053- csr0 = lp->a.read_csr(ioaddr, CSR0);
32054+ csr0 = lp->a->read_csr(ioaddr, CSR0);
32055 }
32056
32057 netif_printk(lp, intr, KERN_DEBUG, dev,
32058 "exiting interrupt, csr0=%#4.4x\n",
32059- lp->a.read_csr(ioaddr, CSR0));
32060+ lp->a->read_csr(ioaddr, CSR0));
32061
32062 spin_unlock(&lp->lock);
32063
32064@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
32065
32066 spin_lock_irqsave(&lp->lock, flags);
32067
32068- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32069+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32070
32071 netif_printk(lp, ifdown, KERN_DEBUG, dev,
32072 "Shutting down ethercard, status was %2.2x\n",
32073- lp->a.read_csr(ioaddr, CSR0));
32074+ lp->a->read_csr(ioaddr, CSR0));
32075
32076 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
32077- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32078+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32079
32080 /*
32081 * Switch back to 16bit mode to avoid problems with dumb
32082 * DOS packet driver after a warm reboot
32083 */
32084- lp->a.write_bcr(ioaddr, 20, 4);
32085+ lp->a->write_bcr(ioaddr, 20, 4);
32086
32087 spin_unlock_irqrestore(&lp->lock, flags);
32088
32089@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
32090 unsigned long flags;
32091
32092 spin_lock_irqsave(&lp->lock, flags);
32093- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32094+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32095 spin_unlock_irqrestore(&lp->lock, flags);
32096
32097 return &dev->stats;
32098@@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struc
32099 if (dev->flags & IFF_ALLMULTI) {
32100 ib->filter[0] = cpu_to_le32(~0U);
32101 ib->filter[1] = cpu_to_le32(~0U);
32102- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32103- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32104- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32105- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32106+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32107+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32108+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32109+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32110 return;
32111 }
32112 /* clear the multicast filter */
32113@@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struc
32114 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
32115 }
32116 for (i = 0; i < 4; i++)
32117- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
32118+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
32119 le16_to_cpu(mcast_table[i]));
32120 }
32121
32122@@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(s
32123
32124 spin_lock_irqsave(&lp->lock, flags);
32125 suspended = pcnet32_suspend(dev, &flags, 0);
32126- csr15 = lp->a.read_csr(ioaddr, CSR15);
32127+ csr15 = lp->a->read_csr(ioaddr, CSR15);
32128 if (dev->flags & IFF_PROMISC) {
32129 /* Log any net taps. */
32130 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
32131 lp->init_block->mode =
32132 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
32133 7);
32134- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
32135+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
32136 } else {
32137 lp->init_block->mode =
32138 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
32139- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32140+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32141 pcnet32_load_multicast(dev);
32142 }
32143
32144 if (suspended) {
32145 int csr5;
32146 /* clear SUSPEND (SPND) - CSR5 bit 0 */
32147- csr5 = lp->a.read_csr(ioaddr, CSR5);
32148- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32149+ csr5 = lp->a->read_csr(ioaddr, CSR5);
32150+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32151 } else {
32152- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32153+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32154 pcnet32_restart(dev, CSR0_NORMAL);
32155 netif_wake_queue(dev);
32156 }
32157@@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *
32158 if (!lp->mii)
32159 return 0;
32160
32161- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32162- val_out = lp->a.read_bcr(ioaddr, 34);
32163+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32164+ val_out = lp->a->read_bcr(ioaddr, 34);
32165
32166 return val_out;
32167 }
32168@@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device
32169 if (!lp->mii)
32170 return;
32171
32172- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32173- lp->a.write_bcr(ioaddr, 34, val);
32174+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32175+ lp->a->write_bcr(ioaddr, 34, val);
32176 }
32177
32178 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32179@@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct n
32180 curr_link = mii_link_ok(&lp->mii_if);
32181 } else {
32182 ulong ioaddr = dev->base_addr; /* card base I/O address */
32183- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
32184+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
32185 }
32186 if (!curr_link) {
32187 if (prev_link || verbose) {
32188@@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct n
32189 (ecmd.duplex == DUPLEX_FULL)
32190 ? "full" : "half");
32191 }
32192- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
32193+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
32194 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
32195 if (lp->mii_if.full_duplex)
32196 bcr9 |= (1 << 0);
32197 else
32198 bcr9 &= ~(1 << 0);
32199- lp->a.write_bcr(dev->base_addr, 9, bcr9);
32200+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
32201 }
32202 } else {
32203 netif_info(lp, link, dev, "link up\n");
32204diff -urNp linux-3.1.1/drivers/net/ppp_generic.c linux-3.1.1/drivers/net/ppp_generic.c
32205--- linux-3.1.1/drivers/net/ppp_generic.c 2011-11-11 15:19:27.000000000 -0500
32206+++ linux-3.1.1/drivers/net/ppp_generic.c 2011-11-16 18:39:07.000000000 -0500
32207@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
32208 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32209 struct ppp_stats stats;
32210 struct ppp_comp_stats cstats;
32211- char *vers;
32212
32213 switch (cmd) {
32214 case SIOCGPPPSTATS:
32215@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
32216 break;
32217
32218 case SIOCGPPPVER:
32219- vers = PPP_VERSION;
32220- if (copy_to_user(addr, vers, strlen(vers) + 1))
32221+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32222 break;
32223 err = 0;
32224 break;
32225diff -urNp linux-3.1.1/drivers/net/r8169.c linux-3.1.1/drivers/net/r8169.c
32226--- linux-3.1.1/drivers/net/r8169.c 2011-11-11 15:19:27.000000000 -0500
32227+++ linux-3.1.1/drivers/net/r8169.c 2011-11-16 18:39:07.000000000 -0500
32228@@ -663,12 +663,12 @@ struct rtl8169_private {
32229 struct mdio_ops {
32230 void (*write)(void __iomem *, int, int);
32231 int (*read)(void __iomem *, int);
32232- } mdio_ops;
32233+ } __no_const mdio_ops;
32234
32235 struct pll_power_ops {
32236 void (*down)(struct rtl8169_private *);
32237 void (*up)(struct rtl8169_private *);
32238- } pll_power_ops;
32239+ } __no_const pll_power_ops;
32240
32241 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32242 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32243diff -urNp linux-3.1.1/drivers/net/sis190.c linux-3.1.1/drivers/net/sis190.c
32244--- linux-3.1.1/drivers/net/sis190.c 2011-11-11 15:19:27.000000000 -0500
32245+++ linux-3.1.1/drivers/net/sis190.c 2011-11-16 18:39:07.000000000 -0500
32246@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr
32247 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32248 struct net_device *dev)
32249 {
32250- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32251+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32252 struct sis190_private *tp = netdev_priv(dev);
32253 struct pci_dev *isa_bridge;
32254 u8 reg, tmp8;
32255diff -urNp linux-3.1.1/drivers/net/sundance.c linux-3.1.1/drivers/net/sundance.c
32256--- linux-3.1.1/drivers/net/sundance.c 2011-11-11 15:19:27.000000000 -0500
32257+++ linux-3.1.1/drivers/net/sundance.c 2011-11-16 18:39:07.000000000 -0500
32258@@ -218,7 +218,7 @@ enum {
32259 struct pci_id_info {
32260 const char *name;
32261 };
32262-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32263+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32264 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32265 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32266 {"D-Link DFE-580TX 4 port Server Adapter"},
32267diff -urNp linux-3.1.1/drivers/net/tg3.h linux-3.1.1/drivers/net/tg3.h
32268--- linux-3.1.1/drivers/net/tg3.h 2011-11-11 15:19:27.000000000 -0500
32269+++ linux-3.1.1/drivers/net/tg3.h 2011-11-16 18:39:07.000000000 -0500
32270@@ -134,6 +134,7 @@
32271 #define CHIPREV_ID_5750_A0 0x4000
32272 #define CHIPREV_ID_5750_A1 0x4001
32273 #define CHIPREV_ID_5750_A3 0x4003
32274+#define CHIPREV_ID_5750_C1 0x4201
32275 #define CHIPREV_ID_5750_C2 0x4202
32276 #define CHIPREV_ID_5752_A0_HW 0x5000
32277 #define CHIPREV_ID_5752_A0 0x6000
32278diff -urNp linux-3.1.1/drivers/net/tokenring/abyss.c linux-3.1.1/drivers/net/tokenring/abyss.c
32279--- linux-3.1.1/drivers/net/tokenring/abyss.c 2011-11-11 15:19:27.000000000 -0500
32280+++ linux-3.1.1/drivers/net/tokenring/abyss.c 2011-11-16 18:39:07.000000000 -0500
32281@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
32282
32283 static int __init abyss_init (void)
32284 {
32285- abyss_netdev_ops = tms380tr_netdev_ops;
32286+ pax_open_kernel();
32287+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32288
32289- abyss_netdev_ops.ndo_open = abyss_open;
32290- abyss_netdev_ops.ndo_stop = abyss_close;
32291+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32292+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32293+ pax_close_kernel();
32294
32295 return pci_register_driver(&abyss_driver);
32296 }
32297diff -urNp linux-3.1.1/drivers/net/tokenring/madgemc.c linux-3.1.1/drivers/net/tokenring/madgemc.c
32298--- linux-3.1.1/drivers/net/tokenring/madgemc.c 2011-11-11 15:19:27.000000000 -0500
32299+++ linux-3.1.1/drivers/net/tokenring/madgemc.c 2011-11-16 18:39:07.000000000 -0500
32300@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
32301
32302 static int __init madgemc_init (void)
32303 {
32304- madgemc_netdev_ops = tms380tr_netdev_ops;
32305- madgemc_netdev_ops.ndo_open = madgemc_open;
32306- madgemc_netdev_ops.ndo_stop = madgemc_close;
32307+ pax_open_kernel();
32308+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32309+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32310+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32311+ pax_close_kernel();
32312
32313 return mca_register_driver (&madgemc_driver);
32314 }
32315diff -urNp linux-3.1.1/drivers/net/tokenring/proteon.c linux-3.1.1/drivers/net/tokenring/proteon.c
32316--- linux-3.1.1/drivers/net/tokenring/proteon.c 2011-11-11 15:19:27.000000000 -0500
32317+++ linux-3.1.1/drivers/net/tokenring/proteon.c 2011-11-16 18:39:07.000000000 -0500
32318@@ -353,9 +353,11 @@ static int __init proteon_init(void)
32319 struct platform_device *pdev;
32320 int i, num = 0, err = 0;
32321
32322- proteon_netdev_ops = tms380tr_netdev_ops;
32323- proteon_netdev_ops.ndo_open = proteon_open;
32324- proteon_netdev_ops.ndo_stop = tms380tr_close;
32325+ pax_open_kernel();
32326+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32327+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32328+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32329+ pax_close_kernel();
32330
32331 err = platform_driver_register(&proteon_driver);
32332 if (err)
32333diff -urNp linux-3.1.1/drivers/net/tokenring/skisa.c linux-3.1.1/drivers/net/tokenring/skisa.c
32334--- linux-3.1.1/drivers/net/tokenring/skisa.c 2011-11-11 15:19:27.000000000 -0500
32335+++ linux-3.1.1/drivers/net/tokenring/skisa.c 2011-11-16 18:39:07.000000000 -0500
32336@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32337 struct platform_device *pdev;
32338 int i, num = 0, err = 0;
32339
32340- sk_isa_netdev_ops = tms380tr_netdev_ops;
32341- sk_isa_netdev_ops.ndo_open = sk_isa_open;
32342- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32343+ pax_open_kernel();
32344+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32345+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32346+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32347+ pax_close_kernel();
32348
32349 err = platform_driver_register(&sk_isa_driver);
32350 if (err)
32351diff -urNp linux-3.1.1/drivers/net/tulip/de2104x.c linux-3.1.1/drivers/net/tulip/de2104x.c
32352--- linux-3.1.1/drivers/net/tulip/de2104x.c 2011-11-11 15:19:27.000000000 -0500
32353+++ linux-3.1.1/drivers/net/tulip/de2104x.c 2011-11-16 18:40:22.000000000 -0500
32354@@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_i
32355 struct de_srom_info_leaf *il;
32356 void *bufp;
32357
32358+ pax_track_stack();
32359+
32360 /* download entire eeprom */
32361 for (i = 0; i < DE_EEPROM_WORDS; i++)
32362 ((__le16 *)ee_data)[i] =
32363diff -urNp linux-3.1.1/drivers/net/tulip/de4x5.c linux-3.1.1/drivers/net/tulip/de4x5.c
32364--- linux-3.1.1/drivers/net/tulip/de4x5.c 2011-11-11 15:19:27.000000000 -0500
32365+++ linux-3.1.1/drivers/net/tulip/de4x5.c 2011-11-16 18:39:07.000000000 -0500
32366@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, stru
32367 for (i=0; i<ETH_ALEN; i++) {
32368 tmp.addr[i] = dev->dev_addr[i];
32369 }
32370- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32371+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32372 break;
32373
32374 case DE4X5_SET_HWADDR: /* Set the hardware address */
32375@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, stru
32376 spin_lock_irqsave(&lp->lock, flags);
32377 memcpy(&statbuf, &lp->pktStats, ioc->len);
32378 spin_unlock_irqrestore(&lp->lock, flags);
32379- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32380+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32381 return -EFAULT;
32382 break;
32383 }
32384diff -urNp linux-3.1.1/drivers/net/tulip/eeprom.c linux-3.1.1/drivers/net/tulip/eeprom.c
32385--- linux-3.1.1/drivers/net/tulip/eeprom.c 2011-11-11 15:19:27.000000000 -0500
32386+++ linux-3.1.1/drivers/net/tulip/eeprom.c 2011-11-16 18:39:07.000000000 -0500
32387@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups
32388 {NULL}};
32389
32390
32391-static const char *block_name[] __devinitdata = {
32392+static const char *block_name[] __devinitconst = {
32393 "21140 non-MII",
32394 "21140 MII PHY",
32395 "21142 Serial PHY",
32396diff -urNp linux-3.1.1/drivers/net/tulip/winbond-840.c linux-3.1.1/drivers/net/tulip/winbond-840.c
32397--- linux-3.1.1/drivers/net/tulip/winbond-840.c 2011-11-11 15:19:27.000000000 -0500
32398+++ linux-3.1.1/drivers/net/tulip/winbond-840.c 2011-11-16 18:39:07.000000000 -0500
32399@@ -236,7 +236,7 @@ struct pci_id_info {
32400 int drv_flags; /* Driver use, intended as capability flags. */
32401 };
32402
32403-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32404+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32405 { /* Sometime a Level-One switch card. */
32406 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32407 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32408diff -urNp linux-3.1.1/drivers/net/usb/hso.c linux-3.1.1/drivers/net/usb/hso.c
32409--- linux-3.1.1/drivers/net/usb/hso.c 2011-11-11 15:19:27.000000000 -0500
32410+++ linux-3.1.1/drivers/net/usb/hso.c 2011-11-16 18:39:07.000000000 -0500
32411@@ -71,7 +71,7 @@
32412 #include <asm/byteorder.h>
32413 #include <linux/serial_core.h>
32414 #include <linux/serial.h>
32415-
32416+#include <asm/local.h>
32417
32418 #define MOD_AUTHOR "Option Wireless"
32419 #define MOD_DESCRIPTION "USB High Speed Option driver"
32420@@ -257,7 +257,7 @@ struct hso_serial {
32421
32422 /* from usb_serial_port */
32423 struct tty_struct *tty;
32424- int open_count;
32425+ local_t open_count;
32426 spinlock_t serial_lock;
32427
32428 int (*write_data) (struct hso_serial *serial);
32429@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
32430 struct urb *urb;
32431
32432 urb = serial->rx_urb[0];
32433- if (serial->open_count > 0) {
32434+ if (local_read(&serial->open_count) > 0) {
32435 count = put_rxbuf_data(urb, serial);
32436 if (count == -1)
32437 return;
32438@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
32439 DUMP1(urb->transfer_buffer, urb->actual_length);
32440
32441 /* Anyone listening? */
32442- if (serial->open_count == 0)
32443+ if (local_read(&serial->open_count) == 0)
32444 return;
32445
32446 if (status == 0) {
32447@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32448 spin_unlock_irq(&serial->serial_lock);
32449
32450 /* check for port already opened, if not set the termios */
32451- serial->open_count++;
32452- if (serial->open_count == 1) {
32453+ if (local_inc_return(&serial->open_count) == 1) {
32454 serial->rx_state = RX_IDLE;
32455 /* Force default termio settings */
32456 _hso_serial_set_termios(tty, NULL);
32457@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
32458 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32459 if (result) {
32460 hso_stop_serial_device(serial->parent);
32461- serial->open_count--;
32462+ local_dec(&serial->open_count);
32463 kref_put(&serial->parent->ref, hso_serial_ref_free);
32464 }
32465 } else {
32466@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
32467
32468 /* reset the rts and dtr */
32469 /* do the actual close */
32470- serial->open_count--;
32471+ local_dec(&serial->open_count);
32472
32473- if (serial->open_count <= 0) {
32474- serial->open_count = 0;
32475+ if (local_read(&serial->open_count) <= 0) {
32476+ local_set(&serial->open_count, 0);
32477 spin_lock_irq(&serial->serial_lock);
32478 if (serial->tty == tty) {
32479 serial->tty->driver_data = NULL;
32480@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
32481
32482 /* the actual setup */
32483 spin_lock_irqsave(&serial->serial_lock, flags);
32484- if (serial->open_count)
32485+ if (local_read(&serial->open_count))
32486 _hso_serial_set_termios(tty, old);
32487 else
32488 tty->termios = old;
32489@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
32490 D1("Pending read interrupt on port %d\n", i);
32491 spin_lock(&serial->serial_lock);
32492 if (serial->rx_state == RX_IDLE &&
32493- serial->open_count > 0) {
32494+ local_read(&serial->open_count) > 0) {
32495 /* Setup and send a ctrl req read on
32496 * port i */
32497 if (!serial->rx_urb_filled[0]) {
32498@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
32499 /* Start all serial ports */
32500 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32501 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32502- if (dev2ser(serial_table[i])->open_count) {
32503+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
32504 result =
32505 hso_start_serial_device(serial_table[i], GFP_NOIO);
32506 hso_kick_transmit(dev2ser(serial_table[i]));
32507diff -urNp linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c
32508--- linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-11-11 15:19:27.000000000 -0500
32509+++ linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-11-16 18:39:07.000000000 -0500
32510@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device
32511 * Return with error code if any of the queue indices
32512 * is out of range
32513 */
32514- if (p->ring_index[i] < 0 ||
32515- p->ring_index[i] >= adapter->num_rx_queues)
32516+ if (p->ring_index[i] >= adapter->num_rx_queues)
32517 return -EINVAL;
32518 }
32519
32520diff -urNp linux-3.1.1/drivers/net/vxge/vxge-config.h linux-3.1.1/drivers/net/vxge/vxge-config.h
32521--- linux-3.1.1/drivers/net/vxge/vxge-config.h 2011-11-11 15:19:27.000000000 -0500
32522+++ linux-3.1.1/drivers/net/vxge/vxge-config.h 2011-11-16 18:39:07.000000000 -0500
32523@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32524 void (*link_down)(struct __vxge_hw_device *devh);
32525 void (*crit_err)(struct __vxge_hw_device *devh,
32526 enum vxge_hw_event type, u64 ext_data);
32527-};
32528+} __no_const;
32529
32530 /*
32531 * struct __vxge_hw_blockpool_entry - Block private data structure
32532diff -urNp linux-3.1.1/drivers/net/vxge/vxge-main.c linux-3.1.1/drivers/net/vxge/vxge-main.c
32533--- linux-3.1.1/drivers/net/vxge/vxge-main.c 2011-11-11 15:19:27.000000000 -0500
32534+++ linux-3.1.1/drivers/net/vxge/vxge-main.c 2011-11-16 18:40:22.000000000 -0500
32535@@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32536 struct sk_buff *completed[NR_SKB_COMPLETED];
32537 int more;
32538
32539+ pax_track_stack();
32540+
32541 do {
32542 more = 0;
32543 skb_ptr = completed;
32544@@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_conf
32545 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32546 int index;
32547
32548+ pax_track_stack();
32549+
32550 /*
32551 * Filling
32552 * - itable with bucket numbers
32553diff -urNp linux-3.1.1/drivers/net/vxge/vxge-traffic.h linux-3.1.1/drivers/net/vxge/vxge-traffic.h
32554--- linux-3.1.1/drivers/net/vxge/vxge-traffic.h 2011-11-11 15:19:27.000000000 -0500
32555+++ linux-3.1.1/drivers/net/vxge/vxge-traffic.h 2011-11-16 18:39:07.000000000 -0500
32556@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32557 struct vxge_hw_mempool_dma *dma_object,
32558 u32 index,
32559 u32 is_last);
32560-};
32561+} __no_const;
32562
32563 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32564 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32565diff -urNp linux-3.1.1/drivers/net/wan/hdlc_x25.c linux-3.1.1/drivers/net/wan/hdlc_x25.c
32566--- linux-3.1.1/drivers/net/wan/hdlc_x25.c 2011-11-11 15:19:27.000000000 -0500
32567+++ linux-3.1.1/drivers/net/wan/hdlc_x25.c 2011-11-16 18:39:07.000000000 -0500
32568@@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
32569
32570 static int x25_open(struct net_device *dev)
32571 {
32572- struct lapb_register_struct cb;
32573+ static struct lapb_register_struct cb = {
32574+ .connect_confirmation = x25_connected,
32575+ .connect_indication = x25_connected,
32576+ .disconnect_confirmation = x25_disconnected,
32577+ .disconnect_indication = x25_disconnected,
32578+ .data_indication = x25_data_indication,
32579+ .data_transmit = x25_data_transmit
32580+ };
32581 int result;
32582
32583- cb.connect_confirmation = x25_connected;
32584- cb.connect_indication = x25_connected;
32585- cb.disconnect_confirmation = x25_disconnected;
32586- cb.disconnect_indication = x25_disconnected;
32587- cb.data_indication = x25_data_indication;
32588- cb.data_transmit = x25_data_transmit;
32589-
32590 result = lapb_register(dev, &cb);
32591 if (result != LAPB_OK)
32592 return result;
32593diff -urNp linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c
32594--- linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c 2011-11-11 15:19:27.000000000 -0500
32595+++ linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c 2011-11-16 18:40:22.000000000 -0500
32596@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32597 int do_autopm = 1;
32598 DECLARE_COMPLETION_ONSTACK(notif_completion);
32599
32600+ pax_track_stack();
32601+
32602 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32603 i2400m, ack, ack_size);
32604 BUG_ON(_ack == i2400m->bm_ack_buf);
32605diff -urNp linux-3.1.1/drivers/net/wireless/airo.c linux-3.1.1/drivers/net/wireless/airo.c
32606--- linux-3.1.1/drivers/net/wireless/airo.c 2011-11-11 15:19:27.000000000 -0500
32607+++ linux-3.1.1/drivers/net/wireless/airo.c 2011-11-16 18:40:22.000000000 -0500
32608@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32609 BSSListElement * loop_net;
32610 BSSListElement * tmp_net;
32611
32612+ pax_track_stack();
32613+
32614 /* Blow away current list of scan results */
32615 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32616 list_move_tail (&loop_net->list, &ai->network_free_list);
32617@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
32618 WepKeyRid wkr;
32619 int rc;
32620
32621+ pax_track_stack();
32622+
32623 memset( &mySsid, 0, sizeof( mySsid ) );
32624 kfree (ai->flash);
32625 ai->flash = NULL;
32626@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
32627 __le32 *vals = stats.vals;
32628 int len;
32629
32630+ pax_track_stack();
32631+
32632 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32633 return -ENOMEM;
32634 data = file->private_data;
32635@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
32636 /* If doLoseSync is not 1, we won't do a Lose Sync */
32637 int doLoseSync = -1;
32638
32639+ pax_track_stack();
32640+
32641 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32642 return -ENOMEM;
32643 data = file->private_data;
32644@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
32645 int i;
32646 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32647
32648+ pax_track_stack();
32649+
32650 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32651 if (!qual)
32652 return -ENOMEM;
32653@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
32654 CapabilityRid cap_rid;
32655 __le32 *vals = stats_rid.vals;
32656
32657+ pax_track_stack();
32658+
32659 /* Get stats out of the card */
32660 clear_bit(JOB_WSTATS, &local->jobs);
32661 if (local->power.event) {
32662diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c
32663--- linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c 2011-11-11 15:19:27.000000000 -0500
32664+++ linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c 2011-11-16 19:08:21.000000000 -0500
32665@@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct f
32666 unsigned int v;
32667 u64 tsf;
32668
32669+ pax_track_stack();
32670+
32671 v = ath5k_hw_reg_read(ah, AR5K_BEACON);
32672 len += snprintf(buf + len, sizeof(buf) - len,
32673 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32674@@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct fi
32675 unsigned int len = 0;
32676 unsigned int i;
32677
32678+ pax_track_stack();
32679+
32680 len += snprintf(buf + len, sizeof(buf) - len,
32681 "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
32682
32683@@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct fil
32684 unsigned int len = 0;
32685 u32 filt = ath5k_hw_get_rx_filter(ah);
32686
32687+ pax_track_stack();
32688+
32689 len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
32690 ah->bssidmask);
32691 len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
32692@@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(str
32693 unsigned int len = 0;
32694 int i;
32695
32696+ pax_track_stack();
32697+
32698 len += snprintf(buf + len, sizeof(buf) - len,
32699 "RX\n---------------------\n");
32700 len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
32701@@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file
32702 char buf[700];
32703 unsigned int len = 0;
32704
32705+ pax_track_stack();
32706+
32707 len += snprintf(buf + len, sizeof(buf) - len,
32708 "HW has PHY error counters:\t%s\n",
32709 ah->ah_capabilities.cap_has_phyerr_counters ?
32710@@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct fi
32711 struct ath5k_buf *bf, *bf0;
32712 int i, n;
32713
32714+ pax_track_stack();
32715+
32716 len += snprintf(buf + len, sizeof(buf) - len,
32717 "available txbuffers: %d\n", ah->txbuf_len);
32718
32719diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c
32720--- linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-11-11 15:19:27.000000000 -0500
32721+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-11-16 18:40:22.000000000 -0500
32722@@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
32723 int i, im, j;
32724 int nmeasurement;
32725
32726+ pax_track_stack();
32727+
32728 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
32729 if (ah->txchainmask & (1 << i))
32730 num_chains++;
32731diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
32732--- linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-11-11 15:19:27.000000000 -0500
32733+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-11-16 18:40:22.000000000 -0500
32734@@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L,
32735 int theta_low_bin = 0;
32736 int i;
32737
32738+ pax_track_stack();
32739+
32740 /* disregard any bin that contains <= 16 samples */
32741 thresh_accum_cnt = 16;
32742 scale_factor = 5;
32743diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c
32744--- linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c 2011-11-11 15:19:27.000000000 -0500
32745+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c 2011-11-16 18:40:22.000000000 -0500
32746@@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struc
32747 char buf[512];
32748 unsigned int len = 0;
32749
32750+ pax_track_stack();
32751+
32752 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
32753 len += snprintf(buf + len, sizeof(buf) - len,
32754 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
32755@@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct fi
32756 u8 addr[ETH_ALEN];
32757 u32 tmp;
32758
32759+ pax_track_stack();
32760+
32761 len += snprintf(buf + len, sizeof(buf) - len,
32762 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
32763 wiphy_name(sc->hw->wiphy),
32764diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
32765--- linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-11-11 15:19:27.000000000 -0500
32766+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-11-16 18:40:22.000000000 -0500
32767@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
32768 unsigned int len = 0;
32769 int ret = 0;
32770
32771+ pax_track_stack();
32772+
32773 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32774
32775 ath9k_htc_ps_wakeup(priv);
32776@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
32777 unsigned int len = 0;
32778 int ret = 0;
32779
32780+ pax_track_stack();
32781+
32782 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32783
32784 ath9k_htc_ps_wakeup(priv);
32785@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
32786 unsigned int len = 0;
32787 int ret = 0;
32788
32789+ pax_track_stack();
32790+
32791 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32792
32793 ath9k_htc_ps_wakeup(priv);
32794@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
32795 char buf[512];
32796 unsigned int len = 0;
32797
32798+ pax_track_stack();
32799+
32800 len += snprintf(buf + len, sizeof(buf) - len,
32801 "%20s : %10u\n", "Buffers queued",
32802 priv->debug.tx_stats.buf_queued);
32803@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
32804 char buf[512];
32805 unsigned int len = 0;
32806
32807+ pax_track_stack();
32808+
32809 spin_lock_bh(&priv->tx.tx_lock);
32810
32811 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
32812@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
32813 char buf[512];
32814 unsigned int len = 0;
32815
32816+ pax_track_stack();
32817+
32818 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
32819 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
32820
32821diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h
32822--- linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h 2011-11-11 15:19:27.000000000 -0500
32823+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h 2011-11-16 18:39:07.000000000 -0500
32824@@ -588,7 +588,7 @@ struct ath_hw_private_ops {
32825
32826 /* ANI */
32827 void (*ani_cache_ini_regs)(struct ath_hw *ah);
32828-};
32829+} __no_const;
32830
32831 /**
32832 * struct ath_hw_ops - callbacks used by hardware code and driver code
32833@@ -639,7 +639,7 @@ struct ath_hw_ops {
32834 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
32835 struct ath_hw_antcomb_conf *antconf);
32836
32837-};
32838+} __no_const;
32839
32840 struct ath_nf_limits {
32841 s16 max;
32842@@ -652,7 +652,7 @@ struct ath_nf_limits {
32843 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
32844
32845 struct ath_hw {
32846- struct ath_ops reg_ops;
32847+ ath_ops_no_const reg_ops;
32848
32849 struct ieee80211_hw *hw;
32850 struct ath_common common;
32851diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath.h linux-3.1.1/drivers/net/wireless/ath/ath.h
32852--- linux-3.1.1/drivers/net/wireless/ath/ath.h 2011-11-11 15:19:27.000000000 -0500
32853+++ linux-3.1.1/drivers/net/wireless/ath/ath.h 2011-11-16 18:39:07.000000000 -0500
32854@@ -121,6 +121,7 @@ struct ath_ops {
32855 void (*write_flush) (void *);
32856 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
32857 };
32858+typedef struct ath_ops __no_const ath_ops_no_const;
32859
32860 struct ath_common;
32861 struct ath_bus_ops;
32862diff -urNp linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c
32863--- linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-11 15:19:27.000000000 -0500
32864+++ linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-16 18:40:22.000000000 -0500
32865@@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2
32866 int err;
32867 DECLARE_SSID_BUF(ssid);
32868
32869+ pax_track_stack();
32870+
32871 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32872
32873 if (ssid_len)
32874@@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw210
32875 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32876 int err;
32877
32878+ pax_track_stack();
32879+
32880 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32881 idx, keylen, len);
32882
32883diff -urNp linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c
32884--- linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-11 15:19:27.000000000 -0500
32885+++ linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-16 18:40:22.000000000 -0500
32886@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
32887 unsigned long flags;
32888 DECLARE_SSID_BUF(ssid);
32889
32890+ pax_track_stack();
32891+
32892 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32893 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32894 print_ssid(ssid, info_element->data, info_element->len),
32895diff -urNp linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c
32896--- linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-11-11 15:19:27.000000000 -0500
32897+++ linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-11-16 18:39:07.000000000 -0500
32898@@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_
32899 */
32900 if (iwl3945_mod_params.disable_hw_scan) {
32901 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
32902- iwl3945_hw_ops.hw_scan = NULL;
32903+ pax_open_kernel();
32904+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
32905+ pax_close_kernel();
32906 }
32907
32908 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
32909diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32910--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-11 15:19:27.000000000 -0500
32911+++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-16 18:40:22.000000000 -0500
32912@@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, s
32913 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
32914 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
32915
32916+ pax_track_stack();
32917+
32918 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32919
32920 /* Treat uninitialized rate scaling data same as non-existing. */
32921@@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_
32922 container_of(lq_sta, struct iwl_station_priv, lq_sta);
32923 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32924
32925+ pax_track_stack();
32926+
32927 /* Override starting rate (index 0) if needed for debug purposes */
32928 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32929
32930diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32931--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-11 15:19:27.000000000 -0500
32932+++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-16 18:40:22.000000000 -0500
32933@@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(str
32934 int pos = 0;
32935 const size_t bufsz = sizeof(buf);
32936
32937+ pax_track_stack();
32938+
32939 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32940 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32941 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
32942@@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32943 char buf[256 * NUM_IWL_RXON_CTX];
32944 const size_t bufsz = sizeof(buf);
32945
32946+ pax_track_stack();
32947+
32948 for_each_context(priv, ctx) {
32949 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
32950 ctx->ctxid);
32951diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h
32952--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-11 15:19:27.000000000 -0500
32953+++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-16 18:39:07.000000000 -0500
32954@@ -68,8 +68,8 @@ do {
32955 } while (0)
32956
32957 #else
32958-#define IWL_DEBUG(__priv, level, fmt, args...)
32959-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32960+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32961+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32962 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32963 const void *p, u32 len)
32964 {}
32965diff -urNp linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c
32966--- linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-11 15:19:27.000000000 -0500
32967+++ linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-16 18:40:22.000000000 -0500
32968@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32969 int buf_len = 512;
32970 size_t len = 0;
32971
32972+ pax_track_stack();
32973+
32974 if (*ppos != 0)
32975 return 0;
32976 if (count < sizeof(buf))
32977diff -urNp linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c
32978--- linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c 2011-11-11 15:19:27.000000000 -0500
32979+++ linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c 2011-11-16 18:39:07.000000000 -0500
32980@@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(vo
32981 return -EINVAL;
32982
32983 if (fake_hw_scan) {
32984- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
32985- mac80211_hwsim_ops.sw_scan_start = NULL;
32986- mac80211_hwsim_ops.sw_scan_complete = NULL;
32987+ pax_open_kernel();
32988+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
32989+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
32990+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
32991+ pax_close_kernel();
32992 }
32993
32994 spin_lock_init(&hwsim_radio_lock);
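The iwl3945 and mac80211_hwsim hunks rewrite direct assignments to module-level ops structures as *(void **)&ops.member = ... bracketed by pax_open_kernel()/pax_close_kernel(), because those tables become read-only under the constification changes and must be made writable only for the duration of the update. A userspace model of the pattern using mprotect(), with illustrative names (the real helpers toggle kernel write protection):

/* Editorial sketch, not from the patch. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct hwsim_ops_model {
        int (*hw_scan)(void);
        int (*sw_scan_start)(void);
};

static int real_hw_scan(void) { return 1; }

static int patch_ops(struct hwsim_ops_model *ops, size_t pagesz)
{
        /* "pax_open_kernel()": make the page holding the ops writable. */
        if (mprotect(ops, pagesz, PROT_READ | PROT_WRITE) != 0)
                return -1;

        *(void **)&ops->hw_scan = (void *)real_hw_scan;
        *(void **)&ops->sw_scan_start = NULL;

        /* "pax_close_kernel()": drop write permission again. */
        return mprotect(ops, pagesz, PROT_READ);
}

int main(void)
{
        size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
        struct hwsim_ops_model *ops = mmap(NULL, pagesz,
                                           PROT_READ | PROT_WRITE,
                                           MAP_PRIVATE | MAP_ANONYMOUS,
                                           -1, 0);
        if (ops == MAP_FAILED)
                return 1;

        memset(ops, 0, sizeof(*ops));
        mprotect(ops, pagesz, PROT_READ);        /* normally read-only */

        if (patch_ops(ops, pagesz) == 0 && ops->hw_scan)
                printf("hw_scan patched in, returns %d\n", ops->hw_scan());

        munmap(ops, pagesz);
        return 0;
}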
32995diff -urNp linux-3.1.1/drivers/net/wireless/mwifiex/main.h linux-3.1.1/drivers/net/wireless/mwifiex/main.h
32996--- linux-3.1.1/drivers/net/wireless/mwifiex/main.h 2011-11-11 15:19:27.000000000 -0500
32997+++ linux-3.1.1/drivers/net/wireless/mwifiex/main.h 2011-11-16 18:39:07.000000000 -0500
32998@@ -560,7 +560,7 @@ struct mwifiex_if_ops {
32999
33000 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
33001 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33002-};
33003+} __no_const;
33004
33005 struct mwifiex_adapter {
33006 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
33007diff -urNp linux-3.1.1/drivers/net/wireless/rndis_wlan.c linux-3.1.1/drivers/net/wireless/rndis_wlan.c
33008--- linux-3.1.1/drivers/net/wireless/rndis_wlan.c 2011-11-11 15:19:27.000000000 -0500
33009+++ linux-3.1.1/drivers/net/wireless/rndis_wlan.c 2011-11-16 18:39:07.000000000 -0500
33010@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
33011
33012 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33013
33014- if (rts_threshold < 0 || rts_threshold > 2347)
33015+ if (rts_threshold > 2347)
33016 rts_threshold = 2347;
33017
33018 tmp = cpu_to_le32(rts_threshold);
33019diff -urNp linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
33020--- linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-11-11 15:19:27.000000000 -0500
33021+++ linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-11-16 18:40:22.000000000 -0500
33022@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
33023 u8 rfpath;
33024 u8 num_total_rfpath = rtlphy->num_total_rfpath;
33025
33026+ pax_track_stack();
33027+
33028 precommoncmdcnt = 0;
33029 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
33030 MAX_PRECMD_CNT,
33031diff -urNp linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h
33032--- linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h 2011-11-11 15:19:27.000000000 -0500
33033+++ linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h 2011-11-16 18:39:07.000000000 -0500
33034@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33035 void (*reset)(struct wl1251 *wl);
33036 void (*enable_irq)(struct wl1251 *wl);
33037 void (*disable_irq)(struct wl1251 *wl);
33038-};
33039+} __no_const;
33040
33041 struct wl1251 {
33042 struct ieee80211_hw *hw;
33043diff -urNp linux-3.1.1/drivers/net/wireless/wl12xx/spi.c linux-3.1.1/drivers/net/wireless/wl12xx/spi.c
33044--- linux-3.1.1/drivers/net/wireless/wl12xx/spi.c 2011-11-11 15:19:27.000000000 -0500
33045+++ linux-3.1.1/drivers/net/wireless/wl12xx/spi.c 2011-11-16 18:40:22.000000000 -0500
33046@@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct
33047 u32 chunk_len;
33048 int i;
33049
33050+ pax_track_stack();
33051+
33052 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
33053
33054 spi_message_init(&m);
33055diff -urNp linux-3.1.1/drivers/oprofile/buffer_sync.c linux-3.1.1/drivers/oprofile/buffer_sync.c
33056--- linux-3.1.1/drivers/oprofile/buffer_sync.c 2011-11-11 15:19:27.000000000 -0500
33057+++ linux-3.1.1/drivers/oprofile/buffer_sync.c 2011-11-16 18:39:07.000000000 -0500
33058@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
33059 if (cookie == NO_COOKIE)
33060 offset = pc;
33061 if (cookie == INVALID_COOKIE) {
33062- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33063+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33064 offset = pc;
33065 }
33066 if (cookie != last_cookie) {
33067@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
33068 /* add userspace sample */
33069
33070 if (!mm) {
33071- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33072+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33073 return 0;
33074 }
33075
33076 cookie = lookup_dcookie(mm, s->eip, &offset);
33077
33078 if (cookie == INVALID_COOKIE) {
33079- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33080+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33081 return 0;
33082 }
33083
33084@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33085 /* ignore backtraces if failed to add a sample */
33086 if (state == sb_bt_start) {
33087 state = sb_bt_ignore;
33088- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33089+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33090 }
33091 }
33092 release_mm(mm);
33093diff -urNp linux-3.1.1/drivers/oprofile/event_buffer.c linux-3.1.1/drivers/oprofile/event_buffer.c
33094--- linux-3.1.1/drivers/oprofile/event_buffer.c 2011-11-11 15:19:27.000000000 -0500
33095+++ linux-3.1.1/drivers/oprofile/event_buffer.c 2011-11-16 18:39:07.000000000 -0500
33096@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
33097 }
33098
33099 if (buffer_pos == buffer_size) {
33100- atomic_inc(&oprofile_stats.event_lost_overflow);
33101+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33102 return;
33103 }
33104
33105diff -urNp linux-3.1.1/drivers/oprofile/oprof.c linux-3.1.1/drivers/oprofile/oprof.c
33106--- linux-3.1.1/drivers/oprofile/oprof.c 2011-11-11 15:19:27.000000000 -0500
33107+++ linux-3.1.1/drivers/oprofile/oprof.c 2011-11-16 18:39:07.000000000 -0500
33108@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
33109 if (oprofile_ops.switch_events())
33110 return;
33111
33112- atomic_inc(&oprofile_stats.multiplex_counter);
33113+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33114 start_switch_worker();
33115 }
33116
33117diff -urNp linux-3.1.1/drivers/oprofile/oprofilefs.c linux-3.1.1/drivers/oprofile/oprofilefs.c
33118--- linux-3.1.1/drivers/oprofile/oprofilefs.c 2011-11-11 15:19:27.000000000 -0500
33119+++ linux-3.1.1/drivers/oprofile/oprofilefs.c 2011-11-16 18:39:07.000000000 -0500
33120@@ -186,7 +186,7 @@ static const struct file_operations atom
33121
33122
33123 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33124- char const *name, atomic_t *val)
33125+ char const *name, atomic_unchecked_t *val)
33126 {
33127 return __oprofilefs_create_file(sb, root, name,
33128 &atomic_ro_fops, 0444, val);
33129diff -urNp linux-3.1.1/drivers/oprofile/oprofile_stats.c linux-3.1.1/drivers/oprofile/oprofile_stats.c
33130--- linux-3.1.1/drivers/oprofile/oprofile_stats.c 2011-11-11 15:19:27.000000000 -0500
33131+++ linux-3.1.1/drivers/oprofile/oprofile_stats.c 2011-11-16 18:39:07.000000000 -0500
33132@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33133 cpu_buf->sample_invalid_eip = 0;
33134 }
33135
33136- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33137- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33138- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33139- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33140- atomic_set(&oprofile_stats.multiplex_counter, 0);
33141+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33142+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33143+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33144+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33145+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33146 }
33147
33148
33149diff -urNp linux-3.1.1/drivers/oprofile/oprofile_stats.h linux-3.1.1/drivers/oprofile/oprofile_stats.h
33150--- linux-3.1.1/drivers/oprofile/oprofile_stats.h 2011-11-11 15:19:27.000000000 -0500
33151+++ linux-3.1.1/drivers/oprofile/oprofile_stats.h 2011-11-16 18:39:07.000000000 -0500
33152@@ -13,11 +13,11 @@
33153 #include <linux/atomic.h>
33154
33155 struct oprofile_stat_struct {
33156- atomic_t sample_lost_no_mm;
33157- atomic_t sample_lost_no_mapping;
33158- atomic_t bt_lost_no_mapping;
33159- atomic_t event_lost_overflow;
33160- atomic_t multiplex_counter;
33161+ atomic_unchecked_t sample_lost_no_mm;
33162+ atomic_unchecked_t sample_lost_no_mapping;
33163+ atomic_unchecked_t bt_lost_no_mapping;
33164+ atomic_unchecked_t event_lost_overflow;
33165+ atomic_unchecked_t multiplex_counter;
33166 };
33167
33168 extern struct oprofile_stat_struct oprofile_stats;
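The oprofile hunks convert pure statistics counters from atomic_t to atomic_unchecked_t: under the PAX_REFCOUNT option, ordinary atomic_t increments are overflow-checked to catch reference-count bugs, so counters that may legitimately wrap have to opt out. A C11 sketch of the distinction, assuming nothing about the kernel implementation:

/* Editorial sketch, not from the patch: "checked" vs "unchecked"
 * counters modelled with C11 atomics; names are illustrative. */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int  v; } checked_atomic_t;   /* refcount-style */
typedef struct { atomic_uint v; } unchecked_atomic_t; /* stats-style    */

static int checked_inc(checked_atomic_t *a)
{
        int old = atomic_load(&a->v);

        if (old == INT_MAX)
                return -1;      /* would overflow: report and refuse    */
        atomic_fetch_add(&a->v, 1);
        return 0;
}

static void unchecked_inc(unchecked_atomic_t *a)
{
        atomic_fetch_add(&a->v, 1);  /* wrapping is fine for a counter  */
}

int main(void)
{
        checked_atomic_t   refs = { INT_MAX };
        unchecked_atomic_t lost = { UINT_MAX };

        if (checked_inc(&refs) < 0)
                puts("checked counter refused to wrap");

        unchecked_inc(&lost);
        printf("unchecked counter wrapped to %u\n", atomic_load(&lost.v));
        return 0;
}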
33169diff -urNp linux-3.1.1/drivers/parport/procfs.c linux-3.1.1/drivers/parport/procfs.c
33170--- linux-3.1.1/drivers/parport/procfs.c 2011-11-11 15:19:27.000000000 -0500
33171+++ linux-3.1.1/drivers/parport/procfs.c 2011-11-16 18:39:07.000000000 -0500
33172@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
33173
33174 *ppos += len;
33175
33176- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33177+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33178 }
33179
33180 #ifdef CONFIG_PARPORT_1284
33181@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33182
33183 *ppos += len;
33184
33185- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33186+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33187 }
33188 #endif /* IEEE1284.3 support. */
33189
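The parport procfs hunks extend the final copy_to_user() so the call fails outright when the requested length exceeds the on-stack buffer, rather than trusting len and copying adjacent stack memory out to userspace. The shape of that check in a standalone sketch, where copy_out() merely stands in for copy_to_user():

/* Editorial sketch, not from the patch. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int copy_out(char *dst, const char *src, size_t len)
{
        memcpy(dst, src, len);          /* models copy_to_user()        */
        return 0;                       /* 0 == all bytes copied        */
}

static int report_device(char *result, size_t len)
{
        char buffer[256] = {0};

        snprintf(buffer, sizeof(buffer), "%-8s\n", "none");

        /* Patched form: bound len by the buffer before copying. */
        return (len > sizeof(buffer) || copy_out(result, buffer, len))
                ? -EFAULT : 0;
}

int main(void)
{
        char out[512];

        printf("len 16  -> %d\n", report_device(out, 16));
        printf("len 400 -> %d (rejected, would read past the buffer)\n",
               report_device(out, 400));
        return 0;
}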
33190diff -urNp linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h
33191--- linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h 2011-11-11 15:19:27.000000000 -0500
33192+++ linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h 2011-11-16 18:39:07.000000000 -0500
33193@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33194 int (*hardware_test) (struct slot* slot, u32 value);
33195 u8 (*get_power) (struct slot* slot);
33196 int (*set_power) (struct slot* slot, int value);
33197-};
33198+} __no_const;
33199
33200 struct cpci_hp_controller {
33201 unsigned int irq;
33202diff -urNp linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c
33203--- linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-11 15:19:27.000000000 -0500
33204+++ linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-16 18:39:07.000000000 -0500
33205@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
33206
33207 void compaq_nvram_init (void __iomem *rom_start)
33208 {
33209+
33210+#ifndef CONFIG_PAX_KERNEXEC
33211 if (rom_start) {
33212 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33213 }
33214+#endif
33215+
33216 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33217
33218 /* initialize our int15 lock */
33219diff -urNp linux-3.1.1/drivers/pci/pcie/aspm.c linux-3.1.1/drivers/pci/pcie/aspm.c
33220--- linux-3.1.1/drivers/pci/pcie/aspm.c 2011-11-11 15:19:27.000000000 -0500
33221+++ linux-3.1.1/drivers/pci/pcie/aspm.c 2011-11-16 18:39:07.000000000 -0500
33222@@ -27,9 +27,9 @@
33223 #define MODULE_PARAM_PREFIX "pcie_aspm."
33224
33225 /* Note: those are not register definitions */
33226-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33227-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33228-#define ASPM_STATE_L1 (4) /* L1 state */
33229+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33230+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33231+#define ASPM_STATE_L1 (4U) /* L1 state */
33232 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33233 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33234
33235diff -urNp linux-3.1.1/drivers/pci/probe.c linux-3.1.1/drivers/pci/probe.c
33236--- linux-3.1.1/drivers/pci/probe.c 2011-11-11 15:19:27.000000000 -0500
33237+++ linux-3.1.1/drivers/pci/probe.c 2011-11-16 18:39:07.000000000 -0500
33238@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev,
33239 u32 l, sz, mask;
33240 u16 orig_cmd;
33241
33242- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33243+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33244
33245 if (!dev->mmio_always_on) {
33246 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33247diff -urNp linux-3.1.1/drivers/pci/proc.c linux-3.1.1/drivers/pci/proc.c
33248--- linux-3.1.1/drivers/pci/proc.c 2011-11-11 15:19:27.000000000 -0500
33249+++ linux-3.1.1/drivers/pci/proc.c 2011-11-16 18:40:22.000000000 -0500
33250@@ -476,7 +476,16 @@ static const struct file_operations proc
33251 static int __init pci_proc_init(void)
33252 {
33253 struct pci_dev *dev = NULL;
33254+
33255+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33256+#ifdef CONFIG_GRKERNSEC_PROC_USER
33257+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33258+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33259+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33260+#endif
33261+#else
33262 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33263+#endif
33264 proc_create("devices", 0, proc_bus_pci_dir,
33265 &proc_bus_pci_dev_operations);
33266 proc_initialized = 1;
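The pci_proc_init() hunk chooses the mode of the /proc/bus/pci directory at build time: with GRKERNSEC_PROC_USER it is readable by root only, with GRKERNSEC_PROC_USERGROUP by root plus the configured group, and otherwise it stays world-readable as before. A build-time sketch of that selection, with the config symbols reduced to plain defines and all values illustrative:

/* Editorial sketch, not from the patch. */
#include <stdio.h>
#include <sys/stat.h>

#define GRKERNSEC_PROC 1
#define GRKERNSEC_PROC_USER 1

static mode_t pci_proc_dir_mode(void)
{
#ifdef GRKERNSEC_PROC
# ifdef GRKERNSEC_PROC_USER
        return S_IRUSR | S_IXUSR;                     /* 0500: root only  */
# else
        return S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP; /* 0550: root+group */
# endif
#else
        return 0555;                                  /* unrestricted     */
#endif
}

int main(void)
{
        printf("bus/pci mode: %04o\n", (unsigned int)pci_proc_dir_mode());
        return 0;
}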
33267diff -urNp linux-3.1.1/drivers/pci/xen-pcifront.c linux-3.1.1/drivers/pci/xen-pcifront.c
33268--- linux-3.1.1/drivers/pci/xen-pcifront.c 2011-11-11 15:19:27.000000000 -0500
33269+++ linux-3.1.1/drivers/pci/xen-pcifront.c 2011-11-16 18:40:22.000000000 -0500
33270@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
33271 struct pcifront_sd *sd = bus->sysdata;
33272 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33273
33274+ pax_track_stack();
33275+
33276 if (verbose_request)
33277 dev_info(&pdev->xdev->dev,
33278 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
33279@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
33280 struct pcifront_sd *sd = bus->sysdata;
33281 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33282
33283+ pax_track_stack();
33284+
33285 if (verbose_request)
33286 dev_info(&pdev->xdev->dev,
33287 "write dev=%04x:%02x:%02x.%01x - "
33288@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
33289 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33290 struct msi_desc *entry;
33291
33292+ pax_track_stack();
33293+
33294 if (nvec > SH_INFO_MAX_VEC) {
33295 dev_err(&dev->dev, "too much vector for pci frontend: %x."
33296 " Increase SH_INFO_MAX_VEC.\n", nvec);
33297@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
33298 struct pcifront_sd *sd = dev->bus->sysdata;
33299 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33300
33301+ pax_track_stack();
33302+
33303 err = do_pci_op(pdev, &op);
33304
33305 /* What should do for error ? */
33306@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
33307 struct pcifront_sd *sd = dev->bus->sysdata;
33308 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33309
33310+ pax_track_stack();
33311+
33312 err = do_pci_op(pdev, &op);
33313 if (likely(!err)) {
33314 vector[0] = op.value;
33315diff -urNp linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c
33316--- linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c 2011-11-11 15:19:27.000000000 -0500
33317+++ linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c 2011-11-16 18:39:07.000000000 -0500
33318@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33319 return 0;
33320 }
33321
33322-void static hotkey_mask_warn_incomplete_mask(void)
33323+static void hotkey_mask_warn_incomplete_mask(void)
33324 {
33325 /* log only what the user can fix... */
33326 const u32 wantedmask = hotkey_driver_mask &
33327diff -urNp linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c
33328--- linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c 2011-11-11 15:19:27.000000000 -0500
33329+++ linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c 2011-11-16 18:39:07.000000000 -0500
33330@@ -59,7 +59,7 @@ do { \
33331 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33332 } while(0)
33333
33334-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33335+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33336 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33337
33338 /*
33339@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
33340
33341 cpu = get_cpu();
33342 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33343+
33344+ pax_open_kernel();
33345 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33346+ pax_close_kernel();
33347
33348 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33349 spin_lock_irqsave(&pnp_bios_lock, flags);
33350@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
33351 :"memory");
33352 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33353
33354+ pax_open_kernel();
33355 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33356+ pax_close_kernel();
33357+
33358 put_cpu();
33359
33360 /* If we get here and this is set then the PnP BIOS faulted on us. */
33361@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
33362 return status;
33363 }
33364
33365-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33366+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33367 {
33368 int i;
33369
33370@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
33371 pnp_bios_callpoint.offset = header->fields.pm16offset;
33372 pnp_bios_callpoint.segment = PNP_CS16;
33373
33374+ pax_open_kernel();
33375+
33376 for_each_possible_cpu(i) {
33377 struct desc_struct *gdt = get_cpu_gdt_table(i);
33378 if (!gdt)
33379@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
33380 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33381 (unsigned long)__va(header->fields.pm16dseg));
33382 }
33383+
33384+ pax_close_kernel();
33385 }
33386diff -urNp linux-3.1.1/drivers/pnp/resource.c linux-3.1.1/drivers/pnp/resource.c
33387--- linux-3.1.1/drivers/pnp/resource.c 2011-11-11 15:19:27.000000000 -0500
33388+++ linux-3.1.1/drivers/pnp/resource.c 2011-11-16 18:39:07.000000000 -0500
33389@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33390 return 1;
33391
33392 /* check if the resource is valid */
33393- if (*irq < 0 || *irq > 15)
33394+ if (*irq > 15)
33395 return 0;
33396
33397 /* check if the resource is reserved */
33398@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33399 return 1;
33400
33401 /* check if the resource is valid */
33402- if (*dma < 0 || *dma == 4 || *dma > 7)
33403+ if (*dma == 4 || *dma > 7)
33404 return 0;
33405
33406 /* check if the resource is reserved */
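The pnp resource hunks drop the *irq < 0 and *dma < 0 clauses because both values are unsigned, so those comparisons are always false and only the upper bounds (and the dma == 4 case) do any checking. A small sketch of why the simplification is behaviour-preserving:

/* Editorial sketch, not from the patch. */
#include <stdbool.h>
#include <stdio.h>

/* Original shape: the commented clause can never be true for an
 * unsigned value, which is exactly what compilers warn about. */
static bool irq_valid_old(unsigned int irq)
{
        return !(/* irq < 0 || */ irq > 15);
}

/* Patched shape: keep only the meaningful bound. */
static bool irq_valid_new(unsigned int irq)
{
        return irq <= 15;
}

int main(void)
{
        unsigned int samples[] = { 0u, 7u, 15u, 16u, (unsigned int)-1 };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("irq %u: old=%d new=%d\n", samples[i],
                       irq_valid_old(samples[i]), irq_valid_new(samples[i]));
        return 0;
}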
33407diff -urNp linux-3.1.1/drivers/power/bq27x00_battery.c linux-3.1.1/drivers/power/bq27x00_battery.c
33408--- linux-3.1.1/drivers/power/bq27x00_battery.c 2011-11-11 15:19:27.000000000 -0500
33409+++ linux-3.1.1/drivers/power/bq27x00_battery.c 2011-11-16 18:39:07.000000000 -0500
33410@@ -67,7 +67,7 @@
33411 struct bq27x00_device_info;
33412 struct bq27x00_access_methods {
33413 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33414-};
33415+} __no_const;
33416
33417 enum bq27x00_chip { BQ27000, BQ27500 };
33418
33419diff -urNp linux-3.1.1/drivers/regulator/max8660.c linux-3.1.1/drivers/regulator/max8660.c
33420--- linux-3.1.1/drivers/regulator/max8660.c 2011-11-11 15:19:27.000000000 -0500
33421+++ linux-3.1.1/drivers/regulator/max8660.c 2011-11-16 18:39:07.000000000 -0500
33422@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
33423 max8660->shadow_regs[MAX8660_OVER1] = 5;
33424 } else {
33425 /* Otherwise devices can be toggled via software */
33426- max8660_dcdc_ops.enable = max8660_dcdc_enable;
33427- max8660_dcdc_ops.disable = max8660_dcdc_disable;
33428+ pax_open_kernel();
33429+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33430+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33431+ pax_close_kernel();
33432 }
33433
33434 /*
33435diff -urNp linux-3.1.1/drivers/regulator/mc13892-regulator.c linux-3.1.1/drivers/regulator/mc13892-regulator.c
33436--- linux-3.1.1/drivers/regulator/mc13892-regulator.c 2011-11-11 15:19:27.000000000 -0500
33437+++ linux-3.1.1/drivers/regulator/mc13892-regulator.c 2011-11-16 18:39:07.000000000 -0500
33438@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
33439 }
33440 mc13xxx_unlock(mc13892);
33441
33442- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33443+ pax_open_kernel();
33444+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33445 = mc13892_vcam_set_mode;
33446- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33447+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33448 = mc13892_vcam_get_mode;
33449+ pax_close_kernel();
33450 for (i = 0; i < pdata->num_regulators; i++) {
33451 init_data = &pdata->regulators[i];
33452 priv->regulators[i] = regulator_register(
33453diff -urNp linux-3.1.1/drivers/rtc/rtc-dev.c linux-3.1.1/drivers/rtc/rtc-dev.c
33454--- linux-3.1.1/drivers/rtc/rtc-dev.c 2011-11-11 15:19:27.000000000 -0500
33455+++ linux-3.1.1/drivers/rtc/rtc-dev.c 2011-11-16 18:40:22.000000000 -0500
33456@@ -14,6 +14,7 @@
33457 #include <linux/module.h>
33458 #include <linux/rtc.h>
33459 #include <linux/sched.h>
33460+#include <linux/grsecurity.h>
33461 #include "rtc-core.h"
33462
33463 static dev_t rtc_devt;
33464@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
33465 if (copy_from_user(&tm, uarg, sizeof(tm)))
33466 return -EFAULT;
33467
33468+ gr_log_timechange();
33469+
33470 return rtc_set_time(rtc, &tm);
33471
33472 case RTC_PIE_ON:
33473diff -urNp linux-3.1.1/drivers/scsi/aacraid/aacraid.h linux-3.1.1/drivers/scsi/aacraid/aacraid.h
33474--- linux-3.1.1/drivers/scsi/aacraid/aacraid.h 2011-11-11 15:19:27.000000000 -0500
33475+++ linux-3.1.1/drivers/scsi/aacraid/aacraid.h 2011-11-16 18:39:07.000000000 -0500
33476@@ -492,7 +492,7 @@ struct adapter_ops
33477 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33478 /* Administrative operations */
33479 int (*adapter_comm)(struct aac_dev * dev, int comm);
33480-};
33481+} __no_const;
33482
33483 /*
33484 * Define which interrupt handler needs to be installed
33485diff -urNp linux-3.1.1/drivers/scsi/aacraid/commctrl.c linux-3.1.1/drivers/scsi/aacraid/commctrl.c
33486--- linux-3.1.1/drivers/scsi/aacraid/commctrl.c 2011-11-11 15:19:27.000000000 -0500
33487+++ linux-3.1.1/drivers/scsi/aacraid/commctrl.c 2011-11-16 18:40:22.000000000 -0500
33488@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
33489 u32 actual_fibsize64, actual_fibsize = 0;
33490 int i;
33491
33492+ pax_track_stack();
33493
33494 if (dev->in_reset) {
33495 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33496diff -urNp linux-3.1.1/drivers/scsi/aacraid/linit.c linux-3.1.1/drivers/scsi/aacraid/linit.c
33497--- linux-3.1.1/drivers/scsi/aacraid/linit.c 2011-11-11 15:19:27.000000000 -0500
33498+++ linux-3.1.1/drivers/scsi/aacraid/linit.c 2011-11-16 18:39:07.000000000 -0500
33499@@ -92,7 +92,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
33500 #elif defined(__devinitconst)
33501 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33502 #else
33503-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33504+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33505 #endif
33506 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33507 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33508diff -urNp linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c
33509--- linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-11 15:19:27.000000000 -0500
33510+++ linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-16 18:39:07.000000000 -0500
33511@@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
33512 .lldd_control_phy = asd_control_phy,
33513 };
33514
33515-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33516+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33517 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33518 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33519 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33520diff -urNp linux-3.1.1/drivers/scsi/bfa/bfad.c linux-3.1.1/drivers/scsi/bfa/bfad.c
33521--- linux-3.1.1/drivers/scsi/bfa/bfad.c 2011-11-11 15:19:27.000000000 -0500
33522+++ linux-3.1.1/drivers/scsi/bfa/bfad.c 2011-11-16 19:01:15.000000000 -0500
33523@@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) {
33524 struct bfad_vport_s *vport, *vport_new;
33525 struct bfa_fcs_driver_info_s driver_info;
33526
33527+ pax_track_stack();
33528+
33529 /* Limit min/max. xfer size to [64k-32MB] */
33530 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
33531 max_xfer_size = BFAD_MIN_SECTORS >> 1;
33532diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c
33533--- linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c 2011-11-11 15:19:27.000000000 -0500
33534+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c 2011-11-16 18:39:07.000000000 -0500
33535@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct
33536 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
33537 {
33538 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
33539- struct bfa_itn_s *itn;
33540+ bfa_itn_s_no_const *itn;
33541
33542 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
33543 itn->isr = isr;
33544diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h
33545--- linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h 2011-11-11 15:19:27.000000000 -0500
33546+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h 2011-11-16 18:39:07.000000000 -0500
33547@@ -37,6 +37,7 @@ struct bfa_iotag_s {
33548 struct bfa_itn_s {
33549 bfa_isr_func_t isr;
33550 };
33551+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
33552
33553 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
33554 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
33555@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
33556 struct list_head iotag_tio_free_q; /* free IO resources */
33557 struct list_head iotag_unused_q; /* unused IO resources*/
33558 struct bfa_iotag_s *iotag_arr;
33559- struct bfa_itn_s *itn_arr;
33560+ bfa_itn_s_no_const *itn_arr;
33561 int num_ioim_reqs;
33562 int num_fwtio_reqs;
33563 int num_itns;
33564diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c
33565--- linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c 2011-11-11 15:19:27.000000000 -0500
33566+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c 2011-11-16 18:40:22.000000000 -0500
33567@@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
33568 u16 len, count;
33569 u16 templen;
33570
33571+ pax_track_stack();
33572+
33573 /*
33574 * get hba attributes
33575 */
33576@@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
33577 u8 count = 0;
33578 u16 templen;
33579
33580+ pax_track_stack();
33581+
33582 /*
33583 * get port attributes
33584 */
33585diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c
33586--- linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c 2011-11-11 15:19:27.000000000 -0500
33587+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c 2011-11-16 18:40:22.000000000 -0500
33588@@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
33589 struct fc_rpsc_speed_info_s speeds;
33590 struct bfa_port_attr_s pport_attr;
33591
33592+ pax_track_stack();
33593+
33594 bfa_trc(port->fcs, rx_fchs->s_id);
33595 bfa_trc(port->fcs, rx_fchs->d_id);
33596
33597diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa.h linux-3.1.1/drivers/scsi/bfa/bfa.h
33598--- linux-3.1.1/drivers/scsi/bfa/bfa.h 2011-11-11 15:19:27.000000000 -0500
33599+++ linux-3.1.1/drivers/scsi/bfa/bfa.h 2011-11-16 18:39:07.000000000 -0500
33600@@ -196,7 +196,7 @@ struct bfa_hwif_s {
33601 u32 *end);
33602 int cpe_vec_q0;
33603 int rme_vec_q0;
33604-};
33605+} __no_const;
33606 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33607
33608 struct bfa_faa_cbfn_s {
33609diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h
33610--- linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h 2011-11-11 15:19:27.000000000 -0500
33611+++ linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h 2011-11-16 18:39:07.000000000 -0500
33612@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
33613 bfa_ioc_disable_cbfn_t disable_cbfn;
33614 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33615 bfa_ioc_reset_cbfn_t reset_cbfn;
33616-};
33617+} __no_const;
33618
33619 /*
33620 * IOC event notification mechanism.
33621@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
33622 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33623 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33624 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
33625-};
33626+} __no_const;
33627
33628 /*
33629 * Queue element to wait for room in request queue. FIFO order is
33630diff -urNp linux-3.1.1/drivers/scsi/BusLogic.c linux-3.1.1/drivers/scsi/BusLogic.c
33631--- linux-3.1.1/drivers/scsi/BusLogic.c 2011-11-11 15:19:27.000000000 -0500
33632+++ linux-3.1.1/drivers/scsi/BusLogic.c 2011-11-16 18:40:22.000000000 -0500
33633@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
33634 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33635 *PrototypeHostAdapter)
33636 {
33637+ pax_track_stack();
33638+
33639 /*
33640 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33641 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33642diff -urNp linux-3.1.1/drivers/scsi/dpt_i2o.c linux-3.1.1/drivers/scsi/dpt_i2o.c
33643--- linux-3.1.1/drivers/scsi/dpt_i2o.c 2011-11-11 15:19:27.000000000 -0500
33644+++ linux-3.1.1/drivers/scsi/dpt_i2o.c 2011-11-16 18:40:22.000000000 -0500
33645@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33646 dma_addr_t addr;
33647 ulong flags = 0;
33648
33649+ pax_track_stack();
33650+
33651 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33652 // get user msg size in u32s
33653 if(get_user(size, &user_msg[0])){
33654@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33655 s32 rcode;
33656 dma_addr_t addr;
33657
33658+ pax_track_stack();
33659+
33660 memset(msg, 0 , sizeof(msg));
33661 len = scsi_bufflen(cmd);
33662 direction = 0x00000000;
33663diff -urNp linux-3.1.1/drivers/scsi/eata.c linux-3.1.1/drivers/scsi/eata.c
33664--- linux-3.1.1/drivers/scsi/eata.c 2011-11-11 15:19:27.000000000 -0500
33665+++ linux-3.1.1/drivers/scsi/eata.c 2011-11-16 18:40:22.000000000 -0500
33666@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33667 struct hostdata *ha;
33668 char name[16];
33669
33670+ pax_track_stack();
33671+
33672 sprintf(name, "%s%d", driver_name, j);
33673
33674 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33675diff -urNp linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c
33676--- linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c 2011-11-11 15:19:27.000000000 -0500
33677+++ linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c 2011-11-16 18:40:22.000000000 -0500
33678@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
33679 } buf;
33680 int rc;
33681
33682+ pax_track_stack();
33683+
33684 fiph = (struct fip_header *)skb->data;
33685 sub = fiph->fip_subcode;
33686
33687diff -urNp linux-3.1.1/drivers/scsi/gdth.c linux-3.1.1/drivers/scsi/gdth.c
33688--- linux-3.1.1/drivers/scsi/gdth.c 2011-11-11 15:19:27.000000000 -0500
33689+++ linux-3.1.1/drivers/scsi/gdth.c 2011-11-16 18:40:22.000000000 -0500
33690@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
33691 unsigned long flags;
33692 gdth_ha_str *ha;
33693
33694+ pax_track_stack();
33695+
33696 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33697 return -EFAULT;
33698 ha = gdth_find_ha(ldrv.ionode);
33699@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
33700 gdth_ha_str *ha;
33701 int rval;
33702
33703+ pax_track_stack();
33704+
33705 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33706 res.number >= MAX_HDRIVES)
33707 return -EFAULT;
33708@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
33709 gdth_ha_str *ha;
33710 int rval;
33711
33712+ pax_track_stack();
33713+
33714 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33715 return -EFAULT;
33716 ha = gdth_find_ha(gen.ionode);
33717@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
33718 int i;
33719 gdth_cmd_str gdtcmd;
33720 char cmnd[MAX_COMMAND_SIZE];
33721+
33722+ pax_track_stack();
33723+
33724 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33725
33726 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33727diff -urNp linux-3.1.1/drivers/scsi/gdth_proc.c linux-3.1.1/drivers/scsi/gdth_proc.c
33728--- linux-3.1.1/drivers/scsi/gdth_proc.c 2011-11-11 15:19:27.000000000 -0500
33729+++ linux-3.1.1/drivers/scsi/gdth_proc.c 2011-11-16 18:40:22.000000000 -0500
33730@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
33731 u64 paddr;
33732
33733 char cmnd[MAX_COMMAND_SIZE];
33734+
33735+ pax_track_stack();
33736+
33737 memset(cmnd, 0xff, 12);
33738 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33739
33740@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
33741 gdth_hget_str *phg;
33742 char cmnd[MAX_COMMAND_SIZE];
33743
33744+ pax_track_stack();
33745+
33746 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33747 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33748 if (!gdtcmd || !estr)
33749diff -urNp linux-3.1.1/drivers/scsi/hosts.c linux-3.1.1/drivers/scsi/hosts.c
33750--- linux-3.1.1/drivers/scsi/hosts.c 2011-11-11 15:19:27.000000000 -0500
33751+++ linux-3.1.1/drivers/scsi/hosts.c 2011-11-16 18:39:07.000000000 -0500
33752@@ -42,7 +42,7 @@
33753 #include "scsi_logging.h"
33754
33755
33756-static atomic_t scsi_host_next_hn; /* host_no for next new host */
33757+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33758
33759
33760 static void scsi_host_cls_release(struct device *dev)
33761@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33762 * subtract one because we increment first then return, but we need to
33763 * know what the next host number was before increment
33764 */
33765- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33766+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33767 shost->dma_channel = 0xff;
33768
33769 /* These three are default values which can be overridden */
33770diff -urNp linux-3.1.1/drivers/scsi/hpsa.c linux-3.1.1/drivers/scsi/hpsa.c
33771--- linux-3.1.1/drivers/scsi/hpsa.c 2011-11-11 15:19:27.000000000 -0500
33772+++ linux-3.1.1/drivers/scsi/hpsa.c 2011-11-16 18:39:07.000000000 -0500
33773@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
33774 u32 a;
33775
33776 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33777- return h->access.command_completed(h);
33778+ return h->access->command_completed(h);
33779
33780 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33781 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33782@@ -2955,7 +2955,7 @@ static void start_io(struct ctlr_info *h
33783 while (!list_empty(&h->reqQ)) {
33784 c = list_entry(h->reqQ.next, struct CommandList, list);
33785 /* can't do anything if fifo is full */
33786- if ((h->access.fifo_full(h))) {
33787+ if ((h->access->fifo_full(h))) {
33788 dev_warn(&h->pdev->dev, "fifo full\n");
33789 break;
33790 }
33791@@ -2965,7 +2965,7 @@ static void start_io(struct ctlr_info *h
33792 h->Qdepth--;
33793
33794 /* Tell the controller execute command */
33795- h->access.submit_command(h, c);
33796+ h->access->submit_command(h, c);
33797
33798 /* Put job onto the completed Q */
33799 addQ(&h->cmpQ, c);
33800@@ -2974,17 +2974,17 @@ static void start_io(struct ctlr_info *h
33801
33802 static inline unsigned long get_next_completion(struct ctlr_info *h)
33803 {
33804- return h->access.command_completed(h);
33805+ return h->access->command_completed(h);
33806 }
33807
33808 static inline bool interrupt_pending(struct ctlr_info *h)
33809 {
33810- return h->access.intr_pending(h);
33811+ return h->access->intr_pending(h);
33812 }
33813
33814 static inline long interrupt_not_for_us(struct ctlr_info *h)
33815 {
33816- return (h->access.intr_pending(h) == 0) ||
33817+ return (h->access->intr_pending(h) == 0) ||
33818 (h->interrupts_enabled == 0);
33819 }
33820
33821@@ -3881,7 +3881,7 @@ static int __devinit hpsa_pci_init(struc
33822 if (prod_index < 0)
33823 return -ENODEV;
33824 h->product_name = products[prod_index].product_name;
33825- h->access = *(products[prod_index].access);
33826+ h->access = products[prod_index].access;
33827
33828 if (hpsa_board_disabled(h->pdev)) {
33829 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33830@@ -4158,7 +4158,7 @@ reinit_after_soft_reset:
33831 }
33832
33833 /* make sure the board interrupts are off */
33834- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33835+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33836
33837 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
33838 goto clean2;
33839@@ -4192,7 +4192,7 @@ reinit_after_soft_reset:
33840 * fake ones to scoop up any residual completions.
33841 */
33842 spin_lock_irqsave(&h->lock, flags);
33843- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33844+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33845 spin_unlock_irqrestore(&h->lock, flags);
33846 free_irq(h->intr[h->intr_mode], h);
33847 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
33848@@ -4211,9 +4211,9 @@ reinit_after_soft_reset:
33849 dev_info(&h->pdev->dev, "Board READY.\n");
33850 dev_info(&h->pdev->dev,
33851 "Waiting for stale completions to drain.\n");
33852- h->access.set_intr_mask(h, HPSA_INTR_ON);
33853+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33854 msleep(10000);
33855- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33856+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33857
33858 rc = controller_reset_failed(h->cfgtable);
33859 if (rc)
33860@@ -4234,7 +4234,7 @@ reinit_after_soft_reset:
33861 }
33862
33863 /* Turn the interrupts on so we can service requests */
33864- h->access.set_intr_mask(h, HPSA_INTR_ON);
33865+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33866
33867 hpsa_hba_inquiry(h);
33868 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
33869@@ -4287,7 +4287,7 @@ static void hpsa_shutdown(struct pci_dev
33870 * To write all data in the battery backed cache to disks
33871 */
33872 hpsa_flush_cache(h);
33873- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33874+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33875 free_irq(h->intr[h->intr_mode], h);
33876 #ifdef CONFIG_PCI_MSI
33877 if (h->msix_vector)
33878@@ -4450,7 +4450,7 @@ static __devinit void hpsa_enter_perform
33879 return;
33880 }
33881 /* Change the access methods to the performant access methods */
33882- h->access = SA5_performant_access;
33883+ h->access = &SA5_performant_access;
33884 h->transMethod = CFGTBL_Trans_Performant;
33885 }
33886
33887diff -urNp linux-3.1.1/drivers/scsi/hpsa.h linux-3.1.1/drivers/scsi/hpsa.h
33888--- linux-3.1.1/drivers/scsi/hpsa.h 2011-11-11 15:19:27.000000000 -0500
33889+++ linux-3.1.1/drivers/scsi/hpsa.h 2011-11-16 18:39:07.000000000 -0500
33890@@ -73,7 +73,7 @@ struct ctlr_info {
33891 unsigned int msix_vector;
33892 unsigned int msi_vector;
33893 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
33894- struct access_method access;
33895+ struct access_method *access;
33896
33897 /* queue and queue Info */
33898 struct list_head reqQ;
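The hpsa hunks stop embedding a writable copy of the access_method table in struct ctlr_info and keep a pointer to the shared table instead, which is why every h->access.fn() call site becomes h->access->fn() and the performant-mode switch assigns &SA5_performant_access. The same change in miniature, with illustrative names:

/* Editorial sketch, not from the patch: point at a shared const
 * method table instead of copying it into each controller. */
#include <stdio.h>

struct access_method_model {
        void (*submit_command)(int tag);
        unsigned long (*command_completed)(void);
};

static void sa5_submit(int tag)          { printf("submit %d\n", tag); }
static unsigned long sa5_completed(void) { return 42; }

static const struct access_method_model SA5_access_model = {
        .submit_command    = sa5_submit,
        .command_completed = sa5_completed,
};

struct ctlr_info_model {
        /* was: struct access_method_model access;   (writable copy)   */
        const struct access_method_model *access;   /* now: shared ptr */
};

int main(void)
{
        struct ctlr_info_model h = { .access = &SA5_access_model };

        h.access->submit_command(7);                 /* h->access->...  */
        printf("completed: %lu\n", h.access->command_completed());
        return 0;
}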
33899diff -urNp linux-3.1.1/drivers/scsi/ips.h linux-3.1.1/drivers/scsi/ips.h
33900--- linux-3.1.1/drivers/scsi/ips.h 2011-11-11 15:19:27.000000000 -0500
33901+++ linux-3.1.1/drivers/scsi/ips.h 2011-11-16 18:39:07.000000000 -0500
33902@@ -1027,7 +1027,7 @@ typedef struct {
33903 int (*intr)(struct ips_ha *);
33904 void (*enableint)(struct ips_ha *);
33905 uint32_t (*statupd)(struct ips_ha *);
33906-} ips_hw_func_t;
33907+} __no_const ips_hw_func_t;
33908
33909 typedef struct ips_ha {
33910 uint8_t ha_id[IPS_MAX_CHANNELS+1];
33911diff -urNp linux-3.1.1/drivers/scsi/libfc/fc_exch.c linux-3.1.1/drivers/scsi/libfc/fc_exch.c
33912--- linux-3.1.1/drivers/scsi/libfc/fc_exch.c 2011-11-11 15:19:27.000000000 -0500
33913+++ linux-3.1.1/drivers/scsi/libfc/fc_exch.c 2011-11-16 18:39:07.000000000 -0500
33914@@ -105,12 +105,12 @@ struct fc_exch_mgr {
33915 * all together if not used XXX
33916 */
33917 struct {
33918- atomic_t no_free_exch;
33919- atomic_t no_free_exch_xid;
33920- atomic_t xid_not_found;
33921- atomic_t xid_busy;
33922- atomic_t seq_not_found;
33923- atomic_t non_bls_resp;
33924+ atomic_unchecked_t no_free_exch;
33925+ atomic_unchecked_t no_free_exch_xid;
33926+ atomic_unchecked_t xid_not_found;
33927+ atomic_unchecked_t xid_busy;
33928+ atomic_unchecked_t seq_not_found;
33929+ atomic_unchecked_t non_bls_resp;
33930 } stats;
33931 };
33932
33933@@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(
33934 /* allocate memory for exchange */
33935 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33936 if (!ep) {
33937- atomic_inc(&mp->stats.no_free_exch);
33938+ atomic_inc_unchecked(&mp->stats.no_free_exch);
33939 goto out;
33940 }
33941 memset(ep, 0, sizeof(*ep));
33942@@ -779,7 +779,7 @@ out:
33943 return ep;
33944 err:
33945 spin_unlock_bh(&pool->lock);
33946- atomic_inc(&mp->stats.no_free_exch_xid);
33947+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33948 mempool_free(ep, mp->ep_pool);
33949 return NULL;
33950 }
33951@@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33952 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33953 ep = fc_exch_find(mp, xid);
33954 if (!ep) {
33955- atomic_inc(&mp->stats.xid_not_found);
33956+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33957 reject = FC_RJT_OX_ID;
33958 goto out;
33959 }
33960@@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33961 ep = fc_exch_find(mp, xid);
33962 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33963 if (ep) {
33964- atomic_inc(&mp->stats.xid_busy);
33965+ atomic_inc_unchecked(&mp->stats.xid_busy);
33966 reject = FC_RJT_RX_ID;
33967 goto rel;
33968 }
33969@@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33970 }
33971 xid = ep->xid; /* get our XID */
33972 } else if (!ep) {
33973- atomic_inc(&mp->stats.xid_not_found);
33974+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33975 reject = FC_RJT_RX_ID; /* XID not found */
33976 goto out;
33977 }
33978@@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33979 } else {
33980 sp = &ep->seq;
33981 if (sp->id != fh->fh_seq_id) {
33982- atomic_inc(&mp->stats.seq_not_found);
33983+ atomic_inc_unchecked(&mp->stats.seq_not_found);
33984 if (f_ctl & FC_FC_END_SEQ) {
33985 /*
33986 * Update sequence_id based on incoming last
33987@@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct
33988
33989 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33990 if (!ep) {
33991- atomic_inc(&mp->stats.xid_not_found);
33992+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33993 goto out;
33994 }
33995 if (ep->esb_stat & ESB_ST_COMPLETE) {
33996- atomic_inc(&mp->stats.xid_not_found);
33997+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33998 goto rel;
33999 }
34000 if (ep->rxid == FC_XID_UNKNOWN)
34001 ep->rxid = ntohs(fh->fh_rx_id);
34002 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34003- atomic_inc(&mp->stats.xid_not_found);
34004+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34005 goto rel;
34006 }
34007 if (ep->did != ntoh24(fh->fh_s_id) &&
34008 ep->did != FC_FID_FLOGI) {
34009- atomic_inc(&mp->stats.xid_not_found);
34010+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34011 goto rel;
34012 }
34013 sof = fr_sof(fp);
34014@@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct
34015 sp->ssb_stat |= SSB_ST_RESP;
34016 sp->id = fh->fh_seq_id;
34017 } else if (sp->id != fh->fh_seq_id) {
34018- atomic_inc(&mp->stats.seq_not_found);
34019+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34020 goto rel;
34021 }
34022
34023@@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_
34024 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34025
34026 if (!sp)
34027- atomic_inc(&mp->stats.xid_not_found);
34028+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34029 else
34030- atomic_inc(&mp->stats.non_bls_resp);
34031+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34032
34033 fc_frame_free(fp);
34034 }
34035diff -urNp linux-3.1.1/drivers/scsi/libsas/sas_ata.c linux-3.1.1/drivers/scsi/libsas/sas_ata.c
34036--- linux-3.1.1/drivers/scsi/libsas/sas_ata.c 2011-11-11 15:19:27.000000000 -0500
34037+++ linux-3.1.1/drivers/scsi/libsas/sas_ata.c 2011-11-16 18:39:07.000000000 -0500
34038@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
34039 .postreset = ata_std_postreset,
34040 .error_handler = ata_std_error_handler,
34041 .post_internal_cmd = sas_ata_post_internal,
34042- .qc_defer = ata_std_qc_defer,
34043+ .qc_defer = ata_std_qc_defer,
34044 .qc_prep = ata_noop_qc_prep,
34045 .qc_issue = sas_ata_qc_issue,
34046 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34047diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c
34048--- linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-11 15:19:27.000000000 -0500
34049+++ linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-16 18:40:22.000000000 -0500
34050@@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
34051
34052 #include <linux/debugfs.h>
34053
34054-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34055+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34056 static unsigned long lpfc_debugfs_start_time = 0L;
34057
34058 /* iDiag */
34059@@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
34060 lpfc_debugfs_enable = 0;
34061
34062 len = 0;
34063- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34064+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34065 (lpfc_debugfs_max_disc_trc - 1);
34066 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34067 dtp = vport->disc_trc + i;
34068@@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
34069 lpfc_debugfs_enable = 0;
34070
34071 len = 0;
34072- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34073+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34074 (lpfc_debugfs_max_slow_ring_trc - 1);
34075 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34076 dtp = phba->slow_ring_trc + i;
34077@@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
34078 !vport || !vport->disc_trc)
34079 return;
34080
34081- index = atomic_inc_return(&vport->disc_trc_cnt) &
34082+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34083 (lpfc_debugfs_max_disc_trc - 1);
34084 dtp = vport->disc_trc + index;
34085 dtp->fmt = fmt;
34086 dtp->data1 = data1;
34087 dtp->data2 = data2;
34088 dtp->data3 = data3;
34089- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34090+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34091 dtp->jif = jiffies;
34092 #endif
34093 return;
34094@@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
34095 !phba || !phba->slow_ring_trc)
34096 return;
34097
34098- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34099+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34100 (lpfc_debugfs_max_slow_ring_trc - 1);
34101 dtp = phba->slow_ring_trc + index;
34102 dtp->fmt = fmt;
34103 dtp->data1 = data1;
34104 dtp->data2 = data2;
34105 dtp->data3 = data3;
34106- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34107+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34108 dtp->jif = jiffies;
34109 #endif
34110 return;
34111@@ -3828,7 +3828,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34112 "slow_ring buffer\n");
34113 goto debug_failed;
34114 }
34115- atomic_set(&phba->slow_ring_trc_cnt, 0);
34116+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34117 memset(phba->slow_ring_trc, 0,
34118 (sizeof(struct lpfc_debugfs_trc) *
34119 lpfc_debugfs_max_slow_ring_trc));
34120@@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34121 "buffer\n");
34122 goto debug_failed;
34123 }
34124- atomic_set(&vport->disc_trc_cnt, 0);
34125+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34126
34127 snprintf(name, sizeof(name), "discovery_trace");
34128 vport->debug_disc_trc =
34129diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc.h linux-3.1.1/drivers/scsi/lpfc/lpfc.h
34130--- linux-3.1.1/drivers/scsi/lpfc/lpfc.h 2011-11-11 15:19:27.000000000 -0500
34131+++ linux-3.1.1/drivers/scsi/lpfc/lpfc.h 2011-11-16 18:39:07.000000000 -0500
34132@@ -425,7 +425,7 @@ struct lpfc_vport {
34133 struct dentry *debug_nodelist;
34134 struct dentry *vport_debugfs_root;
34135 struct lpfc_debugfs_trc *disc_trc;
34136- atomic_t disc_trc_cnt;
34137+ atomic_unchecked_t disc_trc_cnt;
34138 #endif
34139 uint8_t stat_data_enabled;
34140 uint8_t stat_data_blocked;
34141@@ -835,8 +835,8 @@ struct lpfc_hba {
34142 struct timer_list fabric_block_timer;
34143 unsigned long bit_flags;
34144 #define FABRIC_COMANDS_BLOCKED 0
34145- atomic_t num_rsrc_err;
34146- atomic_t num_cmd_success;
34147+ atomic_unchecked_t num_rsrc_err;
34148+ atomic_unchecked_t num_cmd_success;
34149 unsigned long last_rsrc_error_time;
34150 unsigned long last_ramp_down_time;
34151 unsigned long last_ramp_up_time;
34152@@ -850,7 +850,7 @@ struct lpfc_hba {
34153 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
34154 struct dentry *debug_slow_ring_trc;
34155 struct lpfc_debugfs_trc *slow_ring_trc;
34156- atomic_t slow_ring_trc_cnt;
34157+ atomic_unchecked_t slow_ring_trc_cnt;
34158 /* iDiag debugfs sub-directory */
34159 struct dentry *idiag_root;
34160 struct dentry *idiag_pci_cfg;
34161diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c
34162--- linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c 2011-11-11 15:19:27.000000000 -0500
34163+++ linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c 2011-11-16 18:39:07.000000000 -0500
34164@@ -9969,8 +9969,10 @@ lpfc_init(void)
34165 printk(LPFC_COPYRIGHT "\n");
34166
34167 if (lpfc_enable_npiv) {
34168- lpfc_transport_functions.vport_create = lpfc_vport_create;
34169- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34170+ pax_open_kernel();
34171+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34172+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34173+ pax_close_kernel();
34174 }
34175 lpfc_transport_template =
34176 fc_attach_transport(&lpfc_transport_functions);
34177diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c
34178--- linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-11 15:19:27.000000000 -0500
34179+++ linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-16 18:39:07.000000000 -0500
34180@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
34181 uint32_t evt_posted;
34182
34183 spin_lock_irqsave(&phba->hbalock, flags);
34184- atomic_inc(&phba->num_rsrc_err);
34185+ atomic_inc_unchecked(&phba->num_rsrc_err);
34186 phba->last_rsrc_error_time = jiffies;
34187
34188 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34189@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
34190 unsigned long flags;
34191 struct lpfc_hba *phba = vport->phba;
34192 uint32_t evt_posted;
34193- atomic_inc(&phba->num_cmd_success);
34194+ atomic_inc_unchecked(&phba->num_cmd_success);
34195
34196 if (vport->cfg_lun_queue_depth <= queue_depth)
34197 return;
34198@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34199 unsigned long num_rsrc_err, num_cmd_success;
34200 int i;
34201
34202- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34203- num_cmd_success = atomic_read(&phba->num_cmd_success);
34204+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34205+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34206
34207 vports = lpfc_create_vport_work_array(phba);
34208 if (vports != NULL)
34209@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34210 }
34211 }
34212 lpfc_destroy_vport_work_array(phba, vports);
34213- atomic_set(&phba->num_rsrc_err, 0);
34214- atomic_set(&phba->num_cmd_success, 0);
34215+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34216+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34217 }
34218
34219 /**
34220@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
34221 }
34222 }
34223 lpfc_destroy_vport_work_array(phba, vports);
34224- atomic_set(&phba->num_rsrc_err, 0);
34225- atomic_set(&phba->num_cmd_success, 0);
34226+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34227+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34228 }
34229
34230 /**
34231diff -urNp linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c
34232--- linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-11 15:19:27.000000000 -0500
34233+++ linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-16 18:40:22.000000000 -0500
34234@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
34235 int rval;
34236 int i;
34237
34238+ pax_track_stack();
34239+
34240 // Allocate memory for the base list of scb for management module.
34241 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
34242
34243diff -urNp linux-3.1.1/drivers/scsi/osd/osd_initiator.c linux-3.1.1/drivers/scsi/osd/osd_initiator.c
34244--- linux-3.1.1/drivers/scsi/osd/osd_initiator.c 2011-11-11 15:19:27.000000000 -0500
34245+++ linux-3.1.1/drivers/scsi/osd/osd_initiator.c 2011-11-16 18:40:22.000000000 -0500
34246@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
34247 int nelem = ARRAY_SIZE(get_attrs), a = 0;
34248 int ret;
34249
34250+ pax_track_stack();
34251+
34252 or = osd_start_request(od, GFP_KERNEL);
34253 if (!or)
34254 return -ENOMEM;
34255diff -urNp linux-3.1.1/drivers/scsi/pmcraid.c linux-3.1.1/drivers/scsi/pmcraid.c
34256--- linux-3.1.1/drivers/scsi/pmcraid.c 2011-11-11 15:19:27.000000000 -0500
34257+++ linux-3.1.1/drivers/scsi/pmcraid.c 2011-11-16 18:39:07.000000000 -0500
34258@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
34259 res->scsi_dev = scsi_dev;
34260 scsi_dev->hostdata = res;
34261 res->change_detected = 0;
34262- atomic_set(&res->read_failures, 0);
34263- atomic_set(&res->write_failures, 0);
34264+ atomic_set_unchecked(&res->read_failures, 0);
34265+ atomic_set_unchecked(&res->write_failures, 0);
34266 rc = 0;
34267 }
34268 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34269@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
34270
34271 /* If this was a SCSI read/write command keep count of errors */
34272 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34273- atomic_inc(&res->read_failures);
34274+ atomic_inc_unchecked(&res->read_failures);
34275 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34276- atomic_inc(&res->write_failures);
34277+ atomic_inc_unchecked(&res->write_failures);
34278
34279 if (!RES_IS_GSCSI(res->cfg_entry) &&
34280 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34281@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
34282 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34283 * hrrq_id assigned here in queuecommand
34284 */
34285- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34286+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34287 pinstance->num_hrrq;
34288 cmd->cmd_done = pmcraid_io_done;
34289
34290@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
34291 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34292 * hrrq_id assigned here in queuecommand
34293 */
34294- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34295+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34296 pinstance->num_hrrq;
34297
34298 if (request_size) {
34299@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
34300
34301 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34302 /* add resources only after host is added into system */
34303- if (!atomic_read(&pinstance->expose_resources))
34304+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34305 return;
34306
34307 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34308@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
34309 init_waitqueue_head(&pinstance->reset_wait_q);
34310
34311 atomic_set(&pinstance->outstanding_cmds, 0);
34312- atomic_set(&pinstance->last_message_id, 0);
34313- atomic_set(&pinstance->expose_resources, 0);
34314+ atomic_set_unchecked(&pinstance->last_message_id, 0);
34315+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34316
34317 INIT_LIST_HEAD(&pinstance->free_res_q);
34318 INIT_LIST_HEAD(&pinstance->used_res_q);
34319@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
34320 /* Schedule worker thread to handle CCN and take care of adding and
34321 * removing devices to OS
34322 */
34323- atomic_set(&pinstance->expose_resources, 1);
34324+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34325 schedule_work(&pinstance->worker_q);
34326 return rc;
34327
34328diff -urNp linux-3.1.1/drivers/scsi/pmcraid.h linux-3.1.1/drivers/scsi/pmcraid.h
34329--- linux-3.1.1/drivers/scsi/pmcraid.h 2011-11-11 15:19:27.000000000 -0500
34330+++ linux-3.1.1/drivers/scsi/pmcraid.h 2011-11-16 18:39:07.000000000 -0500
34331@@ -749,7 +749,7 @@ struct pmcraid_instance {
34332 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34333
34334 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34335- atomic_t last_message_id;
34336+ atomic_unchecked_t last_message_id;
34337
34338 /* configuration table */
34339 struct pmcraid_config_table *cfg_table;
34340@@ -778,7 +778,7 @@ struct pmcraid_instance {
34341 atomic_t outstanding_cmds;
34342
34343 /* should add/delete resources to mid-layer now ?*/
34344- atomic_t expose_resources;
34345+ atomic_unchecked_t expose_resources;
34346
34347
34348
34349@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
34350 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34351 };
34352 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34353- atomic_t read_failures; /* count of failed READ commands */
34354- atomic_t write_failures; /* count of failed WRITE commands */
34355+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34356+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34357
34358 /* To indicate add/delete/modify during CCN */
34359 u8 change_detected;
34360diff -urNp linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h
34361--- linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h 2011-11-11 15:19:27.000000000 -0500
34362+++ linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h 2011-11-16 18:39:07.000000000 -0500
34363@@ -2244,7 +2244,7 @@ struct isp_operations {
34364 int (*get_flash_version) (struct scsi_qla_host *, void *);
34365 int (*start_scsi) (srb_t *);
34366 int (*abort_isp) (struct scsi_qla_host *);
34367-};
34368+} __no_const;
34369
34370 /* MSI-X Support *************************************************************/
34371
34372diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h
34373--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h 2011-11-11 15:19:27.000000000 -0500
34374+++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h 2011-11-16 18:39:07.000000000 -0500
34375@@ -256,7 +256,7 @@ struct ddb_entry {
34376 atomic_t retry_relogin_timer; /* Min Time between relogins
34377 * (4000 only) */
34378 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34379- atomic_t relogin_retry_count; /* Num of times relogin has been
34380+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34381 * retried */
34382
34383 uint16_t port;
34384diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c
34385--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c 2011-11-11 15:19:27.000000000 -0500
34386+++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c 2011-11-16 18:39:07.000000000 -0500
34387@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
34388 ddb_entry->fw_ddb_index = fw_ddb_index;
34389 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34390 atomic_set(&ddb_entry->relogin_timer, 0);
34391- atomic_set(&ddb_entry->relogin_retry_count, 0);
34392+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34393 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34394 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34395 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34396@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
34397 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
34398 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
34399 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34400- atomic_set(&ddb_entry->relogin_retry_count, 0);
34401+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34402 atomic_set(&ddb_entry->relogin_timer, 0);
34403 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34404 iscsi_unblock_session(ddb_entry->sess);
34405diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c
34406--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c 2011-11-11 15:19:27.000000000 -0500
34407+++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c 2011-11-16 18:39:07.000000000 -0500
34408@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
34409 ddb_entry->fw_ddb_device_state ==
34410 DDB_DS_SESSION_FAILED) {
34411 /* Reset retry relogin timer */
34412- atomic_inc(&ddb_entry->relogin_retry_count);
34413+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34414 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
34415 " timed out-retrying"
34416 " relogin (%d)\n",
34417 ha->host_no,
34418 ddb_entry->fw_ddb_index,
34419- atomic_read(&ddb_entry->
34420+ atomic_read_unchecked(&ddb_entry->
34421 relogin_retry_count))
34422 );
34423 start_dpc++;
34424diff -urNp linux-3.1.1/drivers/scsi/scsi.c linux-3.1.1/drivers/scsi/scsi.c
34425--- linux-3.1.1/drivers/scsi/scsi.c 2011-11-11 15:19:27.000000000 -0500
34426+++ linux-3.1.1/drivers/scsi/scsi.c 2011-11-16 18:39:07.000000000 -0500
34427@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34428 unsigned long timeout;
34429 int rtn = 0;
34430
34431- atomic_inc(&cmd->device->iorequest_cnt);
34432+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34433
34434 /* check if the device is still usable */
34435 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34436diff -urNp linux-3.1.1/drivers/scsi/scsi_debug.c linux-3.1.1/drivers/scsi/scsi_debug.c
34437--- linux-3.1.1/drivers/scsi/scsi_debug.c 2011-11-11 15:19:27.000000000 -0500
34438+++ linux-3.1.1/drivers/scsi/scsi_debug.c 2011-11-16 18:40:22.000000000 -0500
34439@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
34440 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34441 unsigned char *cmd = (unsigned char *)scp->cmnd;
34442
34443+ pax_track_stack();
34444+
34445 if ((errsts = check_readiness(scp, 1, devip)))
34446 return errsts;
34447 memset(arr, 0, sizeof(arr));
34448@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
34449 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34450 unsigned char *cmd = (unsigned char *)scp->cmnd;
34451
34452+ pax_track_stack();
34453+
34454 if ((errsts = check_readiness(scp, 1, devip)))
34455 return errsts;
34456 memset(arr, 0, sizeof(arr));
34457diff -urNp linux-3.1.1/drivers/scsi/scsi_lib.c linux-3.1.1/drivers/scsi/scsi_lib.c
34458--- linux-3.1.1/drivers/scsi/scsi_lib.c 2011-11-11 15:19:27.000000000 -0500
34459+++ linux-3.1.1/drivers/scsi/scsi_lib.c 2011-11-16 18:39:07.000000000 -0500
34460@@ -1413,7 +1413,7 @@ static void scsi_kill_request(struct req
34461 shost = sdev->host;
34462 scsi_init_cmd_errh(cmd);
34463 cmd->result = DID_NO_CONNECT << 16;
34464- atomic_inc(&cmd->device->iorequest_cnt);
34465+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34466
34467 /*
34468 * SCSI request completion path will do scsi_device_unbusy(),
34469@@ -1439,9 +1439,9 @@ static void scsi_softirq_done(struct req
34470
34471 INIT_LIST_HEAD(&cmd->eh_entry);
34472
34473- atomic_inc(&cmd->device->iodone_cnt);
34474+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34475 if (cmd->result)
34476- atomic_inc(&cmd->device->ioerr_cnt);
34477+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34478
34479 disposition = scsi_decide_disposition(cmd);
34480 if (disposition != SUCCESS &&
34481diff -urNp linux-3.1.1/drivers/scsi/scsi_sysfs.c linux-3.1.1/drivers/scsi/scsi_sysfs.c
34482--- linux-3.1.1/drivers/scsi/scsi_sysfs.c 2011-11-11 15:19:27.000000000 -0500
34483+++ linux-3.1.1/drivers/scsi/scsi_sysfs.c 2011-11-16 18:39:07.000000000 -0500
34484@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
34485 char *buf) \
34486 { \
34487 struct scsi_device *sdev = to_scsi_device(dev); \
34488- unsigned long long count = atomic_read(&sdev->field); \
34489+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34490 return snprintf(buf, 20, "0x%llx\n", count); \
34491 } \
34492 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34493diff -urNp linux-3.1.1/drivers/scsi/scsi_tgt_lib.c linux-3.1.1/drivers/scsi/scsi_tgt_lib.c
34494--- linux-3.1.1/drivers/scsi/scsi_tgt_lib.c 2011-11-11 15:19:27.000000000 -0500
34495+++ linux-3.1.1/drivers/scsi/scsi_tgt_lib.c 2011-11-16 18:39:07.000000000 -0500
34496@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
34497 int err;
34498
34499 dprintk("%lx %u\n", uaddr, len);
34500- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34501+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34502 if (err) {
34503 /*
34504 * TODO: need to fixup sg_tablesize, max_segment_size,
34505diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_fc.c linux-3.1.1/drivers/scsi/scsi_transport_fc.c
34506--- linux-3.1.1/drivers/scsi/scsi_transport_fc.c 2011-11-11 15:19:27.000000000 -0500
34507+++ linux-3.1.1/drivers/scsi/scsi_transport_fc.c 2011-11-16 18:39:07.000000000 -0500
34508@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
34509 * Netlink Infrastructure
34510 */
34511
34512-static atomic_t fc_event_seq;
34513+static atomic_unchecked_t fc_event_seq;
34514
34515 /**
34516 * fc_get_event_number - Obtain the next sequential FC event number
34517@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34518 u32
34519 fc_get_event_number(void)
34520 {
34521- return atomic_add_return(1, &fc_event_seq);
34522+ return atomic_add_return_unchecked(1, &fc_event_seq);
34523 }
34524 EXPORT_SYMBOL(fc_get_event_number);
34525
34526@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
34527 {
34528 int error;
34529
34530- atomic_set(&fc_event_seq, 0);
34531+ atomic_set_unchecked(&fc_event_seq, 0);
34532
34533 error = transport_class_register(&fc_host_class);
34534 if (error)
34535@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
34536 char *cp;
34537
34538 *val = simple_strtoul(buf, &cp, 0);
34539- if ((*cp && (*cp != '\n')) || (*val < 0))
34540+ if (*cp && (*cp != '\n'))
34541 return -EINVAL;
34542 /*
34543 * Check for overflow; dev_loss_tmo is u32
34544diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c
34545--- linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c 2011-11-11 15:19:27.000000000 -0500
34546+++ linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c 2011-11-16 18:39:07.000000000 -0500
34547@@ -83,7 +83,7 @@ struct iscsi_internal {
34548 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34549 };
34550
34551-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34552+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34553 static struct workqueue_struct *iscsi_eh_timer_workq;
34554
34555 /*
34556@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
34557 int err;
34558
34559 ihost = shost->shost_data;
34560- session->sid = atomic_add_return(1, &iscsi_session_nr);
34561+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34562
34563 if (id == ISCSI_MAX_TARGET) {
34564 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34565@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
34566 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34567 ISCSI_TRANSPORT_VERSION);
34568
34569- atomic_set(&iscsi_session_nr, 0);
34570+ atomic_set_unchecked(&iscsi_session_nr, 0);
34571
34572 err = class_register(&iscsi_transport_class);
34573 if (err)
34574diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_srp.c linux-3.1.1/drivers/scsi/scsi_transport_srp.c
34575--- linux-3.1.1/drivers/scsi/scsi_transport_srp.c 2011-11-11 15:19:27.000000000 -0500
34576+++ linux-3.1.1/drivers/scsi/scsi_transport_srp.c 2011-11-16 18:39:07.000000000 -0500
34577@@ -33,7 +33,7 @@
34578 #include "scsi_transport_srp_internal.h"
34579
34580 struct srp_host_attrs {
34581- atomic_t next_port_id;
34582+ atomic_unchecked_t next_port_id;
34583 };
34584 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34585
34586@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34587 struct Scsi_Host *shost = dev_to_shost(dev);
34588 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34589
34590- atomic_set(&srp_host->next_port_id, 0);
34591+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34592 return 0;
34593 }
34594
34595@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34596 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34597 rport->roles = ids->roles;
34598
34599- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34600+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34601 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34602
34603 transport_setup_device(&rport->dev);
34604diff -urNp linux-3.1.1/drivers/scsi/sg.c linux-3.1.1/drivers/scsi/sg.c
34605--- linux-3.1.1/drivers/scsi/sg.c 2011-11-11 15:19:27.000000000 -0500
34606+++ linux-3.1.1/drivers/scsi/sg.c 2011-11-16 18:39:07.000000000 -0500
34607@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
34608 sdp->disk->disk_name,
34609 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34610 NULL,
34611- (char *)arg);
34612+ (char __user *)arg);
34613 case BLKTRACESTART:
34614 return blk_trace_startstop(sdp->device->request_queue, 1);
34615 case BLKTRACESTOP:
34616@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
34617 const struct file_operations * fops;
34618 };
34619
34620-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34621+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34622 {"allow_dio", &adio_fops},
34623 {"debug", &debug_fops},
34624 {"def_reserved_size", &dressz_fops},
34625@@ -2325,7 +2325,7 @@ sg_proc_init(void)
34626 {
34627 int k, mask;
34628 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34629- struct sg_proc_leaf * leaf;
34630+ const struct sg_proc_leaf * leaf;
34631
34632 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34633 if (!sg_proc_sgp)
34634diff -urNp linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c
34635--- linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-11 15:19:27.000000000 -0500
34636+++ linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-16 18:40:22.000000000 -0500
34637@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
34638 int do_iounmap = 0;
34639 int do_disable_device = 1;
34640
34641+ pax_track_stack();
34642+
34643 memset(&sym_dev, 0, sizeof(sym_dev));
34644 memset(&nvram, 0, sizeof(nvram));
34645 sym_dev.pdev = pdev;
34646diff -urNp linux-3.1.1/drivers/scsi/vmw_pvscsi.c linux-3.1.1/drivers/scsi/vmw_pvscsi.c
34647--- linux-3.1.1/drivers/scsi/vmw_pvscsi.c 2011-11-11 15:19:27.000000000 -0500
34648+++ linux-3.1.1/drivers/scsi/vmw_pvscsi.c 2011-11-16 18:40:22.000000000 -0500
34649@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
34650 dma_addr_t base;
34651 unsigned i;
34652
34653+ pax_track_stack();
34654+
34655 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
34656 cmd.reqRingNumPages = adapter->req_pages;
34657 cmd.cmpRingNumPages = adapter->cmp_pages;
34658diff -urNp linux-3.1.1/drivers/spi/spi.c linux-3.1.1/drivers/spi/spi.c
34659--- linux-3.1.1/drivers/spi/spi.c 2011-11-11 15:19:27.000000000 -0500
34660+++ linux-3.1.1/drivers/spi/spi.c 2011-11-16 18:39:07.000000000 -0500
34661@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
34662 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34663
34664 /* portable code must never pass more than 32 bytes */
34665-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34666+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34667
34668 static u8 *buf;
34669
34670diff -urNp linux-3.1.1/drivers/spi/spi-dw-pci.c linux-3.1.1/drivers/spi/spi-dw-pci.c
34671--- linux-3.1.1/drivers/spi/spi-dw-pci.c 2011-11-11 15:19:27.000000000 -0500
34672+++ linux-3.1.1/drivers/spi/spi-dw-pci.c 2011-11-16 18:39:07.000000000 -0500
34673@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pd
34674 #define spi_resume NULL
34675 #endif
34676
34677-static const struct pci_device_id pci_ids[] __devinitdata = {
34678+static const struct pci_device_id pci_ids[] __devinitconst = {
34679 /* Intel MID platform SPI controller 0 */
34680 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34681 {},
34682diff -urNp linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c
34683--- linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-11-11 15:19:27.000000000 -0500
34684+++ linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-11-16 18:39:07.000000000 -0500
34685@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
34686 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
34687
34688
34689-static struct net_device_ops ar6000_netdev_ops = {
34690+static net_device_ops_no_const ar6000_netdev_ops = {
34691 .ndo_init = NULL,
34692 .ndo_open = ar6000_open,
34693 .ndo_stop = ar6000_close,
34694diff -urNp linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
34695--- linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-11-11 15:19:27.000000000 -0500
34696+++ linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-11-16 18:39:07.000000000 -0500
34697@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
34698 typedef struct ar6k_pal_config_s
34699 {
34700 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
34701-}ar6k_pal_config_t;
34702+} __no_const ar6k_pal_config_t;
34703
34704 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
34705 #endif /* _AR6K_PAL_H_ */
34706diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
34707--- linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-11-11 15:19:27.000000000 -0500
34708+++ linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-11-16 18:39:07.000000000 -0500
34709@@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if
34710 free_netdev(ifp->net);
34711 }
34712 /* Allocate etherdev, including space for private structure */
34713- ifp->net = alloc_etherdev(sizeof(drvr_priv));
34714+ ifp->net = alloc_etherdev(sizeof(*drvr_priv));
34715 if (!ifp->net) {
34716 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34717 ret = -ENOMEM;
34718 }
34719 if (ret == 0) {
34720 strcpy(ifp->net->name, ifp->name);
34721- memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
34722+ memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv));
34723 err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
34724 if (err != 0) {
34725 BRCMF_ERROR(("%s: brcmf_net_attach failed, "
34726@@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct br
34727 BRCMF_TRACE(("%s: Enter\n", __func__));
34728
34729 /* Allocate etherdev, including space for private structure */
34730- net = alloc_etherdev(sizeof(drvr_priv));
34731+ net = alloc_etherdev(sizeof(*drvr_priv));
34732 if (!net) {
34733 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34734 goto fail;
34735@@ -1295,7 +1295,7 @@ struct brcmf_pub *brcmf_attach(struct br
34736 /*
34737 * Save the brcmf_info into the priv
34738 */
34739- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
34740+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
34741
34742 /* Set network interface name if it was provided as module parameter */
34743 if (iface_name[0]) {
34744@@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct br
34745 /*
34746 * Save the brcmf_info into the priv
34747 */
34748- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
34749+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
34750
34751 #if defined(CONFIG_PM_SLEEP)
34752 atomic_set(&brcmf_mmc_suspend, false);
34753diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h
34754--- linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h 2011-11-11 15:19:27.000000000 -0500
34755+++ linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h 2011-11-16 18:39:07.000000000 -0500
34756@@ -263,7 +263,7 @@ struct brcmf_sdioh_driver {
34757 u16 func, uint bustype, u32 regsva, void *param);
34758 /* detach from device */
34759 void (*detach) (void *ch);
34760-};
34761+} __no_const;
34762
34763 struct sdioh_info;
34764
34765diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
34766--- linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h 2011-11-11 15:19:27.000000000 -0500
34767+++ linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h 2011-11-16 18:39:07.000000000 -0500
34768@@ -591,7 +591,7 @@ struct phy_func_ptr {
34769 initfn_t carrsuppr;
34770 rxsigpwrfn_t rxsigpwr;
34771 detachfn_t detach;
34772-};
34773+} __no_const;
34774
34775 struct brcms_phy {
34776 struct brcms_phy_pub pubpi_ro;
34777diff -urNp linux-3.1.1/drivers/staging/et131x/et1310_tx.c linux-3.1.1/drivers/staging/et131x/et1310_tx.c
34778--- linux-3.1.1/drivers/staging/et131x/et1310_tx.c 2011-11-11 15:19:27.000000000 -0500
34779+++ linux-3.1.1/drivers/staging/et131x/et1310_tx.c 2011-11-16 18:39:07.000000000 -0500
34780@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
34781 struct net_device_stats *stats = &etdev->net_stats;
34782
34783 if (tcb->flags & fMP_DEST_BROAD)
34784- atomic_inc(&etdev->stats.brdcstxmt);
34785+ atomic_inc_unchecked(&etdev->stats.brdcstxmt);
34786 else if (tcb->flags & fMP_DEST_MULTI)
34787- atomic_inc(&etdev->stats.multixmt);
34788+ atomic_inc_unchecked(&etdev->stats.multixmt);
34789 else
34790- atomic_inc(&etdev->stats.unixmt);
34791+ atomic_inc_unchecked(&etdev->stats.unixmt);
34792
34793 if (tcb->skb) {
34794 stats->tx_bytes += tcb->skb->len;
34795diff -urNp linux-3.1.1/drivers/staging/et131x/et131x_adapter.h linux-3.1.1/drivers/staging/et131x/et131x_adapter.h
34796--- linux-3.1.1/drivers/staging/et131x/et131x_adapter.h 2011-11-11 15:19:27.000000000 -0500
34797+++ linux-3.1.1/drivers/staging/et131x/et131x_adapter.h 2011-11-16 18:39:07.000000000 -0500
34798@@ -106,11 +106,11 @@ struct ce_stats {
34799 * operations
34800 */
34801 u32 unircv; /* # multicast packets received */
34802- atomic_t unixmt; /* # multicast packets for Tx */
34803+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34804 u32 multircv; /* # multicast packets received */
34805- atomic_t multixmt; /* # multicast packets for Tx */
34806+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34807 u32 brdcstrcv; /* # broadcast packets received */
34808- atomic_t brdcstxmt; /* # broadcast packets for Tx */
34809+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34810 u32 norcvbuf; /* # Rx packets discarded */
34811 u32 noxmtbuf; /* # Tx packets discarded */
34812
34813diff -urNp linux-3.1.1/drivers/staging/hv/channel.c linux-3.1.1/drivers/staging/hv/channel.c
34814--- linux-3.1.1/drivers/staging/hv/channel.c 2011-11-11 15:19:27.000000000 -0500
34815+++ linux-3.1.1/drivers/staging/hv/channel.c 2011-11-16 18:39:07.000000000 -0500
34816@@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_c
34817 int ret = 0;
34818 int t;
34819
34820- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
34821- atomic_inc(&vmbus_connection.next_gpadl_handle);
34822+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
34823+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
34824
34825 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
34826 if (ret)
34827diff -urNp linux-3.1.1/drivers/staging/hv/hv.c linux-3.1.1/drivers/staging/hv/hv.c
34828--- linux-3.1.1/drivers/staging/hv/hv.c 2011-11-11 15:19:27.000000000 -0500
34829+++ linux-3.1.1/drivers/staging/hv/hv.c 2011-11-16 18:39:07.000000000 -0500
34830@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
34831 u64 output_address = (output) ? virt_to_phys(output) : 0;
34832 u32 output_address_hi = output_address >> 32;
34833 u32 output_address_lo = output_address & 0xFFFFFFFF;
34834- volatile void *hypercall_page = hv_context.hypercall_page;
34835+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
34836
34837 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
34838 "=a"(hv_status_lo) : "d" (control_hi),
34839diff -urNp linux-3.1.1/drivers/staging/hv/hv_mouse.c linux-3.1.1/drivers/staging/hv/hv_mouse.c
34840--- linux-3.1.1/drivers/staging/hv/hv_mouse.c 2011-11-11 15:19:27.000000000 -0500
34841+++ linux-3.1.1/drivers/staging/hv/hv_mouse.c 2011-11-16 18:39:07.000000000 -0500
34842@@ -878,8 +878,10 @@ static void reportdesc_callback(struct h
34843 if (hid_dev) {
34844 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
34845
34846- hid_dev->ll_driver->open = mousevsc_hid_open;
34847- hid_dev->ll_driver->close = mousevsc_hid_close;
34848+ pax_open_kernel();
34849+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
34850+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
34851+ pax_close_kernel();
34852
34853 hid_dev->bus = BUS_VIRTUAL;
34854 hid_dev->vendor = input_device_ctx->device_info.vendor;
34855diff -urNp linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h
34856--- linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h 2011-11-11 15:19:27.000000000 -0500
34857+++ linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h 2011-11-16 18:39:07.000000000 -0500
34858@@ -559,7 +559,7 @@ enum vmbus_connect_state {
34859 struct vmbus_connection {
34860 enum vmbus_connect_state conn_state;
34861
34862- atomic_t next_gpadl_handle;
34863+ atomic_unchecked_t next_gpadl_handle;
34864
34865 /*
34866 * Represents channel interrupts. Each bit position represents a
34867diff -urNp linux-3.1.1/drivers/staging/hv/rndis_filter.c linux-3.1.1/drivers/staging/hv/rndis_filter.c
34868--- linux-3.1.1/drivers/staging/hv/rndis_filter.c 2011-11-11 15:19:27.000000000 -0500
34869+++ linux-3.1.1/drivers/staging/hv/rndis_filter.c 2011-11-16 18:39:07.000000000 -0500
34870@@ -43,7 +43,7 @@ struct rndis_device {
34871
34872 enum rndis_device_state state;
34873 u32 link_stat;
34874- atomic_t new_req_id;
34875+ atomic_unchecked_t new_req_id;
34876
34877 spinlock_t request_lock;
34878 struct list_head req_list;
34879@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
34880 * template
34881 */
34882 set = &rndis_msg->msg.set_req;
34883- set->req_id = atomic_inc_return(&dev->new_req_id);
34884+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34885
34886 /* Add to the request list */
34887 spin_lock_irqsave(&dev->request_lock, flags);
34888@@ -622,7 +622,7 @@ static void rndis_filter_halt_device(str
34889
34890 /* Setup the rndis set */
34891 halt = &request->request_msg.msg.halt_req;
34892- halt->req_id = atomic_inc_return(&dev->new_req_id);
34893+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34894
34895 /* Ignore return since this msg is optional. */
34896 rndis_filter_send_request(dev, request);
34897diff -urNp linux-3.1.1/drivers/staging/hv/vmbus_drv.c linux-3.1.1/drivers/staging/hv/vmbus_drv.c
34898--- linux-3.1.1/drivers/staging/hv/vmbus_drv.c 2011-11-11 15:19:27.000000000 -0500
34899+++ linux-3.1.1/drivers/staging/hv/vmbus_drv.c 2011-11-16 18:39:07.000000000 -0500
34900@@ -660,11 +660,11 @@ int vmbus_child_device_register(struct h
34901 {
34902 int ret = 0;
34903
34904- static atomic_t device_num = ATOMIC_INIT(0);
34905+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34906
34907 /* Set the device name. Otherwise, device_register() will fail. */
34908 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
34909- atomic_inc_return(&device_num));
34910+ atomic_inc_return_unchecked(&device_num));
34911
34912 /* The new device belongs to this bus */
34913 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
34914diff -urNp linux-3.1.1/drivers/staging/iio/ring_generic.h linux-3.1.1/drivers/staging/iio/ring_generic.h
34915--- linux-3.1.1/drivers/staging/iio/ring_generic.h 2011-11-11 15:19:27.000000000 -0500
34916+++ linux-3.1.1/drivers/staging/iio/ring_generic.h 2011-11-16 18:39:07.000000000 -0500
34917@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
34918
34919 int (*is_enabled)(struct iio_ring_buffer *ring);
34920 int (*enable)(struct iio_ring_buffer *ring);
34921-};
34922+} __no_const;
34923
34924 struct iio_ring_setup_ops {
34925 int (*preenable)(struct iio_dev *);
34926diff -urNp linux-3.1.1/drivers/staging/mei/interface.c linux-3.1.1/drivers/staging/mei/interface.c
34927--- linux-3.1.1/drivers/staging/mei/interface.c 2011-11-11 15:19:27.000000000 -0500
34928+++ linux-3.1.1/drivers/staging/mei/interface.c 2011-11-17 18:39:18.000000000 -0500
34929@@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_dev
34930 mei_hdr->reserved = 0;
34931
34932 mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
34933- memset(mei_flow_control, 0, sizeof(mei_flow_control));
34934+ memset(mei_flow_control, 0, sizeof(*mei_flow_control));
34935 mei_flow_control->host_addr = cl->host_client_id;
34936 mei_flow_control->me_addr = cl->me_client_id;
34937 mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
34938@@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *de
34939
34940 mei_cli_disconnect =
34941 (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
34942- memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
34943+ memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
34944 mei_cli_disconnect->host_addr = cl->host_client_id;
34945 mei_cli_disconnect->me_addr = cl->me_client_id;
34946 mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
34947diff -urNp linux-3.1.1/drivers/staging/octeon/ethernet.c linux-3.1.1/drivers/staging/octeon/ethernet.c
34948--- linux-3.1.1/drivers/staging/octeon/ethernet.c 2011-11-11 15:19:27.000000000 -0500
34949+++ linux-3.1.1/drivers/staging/octeon/ethernet.c 2011-11-16 18:39:07.000000000 -0500
34950@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
34951 * since the RX tasklet also increments it.
34952 */
34953 #ifdef CONFIG_64BIT
34954- atomic64_add(rx_status.dropped_packets,
34955- (atomic64_t *)&priv->stats.rx_dropped);
34956+ atomic64_add_unchecked(rx_status.dropped_packets,
34957+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34958 #else
34959- atomic_add(rx_status.dropped_packets,
34960- (atomic_t *)&priv->stats.rx_dropped);
34961+ atomic_add_unchecked(rx_status.dropped_packets,
34962+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
34963 #endif
34964 }
34965
34966diff -urNp linux-3.1.1/drivers/staging/octeon/ethernet-rx.c linux-3.1.1/drivers/staging/octeon/ethernet-rx.c
34967--- linux-3.1.1/drivers/staging/octeon/ethernet-rx.c 2011-11-11 15:19:27.000000000 -0500
34968+++ linux-3.1.1/drivers/staging/octeon/ethernet-rx.c 2011-11-16 18:39:07.000000000 -0500
34969@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi
34970 /* Increment RX stats for virtual ports */
34971 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34972 #ifdef CONFIG_64BIT
34973- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34974- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34975+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34976+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34977 #else
34978- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34979- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34980+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34981+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34982 #endif
34983 }
34984 netif_receive_skb(skb);
34985@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi
34986 dev->name);
34987 */
34988 #ifdef CONFIG_64BIT
34989- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34990+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34991 #else
34992- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34993+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34994 #endif
34995 dev_kfree_skb_irq(skb);
34996 }
34997diff -urNp linux-3.1.1/drivers/staging/pohmelfs/inode.c linux-3.1.1/drivers/staging/pohmelfs/inode.c
34998--- linux-3.1.1/drivers/staging/pohmelfs/inode.c 2011-11-11 15:19:27.000000000 -0500
34999+++ linux-3.1.1/drivers/staging/pohmelfs/inode.c 2011-11-16 18:39:07.000000000 -0500
35000@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct su
35001 mutex_init(&psb->mcache_lock);
35002 psb->mcache_root = RB_ROOT;
35003 psb->mcache_timeout = msecs_to_jiffies(5000);
35004- atomic_long_set(&psb->mcache_gen, 0);
35005+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35006
35007 psb->trans_max_pages = 100;
35008
35009@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct su
35010 INIT_LIST_HEAD(&psb->crypto_ready_list);
35011 INIT_LIST_HEAD(&psb->crypto_active_list);
35012
35013- atomic_set(&psb->trans_gen, 1);
35014+ atomic_set_unchecked(&psb->trans_gen, 1);
35015 atomic_long_set(&psb->total_inodes, 0);
35016
35017 mutex_init(&psb->state_lock);
35018diff -urNp linux-3.1.1/drivers/staging/pohmelfs/mcache.c linux-3.1.1/drivers/staging/pohmelfs/mcache.c
35019--- linux-3.1.1/drivers/staging/pohmelfs/mcache.c 2011-11-11 15:19:27.000000000 -0500
35020+++ linux-3.1.1/drivers/staging/pohmelfs/mcache.c 2011-11-16 18:39:07.000000000 -0500
35021@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
35022 m->data = data;
35023 m->start = start;
35024 m->size = size;
35025- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35026+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35027
35028 mutex_lock(&psb->mcache_lock);
35029 err = pohmelfs_mcache_insert(psb, m);
35030diff -urNp linux-3.1.1/drivers/staging/pohmelfs/netfs.h linux-3.1.1/drivers/staging/pohmelfs/netfs.h
35031--- linux-3.1.1/drivers/staging/pohmelfs/netfs.h 2011-11-11 15:19:27.000000000 -0500
35032+++ linux-3.1.1/drivers/staging/pohmelfs/netfs.h 2011-11-16 18:39:07.000000000 -0500
35033@@ -571,14 +571,14 @@ struct pohmelfs_config;
35034 struct pohmelfs_sb {
35035 struct rb_root mcache_root;
35036 struct mutex mcache_lock;
35037- atomic_long_t mcache_gen;
35038+ atomic_long_unchecked_t mcache_gen;
35039 unsigned long mcache_timeout;
35040
35041 unsigned int idx;
35042
35043 unsigned int trans_retries;
35044
35045- atomic_t trans_gen;
35046+ atomic_unchecked_t trans_gen;
35047
35048 unsigned int crypto_attached_size;
35049 unsigned int crypto_align_size;
35050diff -urNp linux-3.1.1/drivers/staging/pohmelfs/trans.c linux-3.1.1/drivers/staging/pohmelfs/trans.c
35051--- linux-3.1.1/drivers/staging/pohmelfs/trans.c 2011-11-11 15:19:27.000000000 -0500
35052+++ linux-3.1.1/drivers/staging/pohmelfs/trans.c 2011-11-16 18:39:07.000000000 -0500
35053@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
35054 int err;
35055 struct netfs_cmd *cmd = t->iovec.iov_base;
35056
35057- t->gen = atomic_inc_return(&psb->trans_gen);
35058+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35059
35060 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35061 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35062diff -urNp linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h
35063--- linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h 2011-11-11 15:19:27.000000000 -0500
35064+++ linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h 2011-11-16 18:39:07.000000000 -0500
35065@@ -83,7 +83,7 @@ struct _io_ops {
35066 u8 *pmem);
35067 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35068 u8 *pmem);
35069-};
35070+} __no_const;
35071
35072 struct io_req {
35073 struct list_head list;
35074diff -urNp linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c
35075--- linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c 2011-11-11 15:19:27.000000000 -0500
35076+++ linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c 2011-11-16 18:39:08.000000000 -0500
35077@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
35078 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35079
35080 if (rlen)
35081- if (copy_to_user(data, &resp, rlen))
35082+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35083 return -EFAULT;
35084
35085 return 0;
35086diff -urNp linux-3.1.1/drivers/staging/usbip/usbip_common.h linux-3.1.1/drivers/staging/usbip/usbip_common.h
35087--- linux-3.1.1/drivers/staging/usbip/usbip_common.h 2011-11-11 15:19:27.000000000 -0500
35088+++ linux-3.1.1/drivers/staging/usbip/usbip_common.h 2011-11-16 18:39:08.000000000 -0500
35089@@ -289,7 +289,7 @@ struct usbip_device {
35090 void (*shutdown)(struct usbip_device *);
35091 void (*reset)(struct usbip_device *);
35092 void (*unusable)(struct usbip_device *);
35093- } eh_ops;
35094+ } __no_const eh_ops;
35095 };
35096
35097 #if 0
35098diff -urNp linux-3.1.1/drivers/staging/usbip/vhci.h linux-3.1.1/drivers/staging/usbip/vhci.h
35099--- linux-3.1.1/drivers/staging/usbip/vhci.h 2011-11-11 15:19:27.000000000 -0500
35100+++ linux-3.1.1/drivers/staging/usbip/vhci.h 2011-11-16 18:39:08.000000000 -0500
35101@@ -85,7 +85,7 @@ struct vhci_hcd {
35102 unsigned resuming:1;
35103 unsigned long re_timeout;
35104
35105- atomic_t seqnum;
35106+ atomic_unchecked_t seqnum;
35107
35108 /*
35109 * NOTE:
35110diff -urNp linux-3.1.1/drivers/staging/usbip/vhci_hcd.c linux-3.1.1/drivers/staging/usbip/vhci_hcd.c
35111--- linux-3.1.1/drivers/staging/usbip/vhci_hcd.c 2011-11-11 15:19:27.000000000 -0500
35112+++ linux-3.1.1/drivers/staging/usbip/vhci_hcd.c 2011-11-16 18:39:08.000000000 -0500
35113@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35114 return;
35115 }
35116
35117- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35118+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35119 if (priv->seqnum == 0xffff)
35120 dev_info(&urb->dev->dev, "seqnum max\n");
35121
35122@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_h
35123 return -ENOMEM;
35124 }
35125
35126- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35127+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35128 if (unlink->seqnum == 0xffff)
35129 pr_info("seqnum max\n");
35130
35131@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hc
35132 vdev->rhport = rhport;
35133 }
35134
35135- atomic_set(&vhci->seqnum, 0);
35136+ atomic_set_unchecked(&vhci->seqnum, 0);
35137 spin_lock_init(&vhci->lock);
35138
35139 hcd->power_budget = 0; /* no limit */
35140diff -urNp linux-3.1.1/drivers/staging/usbip/vhci_rx.c linux-3.1.1/drivers/staging/usbip/vhci_rx.c
35141--- linux-3.1.1/drivers/staging/usbip/vhci_rx.c 2011-11-11 15:19:27.000000000 -0500
35142+++ linux-3.1.1/drivers/staging/usbip/vhci_rx.c 2011-11-16 18:39:08.000000000 -0500
35143@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
35144 if (!urb) {
35145 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35146 pr_info("max seqnum %d\n",
35147- atomic_read(&the_controller->seqnum));
35148+ atomic_read_unchecked(&the_controller->seqnum));
35149 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35150 return;
35151 }
35152diff -urNp linux-3.1.1/drivers/staging/vt6655/hostap.c linux-3.1.1/drivers/staging/vt6655/hostap.c
35153--- linux-3.1.1/drivers/staging/vt6655/hostap.c 2011-11-11 15:19:27.000000000 -0500
35154+++ linux-3.1.1/drivers/staging/vt6655/hostap.c 2011-11-16 18:39:08.000000000 -0500
35155@@ -79,14 +79,13 @@ static int msglevel
35156 *
35157 */
35158
35159+static net_device_ops_no_const apdev_netdev_ops;
35160+
35161 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35162 {
35163 PSDevice apdev_priv;
35164 struct net_device *dev = pDevice->dev;
35165 int ret;
35166- const struct net_device_ops apdev_netdev_ops = {
35167- .ndo_start_xmit = pDevice->tx_80211,
35168- };
35169
35170 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35171
35172@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
35173 *apdev_priv = *pDevice;
35174 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35175
35176+ /* only half broken now */
35177+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35178 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35179
35180 pDevice->apdev->type = ARPHRD_IEEE80211;
35181diff -urNp linux-3.1.1/drivers/staging/vt6656/hostap.c linux-3.1.1/drivers/staging/vt6656/hostap.c
35182--- linux-3.1.1/drivers/staging/vt6656/hostap.c 2011-11-11 15:19:27.000000000 -0500
35183+++ linux-3.1.1/drivers/staging/vt6656/hostap.c 2011-11-16 18:39:08.000000000 -0500
35184@@ -80,14 +80,13 @@ static int msglevel
35185 *
35186 */
35187
35188+static net_device_ops_no_const apdev_netdev_ops;
35189+
35190 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35191 {
35192 PSDevice apdev_priv;
35193 struct net_device *dev = pDevice->dev;
35194 int ret;
35195- const struct net_device_ops apdev_netdev_ops = {
35196- .ndo_start_xmit = pDevice->tx_80211,
35197- };
35198
35199 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35200
35201@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
35202 *apdev_priv = *pDevice;
35203 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35204
35205+ /* only half broken now */
35206+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35207 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35208
35209 pDevice->apdev->type = ARPHRD_IEEE80211;
35210diff -urNp linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c
35211--- linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-11 15:19:27.000000000 -0500
35212+++ linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-16 18:39:08.000000000 -0500
35213@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
35214
35215 struct usbctlx_completor {
35216 int (*complete) (struct usbctlx_completor *);
35217-};
35218+} __no_const;
35219
35220 static int
35221 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35222diff -urNp linux-3.1.1/drivers/staging/zcache/tmem.c linux-3.1.1/drivers/staging/zcache/tmem.c
35223--- linux-3.1.1/drivers/staging/zcache/tmem.c 2011-11-11 15:19:27.000000000 -0500
35224+++ linux-3.1.1/drivers/staging/zcache/tmem.c 2011-11-16 18:39:08.000000000 -0500
35225@@ -39,7 +39,7 @@
35226 * A tmem host implementation must use this function to register callbacks
35227 * for memory allocation.
35228 */
35229-static struct tmem_hostops tmem_hostops;
35230+static tmem_hostops_no_const tmem_hostops;
35231
35232 static void tmem_objnode_tree_init(void);
35233
35234@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
35235 * A tmem host implementation must use this function to register
35236 * callbacks for a page-accessible memory (PAM) implementation
35237 */
35238-static struct tmem_pamops tmem_pamops;
35239+static tmem_pamops_no_const tmem_pamops;
35240
35241 void tmem_register_pamops(struct tmem_pamops *m)
35242 {
35243diff -urNp linux-3.1.1/drivers/staging/zcache/tmem.h linux-3.1.1/drivers/staging/zcache/tmem.h
35244--- linux-3.1.1/drivers/staging/zcache/tmem.h 2011-11-11 15:19:27.000000000 -0500
35245+++ linux-3.1.1/drivers/staging/zcache/tmem.h 2011-11-16 18:39:08.000000000 -0500
35246@@ -180,6 +180,7 @@ struct tmem_pamops {
35247 void (*new_obj)(struct tmem_obj *);
35248 int (*replace_in_obj)(void *, struct tmem_obj *);
35249 };
35250+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35251 extern void tmem_register_pamops(struct tmem_pamops *m);
35252
35253 /* memory allocation methods provided by the host implementation */
35254@@ -189,6 +190,7 @@ struct tmem_hostops {
35255 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35256 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35257 };
35258+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35259 extern void tmem_register_hostops(struct tmem_hostops *m);
35260
35261 /* core tmem accessor functions */
35262diff -urNp linux-3.1.1/drivers/target/iscsi/iscsi_target.c linux-3.1.1/drivers/target/iscsi/iscsi_target.c
35263--- linux-3.1.1/drivers/target/iscsi/iscsi_target.c 2011-11-11 15:19:27.000000000 -0500
35264+++ linux-3.1.1/drivers/target/iscsi/iscsi_target.c 2011-11-16 18:39:08.000000000 -0500
35265@@ -1368,7 +1368,7 @@ static int iscsit_handle_data_out(struct
35266 * outstanding_r2ts reaches zero, go ahead and send the delayed
35267 * TASK_ABORTED status.
35268 */
35269- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35270+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35271 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35272 if (--cmd->outstanding_r2ts < 1) {
35273 iscsit_stop_dataout_timer(cmd);
35274diff -urNp linux-3.1.1/drivers/target/target_core_alua.c linux-3.1.1/drivers/target/target_core_alua.c
35275--- linux-3.1.1/drivers/target/target_core_alua.c 2011-11-11 15:19:27.000000000 -0500
35276+++ linux-3.1.1/drivers/target/target_core_alua.c 2011-11-16 18:40:29.000000000 -0500
35277@@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_
35278 char path[ALUA_METADATA_PATH_LEN];
35279 int len;
35280
35281+ pax_track_stack();
35282+
35283 memset(path, 0, ALUA_METADATA_PATH_LEN);
35284
35285 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
35286@@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondar
35287 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
35288 int len;
35289
35290+ pax_track_stack();
35291+
35292 memset(path, 0, ALUA_METADATA_PATH_LEN);
35293 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
35294
35295diff -urNp linux-3.1.1/drivers/target/target_core_cdb.c linux-3.1.1/drivers/target/target_core_cdb.c
35296--- linux-3.1.1/drivers/target/target_core_cdb.c 2011-11-11 15:19:27.000000000 -0500
35297+++ linux-3.1.1/drivers/target/target_core_cdb.c 2011-11-16 18:40:29.000000000 -0500
35298@@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *
35299 int length = 0;
35300 unsigned char buf[SE_MODE_PAGE_BUF];
35301
35302+ pax_track_stack();
35303+
35304 memset(buf, 0, SE_MODE_PAGE_BUF);
35305
35306 switch (cdb[2] & 0x3f) {
35307diff -urNp linux-3.1.1/drivers/target/target_core_configfs.c linux-3.1.1/drivers/target/target_core_configfs.c
35308--- linux-3.1.1/drivers/target/target_core_configfs.c 2011-11-11 15:19:27.000000000 -0500
35309+++ linux-3.1.1/drivers/target/target_core_configfs.c 2011-11-16 19:04:37.000000000 -0500
35310@@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_a
35311 ssize_t len = 0;
35312 int reg_count = 0, prf_isid;
35313
35314+ pax_track_stack();
35315+
35316 if (!su_dev->se_dev_ptr)
35317 return -ENODEV;
35318
35319diff -urNp linux-3.1.1/drivers/target/target_core_pr.c linux-3.1.1/drivers/target/target_core_pr.c
35320--- linux-3.1.1/drivers/target/target_core_pr.c 2011-11-11 15:19:27.000000000 -0500
35321+++ linux-3.1.1/drivers/target/target_core_pr.c 2011-11-16 18:40:29.000000000 -0500
35322@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
35323 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
35324 u16 tpgt;
35325
35326+ pax_track_stack();
35327+
35328 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
35329 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
35330 /*
35331@@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf
35332 ssize_t len = 0;
35333 int reg_count = 0;
35334
35335+ pax_track_stack();
35336+
35337 memset(buf, 0, pr_aptpl_buf_len);
35338 /*
35339 * Called to clear metadata once APTPL has been deactivated.
35340@@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_f
35341 char path[512];
35342 int ret;
35343
35344+ pax_track_stack();
35345+
35346 memset(iov, 0, sizeof(struct iovec));
35347 memset(path, 0, 512);
35348
35349diff -urNp linux-3.1.1/drivers/target/target_core_tmr.c linux-3.1.1/drivers/target/target_core_tmr.c
35350--- linux-3.1.1/drivers/target/target_core_tmr.c 2011-11-11 15:19:27.000000000 -0500
35351+++ linux-3.1.1/drivers/target/target_core_tmr.c 2011-11-16 18:39:08.000000000 -0500
35352@@ -255,7 +255,7 @@ static void core_tmr_drain_task_list(
35353 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35354 cmd->t_task_list_num,
35355 atomic_read(&cmd->t_task_cdbs_left),
35356- atomic_read(&cmd->t_task_cdbs_sent),
35357+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35358 atomic_read(&cmd->t_transport_active),
35359 atomic_read(&cmd->t_transport_stop),
35360 atomic_read(&cmd->t_transport_sent));
35361@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
35362 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35363 " task: %p, t_fe_count: %d dev: %p\n", task,
35364 fe_count, dev);
35365- atomic_set(&cmd->t_transport_aborted, 1);
35366+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35367 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35368
35369 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35370@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
35371 }
35372 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35373 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35374- atomic_set(&cmd->t_transport_aborted, 1);
35375+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35376 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35377
35378 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35379diff -urNp linux-3.1.1/drivers/target/target_core_transport.c linux-3.1.1/drivers/target/target_core_transport.c
35380--- linux-3.1.1/drivers/target/target_core_transport.c 2011-11-11 15:19:27.000000000 -0500
35381+++ linux-3.1.1/drivers/target/target_core_transport.c 2011-11-16 18:39:08.000000000 -0500
35382@@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_t
35383
35384 dev->queue_depth = dev_limits->queue_depth;
35385 atomic_set(&dev->depth_left, dev->queue_depth);
35386- atomic_set(&dev->dev_ordered_id, 0);
35387+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35388
35389 se_dev_set_default_attribs(dev, dev_limits);
35390
35391@@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_at
35392 * Used to determine when ORDERED commands should go from
35393 * Dormant to Active status.
35394 */
35395- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35396+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35397 smp_mb__after_atomic_inc();
35398 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35399 cmd->se_ordered_id, cmd->sam_task_attr,
35400@@ -1960,7 +1960,7 @@ static void transport_generic_request_fa
35401 " t_transport_active: %d t_transport_stop: %d"
35402 " t_transport_sent: %d\n", cmd->t_task_list_num,
35403 atomic_read(&cmd->t_task_cdbs_left),
35404- atomic_read(&cmd->t_task_cdbs_sent),
35405+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35406 atomic_read(&cmd->t_task_cdbs_ex_left),
35407 atomic_read(&cmd->t_transport_active),
35408 atomic_read(&cmd->t_transport_stop),
35409@@ -2460,9 +2460,9 @@ check_depth:
35410 spin_lock_irqsave(&cmd->t_state_lock, flags);
35411 atomic_set(&task->task_active, 1);
35412 atomic_set(&task->task_sent, 1);
35413- atomic_inc(&cmd->t_task_cdbs_sent);
35414+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35415
35416- if (atomic_read(&cmd->t_task_cdbs_sent) ==
35417+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35418 cmd->t_task_list_num)
35419 atomic_set(&cmd->transport_sent, 1);
35420
35421@@ -4665,7 +4665,7 @@ static void transport_generic_wait_for_t
35422 atomic_set(&cmd->transport_lun_stop, 0);
35423 }
35424 if (!atomic_read(&cmd->t_transport_active) ||
35425- atomic_read(&cmd->t_transport_aborted))
35426+ atomic_read_unchecked(&cmd->t_transport_aborted))
35427 goto remove;
35428
35429 atomic_set(&cmd->t_transport_stop, 1);
35430@@ -4900,7 +4900,7 @@ int transport_check_aborted_status(struc
35431 {
35432 int ret = 0;
35433
35434- if (atomic_read(&cmd->t_transport_aborted) != 0) {
35435+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35436 if (!send_status ||
35437 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35438 return 1;
35439@@ -4937,7 +4937,7 @@ void transport_send_task_abort(struct se
35440 */
35441 if (cmd->data_direction == DMA_TO_DEVICE) {
35442 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35443- atomic_inc(&cmd->t_transport_aborted);
35444+ atomic_inc_unchecked(&cmd->t_transport_aborted);
35445 smp_mb__after_atomic_inc();
35446 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
35447 transport_new_cmd_failure(cmd);
35448@@ -5051,7 +5051,7 @@ static void transport_processing_shutdow
35449 cmd->se_tfo->get_task_tag(cmd),
35450 cmd->t_task_list_num,
35451 atomic_read(&cmd->t_task_cdbs_left),
35452- atomic_read(&cmd->t_task_cdbs_sent),
35453+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35454 atomic_read(&cmd->t_transport_active),
35455 atomic_read(&cmd->t_transport_stop),
35456 atomic_read(&cmd->t_transport_sent));
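
The target_core hunks above switch several statistics/sequencing counters (t_task_cdbs_sent, dev_ordered_id, t_transport_aborted) from atomic_t to the PaX atomic_*_unchecked variants, which behave like ordinary atomics but are exempt from the overflow detection PaX applies to reference-count-style atomic_t usage. A minimal user-space sketch of that distinction, assuming C11 <stdatomic.h>; checked_inc() and plain_inc() are illustrative names, not kernel APIs:

#include <stdatomic.h>
#include <stdio.h>
#include <limits.h>

/* "Checked" increment: detect wrap-around the way a guarded refcount would. */
static int checked_inc(atomic_int *v)
{
        int old = atomic_fetch_add(v, 1);
        if (old == INT_MAX) {              /* the add just wrapped to INT_MIN */
                atomic_fetch_sub(v, 1);    /* saturate instead of wrapping */
                return -1;                 /* caller treats this as an error */
        }
        return 0;
}

/* "Unchecked" increment: a plain statistics counter, wrap-around is harmless. */
static void plain_inc(atomic_int *v)
{
        atomic_fetch_add(v, 1);
}

int main(void)
{
        atomic_int refs = 1;               /* refcount-like usage */
        atomic_int sent = 0;               /* statistics-like usage */

        if (checked_inc(&refs) == 0)
                printf("refcount now %d\n", atomic_load(&refs));
        plain_inc(&sent);                  /* analogous to atomic_inc_unchecked() */
        printf("cdbs sent: %d\n", atomic_load(&sent));
        return 0;
}
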
35457diff -urNp linux-3.1.1/drivers/telephony/ixj.c linux-3.1.1/drivers/telephony/ixj.c
35458--- linux-3.1.1/drivers/telephony/ixj.c 2011-11-11 15:19:27.000000000 -0500
35459+++ linux-3.1.1/drivers/telephony/ixj.c 2011-11-16 18:40:29.000000000 -0500
35460@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35461 bool mContinue;
35462 char *pIn, *pOut;
35463
35464+ pax_track_stack();
35465+
35466 if (!SCI_Prepare(j))
35467 return 0;
35468
35469diff -urNp linux-3.1.1/drivers/tty/hvc/hvcs.c linux-3.1.1/drivers/tty/hvc/hvcs.c
35470--- linux-3.1.1/drivers/tty/hvc/hvcs.c 2011-11-11 15:19:27.000000000 -0500
35471+++ linux-3.1.1/drivers/tty/hvc/hvcs.c 2011-11-16 18:39:08.000000000 -0500
35472@@ -83,6 +83,7 @@
35473 #include <asm/hvcserver.h>
35474 #include <asm/uaccess.h>
35475 #include <asm/vio.h>
35476+#include <asm/local.h>
35477
35478 /*
35479 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35480@@ -270,7 +271,7 @@ struct hvcs_struct {
35481 unsigned int index;
35482
35483 struct tty_struct *tty;
35484- int open_count;
35485+ local_t open_count;
35486
35487 /*
35488 * Used to tell the driver kernel_thread what operations need to take
35489@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
35490
35491 spin_lock_irqsave(&hvcsd->lock, flags);
35492
35493- if (hvcsd->open_count > 0) {
35494+ if (local_read(&hvcsd->open_count) > 0) {
35495 spin_unlock_irqrestore(&hvcsd->lock, flags);
35496 printk(KERN_INFO "HVCS: vterm state unchanged. "
35497 "The hvcs device node is still in use.\n");
35498@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
35499 if ((retval = hvcs_partner_connect(hvcsd)))
35500 goto error_release;
35501
35502- hvcsd->open_count = 1;
35503+ local_set(&hvcsd->open_count, 1);
35504 hvcsd->tty = tty;
35505 tty->driver_data = hvcsd;
35506
35507@@ -1179,7 +1180,7 @@ fast_open:
35508
35509 spin_lock_irqsave(&hvcsd->lock, flags);
35510 kref_get(&hvcsd->kref);
35511- hvcsd->open_count++;
35512+ local_inc(&hvcsd->open_count);
35513 hvcsd->todo_mask |= HVCS_SCHED_READ;
35514 spin_unlock_irqrestore(&hvcsd->lock, flags);
35515
35516@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
35517 hvcsd = tty->driver_data;
35518
35519 spin_lock_irqsave(&hvcsd->lock, flags);
35520- if (--hvcsd->open_count == 0) {
35521+ if (local_dec_and_test(&hvcsd->open_count)) {
35522
35523 vio_disable_interrupts(hvcsd->vdev);
35524
35525@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
35526 free_irq(irq, hvcsd);
35527 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35528 return;
35529- } else if (hvcsd->open_count < 0) {
35530+ } else if (local_read(&hvcsd->open_count) < 0) {
35531 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35532 " is missmanaged.\n",
35533- hvcsd->vdev->unit_address, hvcsd->open_count);
35534+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35535 }
35536
35537 spin_unlock_irqrestore(&hvcsd->lock, flags);
35538@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
35539
35540 spin_lock_irqsave(&hvcsd->lock, flags);
35541 /* Preserve this so that we know how many kref refs to put */
35542- temp_open_count = hvcsd->open_count;
35543+ temp_open_count = local_read(&hvcsd->open_count);
35544
35545 /*
35546 * Don't kref put inside the spinlock because the destruction
35547@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
35548 hvcsd->tty->driver_data = NULL;
35549 hvcsd->tty = NULL;
35550
35551- hvcsd->open_count = 0;
35552+ local_set(&hvcsd->open_count, 0);
35553
35554 /* This will drop any buffered data on the floor which is OK in a hangup
35555 * scenario. */
35556@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
35557 * the middle of a write operation? This is a crummy place to do this
35558 * but we want to keep it all in the spinlock.
35559 */
35560- if (hvcsd->open_count <= 0) {
35561+ if (local_read(&hvcsd->open_count) <= 0) {
35562 spin_unlock_irqrestore(&hvcsd->lock, flags);
35563 return -ENODEV;
35564 }
35565@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
35566 {
35567 struct hvcs_struct *hvcsd = tty->driver_data;
35568
35569- if (!hvcsd || hvcsd->open_count <= 0)
35570+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35571 return 0;
35572
35573 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
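
The hvcs hunk converts open_count from a plain int to local_t and routes every access through local_read()/local_set()/local_inc()/local_dec_and_test(); the ipwireless tty driver below receives the same conversion. A rough user-space stand-in for those helpers, built on C11 atomics purely to show the shape of the converted call sites (the real asm/local.h type is an arch-optimised counter, which this sketch does not reproduce):

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_long a; } local_t;   /* stand-in for asm/local.h */

static long local_read(local_t *l)          { return atomic_load(&l->a); }
static void local_set(local_t *l, long v)   { atomic_store(&l->a, v); }
static void local_inc(local_t *l)           { atomic_fetch_add(&l->a, 1); }
/* returns nonzero when the counter reaches zero, like the kernel helper */
static int  local_dec_and_test(local_t *l)  { return atomic_fetch_sub(&l->a, 1) == 1; }

struct hvcs_like { local_t open_count; };

int main(void)
{
        struct hvcs_like d;

        local_set(&d.open_count, 1);                 /* first open */
        local_inc(&d.open_count);                    /* fast reopen path */
        printf("open_count=%ld\n", local_read(&d.open_count));

        local_dec_and_test(&d.open_count);           /* one close */
        if (local_dec_and_test(&d.open_count))       /* last close */
                printf("last close: release resources\n");
        return 0;
}
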
35574diff -urNp linux-3.1.1/drivers/tty/ipwireless/tty.c linux-3.1.1/drivers/tty/ipwireless/tty.c
35575--- linux-3.1.1/drivers/tty/ipwireless/tty.c 2011-11-11 15:19:27.000000000 -0500
35576+++ linux-3.1.1/drivers/tty/ipwireless/tty.c 2011-11-16 18:39:08.000000000 -0500
35577@@ -29,6 +29,7 @@
35578 #include <linux/tty_driver.h>
35579 #include <linux/tty_flip.h>
35580 #include <linux/uaccess.h>
35581+#include <asm/local.h>
35582
35583 #include "tty.h"
35584 #include "network.h"
35585@@ -51,7 +52,7 @@ struct ipw_tty {
35586 int tty_type;
35587 struct ipw_network *network;
35588 struct tty_struct *linux_tty;
35589- int open_count;
35590+ local_t open_count;
35591 unsigned int control_lines;
35592 struct mutex ipw_tty_mutex;
35593 int tx_bytes_queued;
35594@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
35595 mutex_unlock(&tty->ipw_tty_mutex);
35596 return -ENODEV;
35597 }
35598- if (tty->open_count == 0)
35599+ if (local_read(&tty->open_count) == 0)
35600 tty->tx_bytes_queued = 0;
35601
35602- tty->open_count++;
35603+ local_inc(&tty->open_count);
35604
35605 tty->linux_tty = linux_tty;
35606 linux_tty->driver_data = tty;
35607@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
35608
35609 static void do_ipw_close(struct ipw_tty *tty)
35610 {
35611- tty->open_count--;
35612-
35613- if (tty->open_count == 0) {
35614+ if (local_dec_return(&tty->open_count) == 0) {
35615 struct tty_struct *linux_tty = tty->linux_tty;
35616
35617 if (linux_tty != NULL) {
35618@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
35619 return;
35620
35621 mutex_lock(&tty->ipw_tty_mutex);
35622- if (tty->open_count == 0) {
35623+ if (local_read(&tty->open_count) == 0) {
35624 mutex_unlock(&tty->ipw_tty_mutex);
35625 return;
35626 }
35627@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
35628 return;
35629 }
35630
35631- if (!tty->open_count) {
35632+ if (!local_read(&tty->open_count)) {
35633 mutex_unlock(&tty->ipw_tty_mutex);
35634 return;
35635 }
35636@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
35637 return -ENODEV;
35638
35639 mutex_lock(&tty->ipw_tty_mutex);
35640- if (!tty->open_count) {
35641+ if (!local_read(&tty->open_count)) {
35642 mutex_unlock(&tty->ipw_tty_mutex);
35643 return -EINVAL;
35644 }
35645@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
35646 if (!tty)
35647 return -ENODEV;
35648
35649- if (!tty->open_count)
35650+ if (!local_read(&tty->open_count))
35651 return -EINVAL;
35652
35653 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35654@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
35655 if (!tty)
35656 return 0;
35657
35658- if (!tty->open_count)
35659+ if (!local_read(&tty->open_count))
35660 return 0;
35661
35662 return tty->tx_bytes_queued;
35663@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
35664 if (!tty)
35665 return -ENODEV;
35666
35667- if (!tty->open_count)
35668+ if (!local_read(&tty->open_count))
35669 return -EINVAL;
35670
35671 return get_control_lines(tty);
35672@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
35673 if (!tty)
35674 return -ENODEV;
35675
35676- if (!tty->open_count)
35677+ if (!local_read(&tty->open_count))
35678 return -EINVAL;
35679
35680 return set_control_lines(tty, set, clear);
35681@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
35682 if (!tty)
35683 return -ENODEV;
35684
35685- if (!tty->open_count)
35686+ if (!local_read(&tty->open_count))
35687 return -EINVAL;
35688
35689 /* FIXME: Exactly how is the tty object locked here .. */
35690@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
35691 against a parallel ioctl etc */
35692 mutex_lock(&ttyj->ipw_tty_mutex);
35693 }
35694- while (ttyj->open_count)
35695+ while (local_read(&ttyj->open_count))
35696 do_ipw_close(ttyj);
35697 ipwireless_disassociate_network_ttys(network,
35698 ttyj->channel_idx);
35699diff -urNp linux-3.1.1/drivers/tty/n_gsm.c linux-3.1.1/drivers/tty/n_gsm.c
35700--- linux-3.1.1/drivers/tty/n_gsm.c 2011-11-11 15:19:27.000000000 -0500
35701+++ linux-3.1.1/drivers/tty/n_gsm.c 2011-11-16 18:39:08.000000000 -0500
35702@@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
35703 kref_init(&dlci->ref);
35704 mutex_init(&dlci->mutex);
35705 dlci->fifo = &dlci->_fifo;
35706- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35707+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35708 kfree(dlci);
35709 return NULL;
35710 }
35711diff -urNp linux-3.1.1/drivers/tty/n_tty.c linux-3.1.1/drivers/tty/n_tty.c
35712--- linux-3.1.1/drivers/tty/n_tty.c 2011-11-11 15:19:27.000000000 -0500
35713+++ linux-3.1.1/drivers/tty/n_tty.c 2011-11-16 18:39:08.000000000 -0500
35714@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
35715 {
35716 *ops = tty_ldisc_N_TTY;
35717 ops->owner = NULL;
35718- ops->refcount = ops->flags = 0;
35719+ atomic_set(&ops->refcount, 0);
35720+ ops->flags = 0;
35721 }
35722 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35723diff -urNp linux-3.1.1/drivers/tty/pty.c linux-3.1.1/drivers/tty/pty.c
35724--- linux-3.1.1/drivers/tty/pty.c 2011-11-11 15:19:27.000000000 -0500
35725+++ linux-3.1.1/drivers/tty/pty.c 2011-11-16 18:39:08.000000000 -0500
35726@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35727 register_sysctl_table(pty_root_table);
35728
35729 /* Now create the /dev/ptmx special device */
35730+ pax_open_kernel();
35731 tty_default_fops(&ptmx_fops);
35732- ptmx_fops.open = ptmx_open;
35733+ *(void **)&ptmx_fops.open = ptmx_open;
35734+ pax_close_kernel();
35735
35736 cdev_init(&ptmx_cdev, &ptmx_fops);
35737 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
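
The unix98_pty_init() hunk wraps the ptmx_fops.open assignment in pax_open_kernel()/pax_close_kernel() and writes through a cast, since with PaX constification the file_operations structure is expected to sit in read-only memory and may only be modified inside such a window. A user-space analogue of the idea using POSIX mprotect(); open_window(), close_window() and ptmx_open_stub() are illustrative names only:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct fops { int (*open)(void); };

static int ptmx_open_stub(void) { return 42; }

/* stand-ins for pax_open_kernel()/pax_close_kernel(): flip page permissions */
static void open_window(void *page, size_t len)  { mprotect(page, len, PROT_READ | PROT_WRITE); }
static void close_window(void *page, size_t len) { mprotect(page, len, PROT_READ); }

int main(void)
{
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        /* the "const" table lives on its own page, normally mapped read-only */
        struct fops *ptmx_fops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptmx_fops == MAP_FAILED)
                return 1;
        memset(ptmx_fops, 0, sizeof(*ptmx_fops));
        mprotect(ptmx_fops, pg, PROT_READ);          /* lock it down */

        open_window(ptmx_fops, pg);                  /* like pax_open_kernel() */
        ptmx_fops->open = ptmx_open_stub;            /* the one permitted write */
        close_window(ptmx_fops, pg);                 /* like pax_close_kernel() */

        printf("open() -> %d\n", ptmx_fops->open());
        return 0;
}
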
35738diff -urNp linux-3.1.1/drivers/tty/rocket.c linux-3.1.1/drivers/tty/rocket.c
35739--- linux-3.1.1/drivers/tty/rocket.c 2011-11-11 15:19:27.000000000 -0500
35740+++ linux-3.1.1/drivers/tty/rocket.c 2011-11-16 18:40:29.000000000 -0500
35741@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
35742 struct rocket_ports tmp;
35743 int board;
35744
35745+ pax_track_stack();
35746+
35747 if (!retports)
35748 return -EFAULT;
35749 memset(&tmp, 0, sizeof (tmp));
35750diff -urNp linux-3.1.1/drivers/tty/serial/kgdboc.c linux-3.1.1/drivers/tty/serial/kgdboc.c
35751--- linux-3.1.1/drivers/tty/serial/kgdboc.c 2011-11-11 15:19:27.000000000 -0500
35752+++ linux-3.1.1/drivers/tty/serial/kgdboc.c 2011-11-16 18:39:08.000000000 -0500
35753@@ -23,8 +23,9 @@
35754 #define MAX_CONFIG_LEN 40
35755
35756 static struct kgdb_io kgdboc_io_ops;
35757+static struct kgdb_io kgdboc_io_ops_console;
35758
35759-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35760+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35761 static int configured = -1;
35762
35763 static char config[MAX_CONFIG_LEN];
35764@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
35765 kgdboc_unregister_kbd();
35766 if (configured == 1)
35767 kgdb_unregister_io_module(&kgdboc_io_ops);
35768+ else if (configured == 2)
35769+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35770 }
35771
35772 static int configure_kgdboc(void)
35773@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
35774 int err;
35775 char *cptr = config;
35776 struct console *cons;
35777+ int is_console = 0;
35778
35779 err = kgdboc_option_setup(config);
35780 if (err || !strlen(config) || isspace(config[0]))
35781 goto noconfig;
35782
35783 err = -ENODEV;
35784- kgdboc_io_ops.is_console = 0;
35785 kgdb_tty_driver = NULL;
35786
35787 kgdboc_use_kms = 0;
35788@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
35789 int idx;
35790 if (cons->device && cons->device(cons, &idx) == p &&
35791 idx == tty_line) {
35792- kgdboc_io_ops.is_console = 1;
35793+ is_console = 1;
35794 break;
35795 }
35796 cons = cons->next;
35797@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
35798 kgdb_tty_line = tty_line;
35799
35800 do_register:
35801- err = kgdb_register_io_module(&kgdboc_io_ops);
35802+ if (is_console) {
35803+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35804+ configured = 2;
35805+ } else {
35806+ err = kgdb_register_io_module(&kgdboc_io_ops);
35807+ configured = 1;
35808+ }
35809 if (err)
35810 goto noconfig;
35811
35812- configured = 1;
35813-
35814 return 0;
35815
35816 noconfig:
35817@@ -212,7 +219,7 @@ noconfig:
35818 static int __init init_kgdboc(void)
35819 {
35820 /* Already configured? */
35821- if (configured == 1)
35822+ if (configured >= 1)
35823 return 0;
35824
35825 return configure_kgdboc();
35826@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
35827 if (config[len - 1] == '\n')
35828 config[len - 1] = '\0';
35829
35830- if (configured == 1)
35831+ if (configured >= 1)
35832 cleanup_kgdboc();
35833
35834 /* Go and configure with the new params. */
35835@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
35836 .post_exception = kgdboc_post_exp_handler,
35837 };
35838
35839+static struct kgdb_io kgdboc_io_ops_console = {
35840+ .name = "kgdboc",
35841+ .read_char = kgdboc_get_char,
35842+ .write_char = kgdboc_put_char,
35843+ .pre_exception = kgdboc_pre_exp_handler,
35844+ .post_exception = kgdboc_post_exp_handler,
35845+ .is_console = 1
35846+};
35847+
35848 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35849 /* This is only available if kgdboc is a built in for early debugging */
35850 static int __init kgdboc_early_init(char *opt)
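
Rather than toggling kgdboc_io_ops.is_console at run time, the kgdboc hunks register one of two statically initialised kgdb_io structures and record which one in configured (1 or 2), so the structures themselves no longer need to be writable; the ehci-dbgp hunk further below applies the same split. A compact sketch of that pattern with a simplified ops structure; register_backend() is an illustrative name:

#include <stdio.h>

struct io_ops {
        const char *name;
        int is_console;
};

/* two fixed instances replace one instance whose field was patched at runtime */
static const struct io_ops io_ops_plain   = { .name = "kgdboc", .is_console = 0 };
static const struct io_ops io_ops_console = { .name = "kgdboc", .is_console = 1 };

static const struct io_ops *registered;
static int configured;        /* 0 = none, 1 = plain, 2 = console */

static void register_backend(int is_console)
{
        if (is_console) {
                registered = &io_ops_console;
                configured = 2;
        } else {
                registered = &io_ops_plain;
                configured = 1;
        }
}

int main(void)
{
        register_backend(1);
        printf("%s: is_console=%d (configured=%d)\n",
               registered->name, registered->is_console, configured);
        return 0;
}
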
35851diff -urNp linux-3.1.1/drivers/tty/serial/mfd.c linux-3.1.1/drivers/tty/serial/mfd.c
35852--- linux-3.1.1/drivers/tty/serial/mfd.c 2011-11-11 15:19:27.000000000 -0500
35853+++ linux-3.1.1/drivers/tty/serial/mfd.c 2011-11-16 18:39:08.000000000 -0500
35854@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci
35855 }
35856
35857 /* First 3 are UART ports, and the 4th is the DMA */
35858-static const struct pci_device_id pci_ids[] __devinitdata = {
35859+static const struct pci_device_id pci_ids[] __devinitconst = {
35860 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
35861 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
35862 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
35863diff -urNp linux-3.1.1/drivers/tty/serial/mrst_max3110.c linux-3.1.1/drivers/tty/serial/mrst_max3110.c
35864--- linux-3.1.1/drivers/tty/serial/mrst_max3110.c 2011-11-11 15:19:27.000000000 -0500
35865+++ linux-3.1.1/drivers/tty/serial/mrst_max3110.c 2011-11-16 18:40:29.000000000 -0500
35866@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
35867 int loop = 1, num, total = 0;
35868 u8 recv_buf[512], *pbuf;
35869
35870+ pax_track_stack();
35871+
35872 pbuf = recv_buf;
35873 do {
35874 num = max3110_read_multi(max, pbuf);
35875diff -urNp linux-3.1.1/drivers/tty/tty_io.c linux-3.1.1/drivers/tty/tty_io.c
35876--- linux-3.1.1/drivers/tty/tty_io.c 2011-11-11 15:19:27.000000000 -0500
35877+++ linux-3.1.1/drivers/tty/tty_io.c 2011-11-16 18:39:08.000000000 -0500
35878@@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35879
35880 void tty_default_fops(struct file_operations *fops)
35881 {
35882- *fops = tty_fops;
35883+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35884 }
35885
35886 /*
35887diff -urNp linux-3.1.1/drivers/tty/tty_ldisc.c linux-3.1.1/drivers/tty/tty_ldisc.c
35888--- linux-3.1.1/drivers/tty/tty_ldisc.c 2011-11-11 15:19:27.000000000 -0500
35889+++ linux-3.1.1/drivers/tty/tty_ldisc.c 2011-11-16 18:39:08.000000000 -0500
35890@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
35891 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35892 struct tty_ldisc_ops *ldo = ld->ops;
35893
35894- ldo->refcount--;
35895+ atomic_dec(&ldo->refcount);
35896 module_put(ldo->owner);
35897 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35898
35899@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
35900 spin_lock_irqsave(&tty_ldisc_lock, flags);
35901 tty_ldiscs[disc] = new_ldisc;
35902 new_ldisc->num = disc;
35903- new_ldisc->refcount = 0;
35904+ atomic_set(&new_ldisc->refcount, 0);
35905 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35906
35907 return ret;
35908@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
35909 return -EINVAL;
35910
35911 spin_lock_irqsave(&tty_ldisc_lock, flags);
35912- if (tty_ldiscs[disc]->refcount)
35913+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35914 ret = -EBUSY;
35915 else
35916 tty_ldiscs[disc] = NULL;
35917@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
35918 if (ldops) {
35919 ret = ERR_PTR(-EAGAIN);
35920 if (try_module_get(ldops->owner)) {
35921- ldops->refcount++;
35922+ atomic_inc(&ldops->refcount);
35923 ret = ldops;
35924 }
35925 }
35926@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
35927 unsigned long flags;
35928
35929 spin_lock_irqsave(&tty_ldisc_lock, flags);
35930- ldops->refcount--;
35931+ atomic_dec(&ldops->refcount);
35932 module_put(ldops->owner);
35933 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35934 }
35935diff -urNp linux-3.1.1/drivers/tty/vt/keyboard.c linux-3.1.1/drivers/tty/vt/keyboard.c
35936--- linux-3.1.1/drivers/tty/vt/keyboard.c 2011-11-11 15:19:27.000000000 -0500
35937+++ linux-3.1.1/drivers/tty/vt/keyboard.c 2011-11-16 18:40:29.000000000 -0500
35938@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
35939 kbd->kbdmode == VC_OFF) &&
35940 value != KVAL(K_SAK))
35941 return; /* SAK is allowed even in raw mode */
35942+
35943+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35944+ {
35945+ void *func = fn_handler[value];
35946+ if (func == fn_show_state || func == fn_show_ptregs ||
35947+ func == fn_show_mem)
35948+ return;
35949+ }
35950+#endif
35951+
35952 fn_handler[value](vc);
35953 }
35954
35955diff -urNp linux-3.1.1/drivers/tty/vt/vt.c linux-3.1.1/drivers/tty/vt/vt.c
35956--- linux-3.1.1/drivers/tty/vt/vt.c 2011-11-11 15:19:27.000000000 -0500
35957+++ linux-3.1.1/drivers/tty/vt/vt.c 2011-11-16 18:39:08.000000000 -0500
35958@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
35959
35960 static void notify_write(struct vc_data *vc, unsigned int unicode)
35961 {
35962- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
35963+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
35964 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
35965 }
35966
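
The vt.c hunk fixes an easily missed initializer: in { .vc = vc, unicode = unicode } the second element is an assignment expression rather than a designator, so its value fills whichever member happens to follow .vc positionally, while .c = unicode names the intended field and survives reordering. A standalone demonstration (the struct here is an illustrative stand-in, not the kernel's vt_notifier_param):

#include <stdio.h>

struct param {
        void        *vc;
        unsigned int c;      /* the field the caller actually means */
        unsigned int other;  /* if this member preceded c, the buggy form would fill it instead */
};

int main(void)
{
        void *vc = NULL;
        unsigned int unicode = 0x263A;

        /* buggy spelling: "unicode = unicode" is an assignment expression whose
         * value initialises the member positionally after .vc (here: c) */
        struct param a = { .vc = vc, unicode = unicode };

        /* fixed spelling: the designator names the field explicitly */
        struct param b = { .vc = vc, .c = unicode };

        printf("a.c=%#x b.c=%#x\n", a.c, b.c);
        return 0;
}
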
35967diff -urNp linux-3.1.1/drivers/tty/vt/vt_ioctl.c linux-3.1.1/drivers/tty/vt/vt_ioctl.c
35968--- linux-3.1.1/drivers/tty/vt/vt_ioctl.c 2011-11-11 15:19:27.000000000 -0500
35969+++ linux-3.1.1/drivers/tty/vt/vt_ioctl.c 2011-11-16 18:40:29.000000000 -0500
35970@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35971 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35972 return -EFAULT;
35973
35974- if (!capable(CAP_SYS_TTY_CONFIG))
35975- perm = 0;
35976-
35977 switch (cmd) {
35978 case KDGKBENT:
35979 key_map = key_maps[s];
35980@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35981 val = (i ? K_HOLE : K_NOSUCHMAP);
35982 return put_user(val, &user_kbe->kb_value);
35983 case KDSKBENT:
35984+ if (!capable(CAP_SYS_TTY_CONFIG))
35985+ perm = 0;
35986+
35987 if (!perm)
35988 return -EPERM;
35989 if (!i && v == K_NOSUCHMAP) {
35990@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
35991 int i, j, k;
35992 int ret;
35993
35994- if (!capable(CAP_SYS_TTY_CONFIG))
35995- perm = 0;
35996-
35997 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35998 if (!kbs) {
35999 ret = -ENOMEM;
36000@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36001 kfree(kbs);
36002 return ((p && *p) ? -EOVERFLOW : 0);
36003 case KDSKBSENT:
36004+ if (!capable(CAP_SYS_TTY_CONFIG))
36005+ perm = 0;
36006+
36007 if (!perm) {
36008 ret = -EPERM;
36009 goto reterr;
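
The vt_ioctl hunks move the capable(CAP_SYS_TTY_CONFIG) test from the top of do_kdsk_ioctl()/do_kdgkb_ioctl() into the KDSKBENT/KDSKBSENT branches, so reading a keymap entry no longer demands the capability while writing one still does. A reduced sketch of that control flow; is_privileged() and the in-memory keymap are placeholders:

#include <stdio.h>
#include <errno.h>

enum { KDGKBENT, KDSKBENT };

static int is_privileged(void) { return 0; }   /* placeholder for capable(CAP_SYS_TTY_CONFIG) */

static int do_kdsk_ioctl(int cmd, unsigned idx, unsigned *val)
{
        static unsigned keymap[256];

        switch (cmd) {
        case KDGKBENT:                           /* read: no capability needed */
                *val = keymap[idx & 0xff];
                return 0;
        case KDSKBENT:                           /* write: capability checked here only */
                if (!is_privileged())
                        return -EPERM;
                keymap[idx & 0xff] = *val;
                return 0;
        }
        return -EINVAL;
}

int main(void)
{
        unsigned v = 0;
        printf("read  -> %d\n", do_kdsk_ioctl(KDGKBENT, 1, &v));
        printf("write -> %d\n", do_kdsk_ioctl(KDSKBENT, 1, &v));   /* -EPERM here */
        return 0;
}
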
36010diff -urNp linux-3.1.1/drivers/uio/uio.c linux-3.1.1/drivers/uio/uio.c
36011--- linux-3.1.1/drivers/uio/uio.c 2011-11-11 15:19:27.000000000 -0500
36012+++ linux-3.1.1/drivers/uio/uio.c 2011-11-16 18:39:08.000000000 -0500
36013@@ -25,6 +25,7 @@
36014 #include <linux/kobject.h>
36015 #include <linux/cdev.h>
36016 #include <linux/uio_driver.h>
36017+#include <asm/local.h>
36018
36019 #define UIO_MAX_DEVICES (1U << MINORBITS)
36020
36021@@ -32,10 +33,10 @@ struct uio_device {
36022 struct module *owner;
36023 struct device *dev;
36024 int minor;
36025- atomic_t event;
36026+ atomic_unchecked_t event;
36027 struct fasync_struct *async_queue;
36028 wait_queue_head_t wait;
36029- int vma_count;
36030+ local_t vma_count;
36031 struct uio_info *info;
36032 struct kobject *map_dir;
36033 struct kobject *portio_dir;
36034@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
36035 struct device_attribute *attr, char *buf)
36036 {
36037 struct uio_device *idev = dev_get_drvdata(dev);
36038- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36039+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36040 }
36041
36042 static struct device_attribute uio_class_attributes[] = {
36043@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
36044 {
36045 struct uio_device *idev = info->uio_dev;
36046
36047- atomic_inc(&idev->event);
36048+ atomic_inc_unchecked(&idev->event);
36049 wake_up_interruptible(&idev->wait);
36050 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36051 }
36052@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
36053 }
36054
36055 listener->dev = idev;
36056- listener->event_count = atomic_read(&idev->event);
36057+ listener->event_count = atomic_read_unchecked(&idev->event);
36058 filep->private_data = listener;
36059
36060 if (idev->info->open) {
36061@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
36062 return -EIO;
36063
36064 poll_wait(filep, &idev->wait, wait);
36065- if (listener->event_count != atomic_read(&idev->event))
36066+ if (listener->event_count != atomic_read_unchecked(&idev->event))
36067 return POLLIN | POLLRDNORM;
36068 return 0;
36069 }
36070@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
36071 do {
36072 set_current_state(TASK_INTERRUPTIBLE);
36073
36074- event_count = atomic_read(&idev->event);
36075+ event_count = atomic_read_unchecked(&idev->event);
36076 if (event_count != listener->event_count) {
36077 if (copy_to_user(buf, &event_count, count))
36078 retval = -EFAULT;
36079@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
36080 static void uio_vma_open(struct vm_area_struct *vma)
36081 {
36082 struct uio_device *idev = vma->vm_private_data;
36083- idev->vma_count++;
36084+ local_inc(&idev->vma_count);
36085 }
36086
36087 static void uio_vma_close(struct vm_area_struct *vma)
36088 {
36089 struct uio_device *idev = vma->vm_private_data;
36090- idev->vma_count--;
36091+ local_dec(&idev->vma_count);
36092 }
36093
36094 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36095@@ -823,7 +824,7 @@ int __uio_register_device(struct module
36096 idev->owner = owner;
36097 idev->info = info;
36098 init_waitqueue_head(&idev->wait);
36099- atomic_set(&idev->event, 0);
36100+ atomic_set_unchecked(&idev->event, 0);
36101
36102 ret = uio_get_minor(idev);
36103 if (ret)
36104diff -urNp linux-3.1.1/drivers/usb/atm/cxacru.c linux-3.1.1/drivers/usb/atm/cxacru.c
36105--- linux-3.1.1/drivers/usb/atm/cxacru.c 2011-11-11 15:19:27.000000000 -0500
36106+++ linux-3.1.1/drivers/usb/atm/cxacru.c 2011-11-16 18:39:08.000000000 -0500
36107@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
36108 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36109 if (ret < 2)
36110 return -EINVAL;
36111- if (index < 0 || index > 0x7f)
36112+ if (index > 0x7f)
36113 return -EINVAL;
36114 pos += tmp;
36115
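
The cxacru hunk drops the index < 0 half of the range check: index is unsigned there, so that comparison can never be true and only invites a compiler warning; the fbcmap and fbmem hunks further below remove equivalent dead comparisons on __u32 fields. A two-check demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int index = 0xdeadbeef;

        /* "index < 0" is always false for an unsigned type, so the old check
         * only ever rejected values above 0x7f; the simplified form is equivalent */
        if (index < 0 || index > 0x7f)
                printf("old check rejects %#x\n", index);
        if (index > 0x7f)
                printf("new check rejects %#x\n", index);
        return 0;
}
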
36116diff -urNp linux-3.1.1/drivers/usb/atm/usbatm.c linux-3.1.1/drivers/usb/atm/usbatm.c
36117--- linux-3.1.1/drivers/usb/atm/usbatm.c 2011-11-11 15:19:27.000000000 -0500
36118+++ linux-3.1.1/drivers/usb/atm/usbatm.c 2011-11-16 18:39:08.000000000 -0500
36119@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
36120 if (printk_ratelimit())
36121 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36122 __func__, vpi, vci);
36123- atomic_inc(&vcc->stats->rx_err);
36124+ atomic_inc_unchecked(&vcc->stats->rx_err);
36125 return;
36126 }
36127
36128@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
36129 if (length > ATM_MAX_AAL5_PDU) {
36130 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36131 __func__, length, vcc);
36132- atomic_inc(&vcc->stats->rx_err);
36133+ atomic_inc_unchecked(&vcc->stats->rx_err);
36134 goto out;
36135 }
36136
36137@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
36138 if (sarb->len < pdu_length) {
36139 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36140 __func__, pdu_length, sarb->len, vcc);
36141- atomic_inc(&vcc->stats->rx_err);
36142+ atomic_inc_unchecked(&vcc->stats->rx_err);
36143 goto out;
36144 }
36145
36146 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36147 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36148 __func__, vcc);
36149- atomic_inc(&vcc->stats->rx_err);
36150+ atomic_inc_unchecked(&vcc->stats->rx_err);
36151 goto out;
36152 }
36153
36154@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
36155 if (printk_ratelimit())
36156 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36157 __func__, length);
36158- atomic_inc(&vcc->stats->rx_drop);
36159+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36160 goto out;
36161 }
36162
36163@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
36164
36165 vcc->push(vcc, skb);
36166
36167- atomic_inc(&vcc->stats->rx);
36168+ atomic_inc_unchecked(&vcc->stats->rx);
36169 out:
36170 skb_trim(sarb, 0);
36171 }
36172@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned l
36173 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36174
36175 usbatm_pop(vcc, skb);
36176- atomic_inc(&vcc->stats->tx);
36177+ atomic_inc_unchecked(&vcc->stats->tx);
36178
36179 skb = skb_dequeue(&instance->sndqueue);
36180 }
36181@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
36182 if (!left--)
36183 return sprintf(page,
36184 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36185- atomic_read(&atm_dev->stats.aal5.tx),
36186- atomic_read(&atm_dev->stats.aal5.tx_err),
36187- atomic_read(&atm_dev->stats.aal5.rx),
36188- atomic_read(&atm_dev->stats.aal5.rx_err),
36189- atomic_read(&atm_dev->stats.aal5.rx_drop));
36190+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36191+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36192+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36193+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36194+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36195
36196 if (!left--) {
36197 if (instance->disconnected)
36198diff -urNp linux-3.1.1/drivers/usb/core/devices.c linux-3.1.1/drivers/usb/core/devices.c
36199--- linux-3.1.1/drivers/usb/core/devices.c 2011-11-11 15:19:27.000000000 -0500
36200+++ linux-3.1.1/drivers/usb/core/devices.c 2011-11-16 18:39:08.000000000 -0500
36201@@ -126,7 +126,7 @@ static const char format_endpt[] =
36202 * time it gets called.
36203 */
36204 static struct device_connect_event {
36205- atomic_t count;
36206+ atomic_unchecked_t count;
36207 wait_queue_head_t wait;
36208 } device_event = {
36209 .count = ATOMIC_INIT(1),
36210@@ -164,7 +164,7 @@ static const struct class_info clas_info
36211
36212 void usbfs_conn_disc_event(void)
36213 {
36214- atomic_add(2, &device_event.count);
36215+ atomic_add_unchecked(2, &device_event.count);
36216 wake_up(&device_event.wait);
36217 }
36218
36219@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
36220
36221 poll_wait(file, &device_event.wait, wait);
36222
36223- event_count = atomic_read(&device_event.count);
36224+ event_count = atomic_read_unchecked(&device_event.count);
36225 if (file->f_version != event_count) {
36226 file->f_version = event_count;
36227 return POLLIN | POLLRDNORM;
36228diff -urNp linux-3.1.1/drivers/usb/core/message.c linux-3.1.1/drivers/usb/core/message.c
36229--- linux-3.1.1/drivers/usb/core/message.c 2011-11-11 15:19:27.000000000 -0500
36230+++ linux-3.1.1/drivers/usb/core/message.c 2011-11-16 18:39:08.000000000 -0500
36231@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
36232 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36233 if (buf) {
36234 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36235- if (len > 0) {
36236- smallbuf = kmalloc(++len, GFP_NOIO);
36237+ if (len++ > 0) {
36238+ smallbuf = kmalloc(len, GFP_NOIO);
36239 if (!smallbuf)
36240 return buf;
36241 memcpy(smallbuf, buf, len);
36242diff -urNp linux-3.1.1/drivers/usb/early/ehci-dbgp.c linux-3.1.1/drivers/usb/early/ehci-dbgp.c
36243--- linux-3.1.1/drivers/usb/early/ehci-dbgp.c 2011-11-11 15:19:27.000000000 -0500
36244+++ linux-3.1.1/drivers/usb/early/ehci-dbgp.c 2011-11-16 18:39:08.000000000 -0500
36245@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
36246
36247 #ifdef CONFIG_KGDB
36248 static struct kgdb_io kgdbdbgp_io_ops;
36249-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36250+static struct kgdb_io kgdbdbgp_io_ops_console;
36251+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36252 #else
36253 #define dbgp_kgdb_mode (0)
36254 #endif
36255@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
36256 .write_char = kgdbdbgp_write_char,
36257 };
36258
36259+static struct kgdb_io kgdbdbgp_io_ops_console = {
36260+ .name = "kgdbdbgp",
36261+ .read_char = kgdbdbgp_read_char,
36262+ .write_char = kgdbdbgp_write_char,
36263+ .is_console = 1
36264+};
36265+
36266 static int kgdbdbgp_wait_time;
36267
36268 static int __init kgdbdbgp_parse_config(char *str)
36269@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
36270 ptr++;
36271 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36272 }
36273- kgdb_register_io_module(&kgdbdbgp_io_ops);
36274- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36275+ if (early_dbgp_console.index != -1)
36276+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36277+ else
36278+ kgdb_register_io_module(&kgdbdbgp_io_ops);
36279
36280 return 0;
36281 }
36282diff -urNp linux-3.1.1/drivers/usb/host/xhci-mem.c linux-3.1.1/drivers/usb/host/xhci-mem.c
36283--- linux-3.1.1/drivers/usb/host/xhci-mem.c 2011-11-11 15:19:27.000000000 -0500
36284+++ linux-3.1.1/drivers/usb/host/xhci-mem.c 2011-11-16 18:40:29.000000000 -0500
36285@@ -1690,6 +1690,8 @@ static int xhci_check_trb_in_td_math(str
36286 unsigned int num_tests;
36287 int i, ret;
36288
36289+ pax_track_stack();
36290+
36291 num_tests = ARRAY_SIZE(simple_test_vector);
36292 for (i = 0; i < num_tests; i++) {
36293 ret = xhci_test_trb_in_td(xhci,
36294diff -urNp linux-3.1.1/drivers/usb/wusbcore/wa-hc.h linux-3.1.1/drivers/usb/wusbcore/wa-hc.h
36295--- linux-3.1.1/drivers/usb/wusbcore/wa-hc.h 2011-11-11 15:19:27.000000000 -0500
36296+++ linux-3.1.1/drivers/usb/wusbcore/wa-hc.h 2011-11-16 18:39:08.000000000 -0500
36297@@ -192,7 +192,7 @@ struct wahc {
36298 struct list_head xfer_delayed_list;
36299 spinlock_t xfer_list_lock;
36300 struct work_struct xfer_work;
36301- atomic_t xfer_id_count;
36302+ atomic_unchecked_t xfer_id_count;
36303 };
36304
36305
36306@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
36307 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36308 spin_lock_init(&wa->xfer_list_lock);
36309 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36310- atomic_set(&wa->xfer_id_count, 1);
36311+ atomic_set_unchecked(&wa->xfer_id_count, 1);
36312 }
36313
36314 /**
36315diff -urNp linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c
36316--- linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c 2011-11-11 15:19:27.000000000 -0500
36317+++ linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c 2011-11-16 18:39:08.000000000 -0500
36318@@ -295,7 +295,7 @@ out:
36319 */
36320 static void wa_xfer_id_init(struct wa_xfer *xfer)
36321 {
36322- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36323+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36324 }
36325
36326 /*
36327diff -urNp linux-3.1.1/drivers/vhost/vhost.c linux-3.1.1/drivers/vhost/vhost.c
36328--- linux-3.1.1/drivers/vhost/vhost.c 2011-11-11 15:19:27.000000000 -0500
36329+++ linux-3.1.1/drivers/vhost/vhost.c 2011-11-16 18:39:08.000000000 -0500
36330@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhos
36331 return 0;
36332 }
36333
36334-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36335+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36336 {
36337 struct file *eventfp, *filep = NULL,
36338 *pollstart = NULL, *pollstop = NULL;
36339diff -urNp linux-3.1.1/drivers/video/aty/aty128fb.c linux-3.1.1/drivers/video/aty/aty128fb.c
36340--- linux-3.1.1/drivers/video/aty/aty128fb.c 2011-11-11 15:19:27.000000000 -0500
36341+++ linux-3.1.1/drivers/video/aty/aty128fb.c 2011-11-16 18:39:08.000000000 -0500
36342@@ -148,7 +148,7 @@ enum {
36343 };
36344
36345 /* Must match above enum */
36346-static const char *r128_family[] __devinitdata = {
36347+static const char *r128_family[] __devinitconst = {
36348 "AGP",
36349 "PCI",
36350 "PRO AGP",
36351diff -urNp linux-3.1.1/drivers/video/fbcmap.c linux-3.1.1/drivers/video/fbcmap.c
36352--- linux-3.1.1/drivers/video/fbcmap.c 2011-11-11 15:19:27.000000000 -0500
36353+++ linux-3.1.1/drivers/video/fbcmap.c 2011-11-16 18:39:08.000000000 -0500
36354@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
36355 rc = -ENODEV;
36356 goto out;
36357 }
36358- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36359- !info->fbops->fb_setcmap)) {
36360+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36361 rc = -EINVAL;
36362 goto out1;
36363 }
36364diff -urNp linux-3.1.1/drivers/video/fbmem.c linux-3.1.1/drivers/video/fbmem.c
36365--- linux-3.1.1/drivers/video/fbmem.c 2011-11-11 15:19:27.000000000 -0500
36366+++ linux-3.1.1/drivers/video/fbmem.c 2011-11-16 18:40:29.000000000 -0500
36367@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
36368 image->dx += image->width + 8;
36369 }
36370 } else if (rotate == FB_ROTATE_UD) {
36371- for (x = 0; x < num && image->dx >= 0; x++) {
36372+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36373 info->fbops->fb_imageblit(info, image);
36374 image->dx -= image->width + 8;
36375 }
36376@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
36377 image->dy += image->height + 8;
36378 }
36379 } else if (rotate == FB_ROTATE_CCW) {
36380- for (x = 0; x < num && image->dy >= 0; x++) {
36381+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36382 info->fbops->fb_imageblit(info, image);
36383 image->dy -= image->height + 8;
36384 }
36385@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
36386 int flags = info->flags;
36387 int ret = 0;
36388
36389+ pax_track_stack();
36390+
36391 if (var->activate & FB_ACTIVATE_INV_MODE) {
36392 struct fb_videomode mode1, mode2;
36393
36394@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
36395 void __user *argp = (void __user *)arg;
36396 long ret = 0;
36397
36398+ pax_track_stack();
36399+
36400 switch (cmd) {
36401 case FBIOGET_VSCREENINFO:
36402 if (!lock_fb_info(info))
36403@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
36404 return -EFAULT;
36405 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36406 return -EINVAL;
36407- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36408+ if (con2fb.framebuffer >= FB_MAX)
36409 return -EINVAL;
36410 if (!registered_fb[con2fb.framebuffer])
36411 request_module("fb%d", con2fb.framebuffer);
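
In fb_do_show_logo() the rotated-logo loops decrement image->dx/image->dy, which are __u32 fields, so a plain image->dx >= 0 condition is always true and only the x < num bound keeps the loop finite; the (__s32) cast restores the intended stop-when-negative test. A small illustration of the trap with a uint32_t stand-in:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t dx = 20;          /* like image->dx */
        int steps = 0;

        /* dx >= 0 is always true for an unsigned field; only the step limit
         * keeps this loop finite, which is the pitfall the cast works around */
        for (int x = 0; x < 10 && dx >= 0; x++) {
                dx -= 8;           /* wraps to a huge value once it "goes negative" */
                steps++;
        }
        printf("unsigned test ran %d steps, dx=%u\n", steps, dx);

        dx = 20;
        steps = 0;
        for (int x = 0; x < 10 && (int32_t)dx >= 0; x++) {   /* the patched form */
                dx -= 8;
                steps++;
        }
        printf("signed-cast test ran %d steps, dx=%u\n", steps, dx);
        return 0;
}
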
36412diff -urNp linux-3.1.1/drivers/video/geode/gx1fb_core.c linux-3.1.1/drivers/video/geode/gx1fb_core.c
36413--- linux-3.1.1/drivers/video/geode/gx1fb_core.c 2011-11-11 15:19:27.000000000 -0500
36414+++ linux-3.1.1/drivers/video/geode/gx1fb_core.c 2011-11-16 18:39:08.000000000 -0500
36415@@ -29,7 +29,7 @@ static int crt_option = 1;
36416 static char panel_option[32] = "";
36417
36418 /* Modes relevant to the GX1 (taken from modedb.c) */
36419-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36420+static const struct fb_videomode __devinitconst gx1_modedb[] = {
36421 /* 640x480-60 VESA */
36422 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36423 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36424diff -urNp linux-3.1.1/drivers/video/gxt4500.c linux-3.1.1/drivers/video/gxt4500.c
36425--- linux-3.1.1/drivers/video/gxt4500.c 2011-11-11 15:19:27.000000000 -0500
36426+++ linux-3.1.1/drivers/video/gxt4500.c 2011-11-16 18:39:08.000000000 -0500
36427@@ -156,7 +156,7 @@ struct gxt4500_par {
36428 static char *mode_option;
36429
36430 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36431-static const struct fb_videomode defaultmode __devinitdata = {
36432+static const struct fb_videomode defaultmode __devinitconst = {
36433 .refresh = 60,
36434 .xres = 1280,
36435 .yres = 1024,
36436@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
36437 return 0;
36438 }
36439
36440-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36441+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36442 .id = "IBM GXT4500P",
36443 .type = FB_TYPE_PACKED_PIXELS,
36444 .visual = FB_VISUAL_PSEUDOCOLOR,
36445diff -urNp linux-3.1.1/drivers/video/i810/i810_accel.c linux-3.1.1/drivers/video/i810/i810_accel.c
36446--- linux-3.1.1/drivers/video/i810/i810_accel.c 2011-11-11 15:19:27.000000000 -0500
36447+++ linux-3.1.1/drivers/video/i810/i810_accel.c 2011-11-16 18:39:08.000000000 -0500
36448@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
36449 }
36450 }
36451 printk("ringbuffer lockup!!!\n");
36452+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36453 i810_report_error(mmio);
36454 par->dev_flags |= LOCKUP;
36455 info->pixmap.scan_align = 1;
36456diff -urNp linux-3.1.1/drivers/video/i810/i810_main.c linux-3.1.1/drivers/video/i810/i810_main.c
36457--- linux-3.1.1/drivers/video/i810/i810_main.c 2011-11-11 15:19:27.000000000 -0500
36458+++ linux-3.1.1/drivers/video/i810/i810_main.c 2011-11-16 18:39:08.000000000 -0500
36459@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
36460 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36461
36462 /* PCI */
36463-static const char *i810_pci_list[] __devinitdata = {
36464+static const char *i810_pci_list[] __devinitconst = {
36465 "Intel(R) 810 Framebuffer Device" ,
36466 "Intel(R) 810-DC100 Framebuffer Device" ,
36467 "Intel(R) 810E Framebuffer Device" ,
36468diff -urNp linux-3.1.1/drivers/video/jz4740_fb.c linux-3.1.1/drivers/video/jz4740_fb.c
36469--- linux-3.1.1/drivers/video/jz4740_fb.c 2011-11-11 15:19:27.000000000 -0500
36470+++ linux-3.1.1/drivers/video/jz4740_fb.c 2011-11-16 18:39:08.000000000 -0500
36471@@ -136,7 +136,7 @@ struct jzfb {
36472 uint32_t pseudo_palette[16];
36473 };
36474
36475-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36476+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36477 .id = "JZ4740 FB",
36478 .type = FB_TYPE_PACKED_PIXELS,
36479 .visual = FB_VISUAL_TRUECOLOR,
36480diff -urNp linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm
36481--- linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm 2011-11-11 15:19:27.000000000 -0500
36482+++ linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm 2011-11-16 18:40:29.000000000 -0500
36483@@ -1,1604 +1,1123 @@
36484 P3
36485-# Standard 224-color Linux logo
36486 80 80
36487 255
36488- 0 0 0 0 0 0 0 0 0 0 0 0
36489- 0 0 0 0 0 0 0 0 0 0 0 0
36490- 0 0 0 0 0 0 0 0 0 0 0 0
36491- 0 0 0 0 0 0 0 0 0 0 0 0
36492- 0 0 0 0 0 0 0 0 0 0 0 0
36493- 0 0 0 0 0 0 0 0 0 0 0 0
36494- 0 0 0 0 0 0 0 0 0 0 0 0
36495- 0 0 0 0 0 0 0 0 0 0 0 0
36496- 0 0 0 0 0 0 0 0 0 0 0 0
36497- 6 6 6 6 6 6 10 10 10 10 10 10
36498- 10 10 10 6 6 6 6 6 6 6 6 6
36499- 0 0 0 0 0 0 0 0 0 0 0 0
36500- 0 0 0 0 0 0 0 0 0 0 0 0
36501- 0 0 0 0 0 0 0 0 0 0 0 0
36502- 0 0 0 0 0 0 0 0 0 0 0 0
36503- 0 0 0 0 0 0 0 0 0 0 0 0
36504- 0 0 0 0 0 0 0 0 0 0 0 0
36505- 0 0 0 0 0 0 0 0 0 0 0 0
36506- 0 0 0 0 0 0 0 0 0 0 0 0
36507- 0 0 0 0 0 0 0 0 0 0 0 0
36508- 0 0 0 0 0 0 0 0 0 0 0 0
36509- 0 0 0 0 0 0 0 0 0 0 0 0
36510- 0 0 0 0 0 0 0 0 0 0 0 0
36511- 0 0 0 0 0 0 0 0 0 0 0 0
36512- 0 0 0 0 0 0 0 0 0 0 0 0
36513- 0 0 0 0 0 0 0 0 0 0 0 0
36514- 0 0 0 0 0 0 0 0 0 0 0 0
36515- 0 0 0 0 0 0 0 0 0 0 0 0
36516- 0 0 0 6 6 6 10 10 10 14 14 14
36517- 22 22 22 26 26 26 30 30 30 34 34 34
36518- 30 30 30 30 30 30 26 26 26 18 18 18
36519- 14 14 14 10 10 10 6 6 6 0 0 0
36520- 0 0 0 0 0 0 0 0 0 0 0 0
36521- 0 0 0 0 0 0 0 0 0 0 0 0
36522- 0 0 0 0 0 0 0 0 0 0 0 0
36523- 0 0 0 0 0 0 0 0 0 0 0 0
36524- 0 0 0 0 0 0 0 0 0 0 0 0
36525- 0 0 0 0 0 0 0 0 0 0 0 0
36526- 0 0 0 0 0 0 0 0 0 0 0 0
36527- 0 0 0 0 0 0 0 0 0 0 0 0
36528- 0 0 0 0 0 0 0 0 0 0 0 0
36529- 0 0 0 0 0 1 0 0 1 0 0 0
36530- 0 0 0 0 0 0 0 0 0 0 0 0
36531- 0 0 0 0 0 0 0 0 0 0 0 0
36532- 0 0 0 0 0 0 0 0 0 0 0 0
36533- 0 0 0 0 0 0 0 0 0 0 0 0
36534- 0 0 0 0 0 0 0 0 0 0 0 0
36535- 0 0 0 0 0 0 0 0 0 0 0 0
36536- 6 6 6 14 14 14 26 26 26 42 42 42
36537- 54 54 54 66 66 66 78 78 78 78 78 78
36538- 78 78 78 74 74 74 66 66 66 54 54 54
36539- 42 42 42 26 26 26 18 18 18 10 10 10
36540- 6 6 6 0 0 0 0 0 0 0 0 0
36541- 0 0 0 0 0 0 0 0 0 0 0 0
36542- 0 0 0 0 0 0 0 0 0 0 0 0
36543- 0 0 0 0 0 0 0 0 0 0 0 0
36544- 0 0 0 0 0 0 0 0 0 0 0 0
36545- 0 0 0 0 0 0 0 0 0 0 0 0
36546- 0 0 0 0 0 0 0 0 0 0 0 0
36547- 0 0 0 0 0 0 0 0 0 0 0 0
36548- 0 0 0 0 0 0 0 0 0 0 0 0
36549- 0 0 1 0 0 0 0 0 0 0 0 0
36550- 0 0 0 0 0 0 0 0 0 0 0 0
36551- 0 0 0 0 0 0 0 0 0 0 0 0
36552- 0 0 0 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 0 0 0 0 0 0 0 0 0 0 0 0
36555- 0 0 0 0 0 0 0 0 0 10 10 10
36556- 22 22 22 42 42 42 66 66 66 86 86 86
36557- 66 66 66 38 38 38 38 38 38 22 22 22
36558- 26 26 26 34 34 34 54 54 54 66 66 66
36559- 86 86 86 70 70 70 46 46 46 26 26 26
36560- 14 14 14 6 6 6 0 0 0 0 0 0
36561- 0 0 0 0 0 0 0 0 0 0 0 0
36562- 0 0 0 0 0 0 0 0 0 0 0 0
36563- 0 0 0 0 0 0 0 0 0 0 0 0
36564- 0 0 0 0 0 0 0 0 0 0 0 0
36565- 0 0 0 0 0 0 0 0 0 0 0 0
36566- 0 0 0 0 0 0 0 0 0 0 0 0
36567- 0 0 0 0 0 0 0 0 0 0 0 0
36568- 0 0 0 0 0 0 0 0 0 0 0 0
36569- 0 0 1 0 0 1 0 0 1 0 0 0
36570- 0 0 0 0 0 0 0 0 0 0 0 0
36571- 0 0 0 0 0 0 0 0 0 0 0 0
36572- 0 0 0 0 0 0 0 0 0 0 0 0
36573- 0 0 0 0 0 0 0 0 0 0 0 0
36574- 0 0 0 0 0 0 0 0 0 0 0 0
36575- 0 0 0 0 0 0 10 10 10 26 26 26
36576- 50 50 50 82 82 82 58 58 58 6 6 6
36577- 2 2 6 2 2 6 2 2 6 2 2 6
36578- 2 2 6 2 2 6 2 2 6 2 2 6
36579- 6 6 6 54 54 54 86 86 86 66 66 66
36580- 38 38 38 18 18 18 6 6 6 0 0 0
36581- 0 0 0 0 0 0 0 0 0 0 0 0
36582- 0 0 0 0 0 0 0 0 0 0 0 0
36583- 0 0 0 0 0 0 0 0 0 0 0 0
36584- 0 0 0 0 0 0 0 0 0 0 0 0
36585- 0 0 0 0 0 0 0 0 0 0 0 0
36586- 0 0 0 0 0 0 0 0 0 0 0 0
36587- 0 0 0 0 0 0 0 0 0 0 0 0
36588- 0 0 0 0 0 0 0 0 0 0 0 0
36589- 0 0 0 0 0 0 0 0 0 0 0 0
36590- 0 0 0 0 0 0 0 0 0 0 0 0
36591- 0 0 0 0 0 0 0 0 0 0 0 0
36592- 0 0 0 0 0 0 0 0 0 0 0 0
36593- 0 0 0 0 0 0 0 0 0 0 0 0
36594- 0 0 0 0 0 0 0 0 0 0 0 0
36595- 0 0 0 6 6 6 22 22 22 50 50 50
36596- 78 78 78 34 34 34 2 2 6 2 2 6
36597- 2 2 6 2 2 6 2 2 6 2 2 6
36598- 2 2 6 2 2 6 2 2 6 2 2 6
36599- 2 2 6 2 2 6 6 6 6 70 70 70
36600- 78 78 78 46 46 46 22 22 22 6 6 6
36601- 0 0 0 0 0 0 0 0 0 0 0 0
36602- 0 0 0 0 0 0 0 0 0 0 0 0
36603- 0 0 0 0 0 0 0 0 0 0 0 0
36604- 0 0 0 0 0 0 0 0 0 0 0 0
36605- 0 0 0 0 0 0 0 0 0 0 0 0
36606- 0 0 0 0 0 0 0 0 0 0 0 0
36607- 0 0 0 0 0 0 0 0 0 0 0 0
36608- 0 0 0 0 0 0 0 0 0 0 0 0
36609- 0 0 1 0 0 1 0 0 1 0 0 0
36610- 0 0 0 0 0 0 0 0 0 0 0 0
36611- 0 0 0 0 0 0 0 0 0 0 0 0
36612- 0 0 0 0 0 0 0 0 0 0 0 0
36613- 0 0 0 0 0 0 0 0 0 0 0 0
36614- 0 0 0 0 0 0 0 0 0 0 0 0
36615- 6 6 6 18 18 18 42 42 42 82 82 82
36616- 26 26 26 2 2 6 2 2 6 2 2 6
36617- 2 2 6 2 2 6 2 2 6 2 2 6
36618- 2 2 6 2 2 6 2 2 6 14 14 14
36619- 46 46 46 34 34 34 6 6 6 2 2 6
36620- 42 42 42 78 78 78 42 42 42 18 18 18
36621- 6 6 6 0 0 0 0 0 0 0 0 0
36622- 0 0 0 0 0 0 0 0 0 0 0 0
36623- 0 0 0 0 0 0 0 0 0 0 0 0
36624- 0 0 0 0 0 0 0 0 0 0 0 0
36625- 0 0 0 0 0 0 0 0 0 0 0 0
36626- 0 0 0 0 0 0 0 0 0 0 0 0
36627- 0 0 0 0 0 0 0 0 0 0 0 0
36628- 0 0 0 0 0 0 0 0 0 0 0 0
36629- 0 0 1 0 0 0 0 0 1 0 0 0
36630- 0 0 0 0 0 0 0 0 0 0 0 0
36631- 0 0 0 0 0 0 0 0 0 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 0 0 0 0 0 0 0 0 0 0 0 0
36634- 0 0 0 0 0 0 0 0 0 0 0 0
36635- 10 10 10 30 30 30 66 66 66 58 58 58
36636- 2 2 6 2 2 6 2 2 6 2 2 6
36637- 2 2 6 2 2 6 2 2 6 2 2 6
36638- 2 2 6 2 2 6 2 2 6 26 26 26
36639- 86 86 86 101 101 101 46 46 46 10 10 10
36640- 2 2 6 58 58 58 70 70 70 34 34 34
36641- 10 10 10 0 0 0 0 0 0 0 0 0
36642- 0 0 0 0 0 0 0 0 0 0 0 0
36643- 0 0 0 0 0 0 0 0 0 0 0 0
36644- 0 0 0 0 0 0 0 0 0 0 0 0
36645- 0 0 0 0 0 0 0 0 0 0 0 0
36646- 0 0 0 0 0 0 0 0 0 0 0 0
36647- 0 0 0 0 0 0 0 0 0 0 0 0
36648- 0 0 0 0 0 0 0 0 0 0 0 0
36649- 0 0 1 0 0 1 0 0 1 0 0 0
36650- 0 0 0 0 0 0 0 0 0 0 0 0
36651- 0 0 0 0 0 0 0 0 0 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 0 0 0 0 0 0 0 0 0 0 0 0
36654- 0 0 0 0 0 0 0 0 0 0 0 0
36655- 14 14 14 42 42 42 86 86 86 10 10 10
36656- 2 2 6 2 2 6 2 2 6 2 2 6
36657- 2 2 6 2 2 6 2 2 6 2 2 6
36658- 2 2 6 2 2 6 2 2 6 30 30 30
36659- 94 94 94 94 94 94 58 58 58 26 26 26
36660- 2 2 6 6 6 6 78 78 78 54 54 54
36661- 22 22 22 6 6 6 0 0 0 0 0 0
36662- 0 0 0 0 0 0 0 0 0 0 0 0
36663- 0 0 0 0 0 0 0 0 0 0 0 0
36664- 0 0 0 0 0 0 0 0 0 0 0 0
36665- 0 0 0 0 0 0 0 0 0 0 0 0
36666- 0 0 0 0 0 0 0 0 0 0 0 0
36667- 0 0 0 0 0 0 0 0 0 0 0 0
36668- 0 0 0 0 0 0 0 0 0 0 0 0
36669- 0 0 0 0 0 0 0 0 0 0 0 0
36670- 0 0 0 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 0 0 0 0 0 0 0 0 0 0
36674- 0 0 0 0 0 0 0 0 0 6 6 6
36675- 22 22 22 62 62 62 62 62 62 2 2 6
36676- 2 2 6 2 2 6 2 2 6 2 2 6
36677- 2 2 6 2 2 6 2 2 6 2 2 6
36678- 2 2 6 2 2 6 2 2 6 26 26 26
36679- 54 54 54 38 38 38 18 18 18 10 10 10
36680- 2 2 6 2 2 6 34 34 34 82 82 82
36681- 38 38 38 14 14 14 0 0 0 0 0 0
36682- 0 0 0 0 0 0 0 0 0 0 0 0
36683- 0 0 0 0 0 0 0 0 0 0 0 0
36684- 0 0 0 0 0 0 0 0 0 0 0 0
36685- 0 0 0 0 0 0 0 0 0 0 0 0
36686- 0 0 0 0 0 0 0 0 0 0 0 0
36687- 0 0 0 0 0 0 0 0 0 0 0 0
36688- 0 0 0 0 0 0 0 0 0 0 0 0
36689- 0 0 0 0 0 1 0 0 1 0 0 0
36690- 0 0 0 0 0 0 0 0 0 0 0 0
36691- 0 0 0 0 0 0 0 0 0 0 0 0
36692- 0 0 0 0 0 0 0 0 0 0 0 0
36693- 0 0 0 0 0 0 0 0 0 0 0 0
36694- 0 0 0 0 0 0 0 0 0 6 6 6
36695- 30 30 30 78 78 78 30 30 30 2 2 6
36696- 2 2 6 2 2 6 2 2 6 2 2 6
36697- 2 2 6 2 2 6 2 2 6 2 2 6
36698- 2 2 6 2 2 6 2 2 6 10 10 10
36699- 10 10 10 2 2 6 2 2 6 2 2 6
36700- 2 2 6 2 2 6 2 2 6 78 78 78
36701- 50 50 50 18 18 18 6 6 6 0 0 0
36702- 0 0 0 0 0 0 0 0 0 0 0 0
36703- 0 0 0 0 0 0 0 0 0 0 0 0
36704- 0 0 0 0 0 0 0 0 0 0 0 0
36705- 0 0 0 0 0 0 0 0 0 0 0 0
36706- 0 0 0 0 0 0 0 0 0 0 0 0
36707- 0 0 0 0 0 0 0 0 0 0 0 0
36708- 0 0 0 0 0 0 0 0 0 0 0 0
36709- 0 0 1 0 0 0 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 0 0 0 0
36712- 0 0 0 0 0 0 0 0 0 0 0 0
36713- 0 0 0 0 0 0 0 0 0 0 0 0
36714- 0 0 0 0 0 0 0 0 0 10 10 10
36715- 38 38 38 86 86 86 14 14 14 2 2 6
36716- 2 2 6 2 2 6 2 2 6 2 2 6
36717- 2 2 6 2 2 6 2 2 6 2 2 6
36718- 2 2 6 2 2 6 2 2 6 2 2 6
36719- 2 2 6 2 2 6 2 2 6 2 2 6
36720- 2 2 6 2 2 6 2 2 6 54 54 54
36721- 66 66 66 26 26 26 6 6 6 0 0 0
36722- 0 0 0 0 0 0 0 0 0 0 0 0
36723- 0 0 0 0 0 0 0 0 0 0 0 0
36724- 0 0 0 0 0 0 0 0 0 0 0 0
36725- 0 0 0 0 0 0 0 0 0 0 0 0
36726- 0 0 0 0 0 0 0 0 0 0 0 0
36727- 0 0 0 0 0 0 0 0 0 0 0 0
36728- 0 0 0 0 0 0 0 0 0 0 0 0
36729- 0 0 0 0 0 1 0 0 1 0 0 0
36730- 0 0 0 0 0 0 0 0 0 0 0 0
36731- 0 0 0 0 0 0 0 0 0 0 0 0
36732- 0 0 0 0 0 0 0 0 0 0 0 0
36733- 0 0 0 0 0 0 0 0 0 0 0 0
36734- 0 0 0 0 0 0 0 0 0 14 14 14
36735- 42 42 42 82 82 82 2 2 6 2 2 6
36736- 2 2 6 6 6 6 10 10 10 2 2 6
36737- 2 2 6 2 2 6 2 2 6 2 2 6
36738- 2 2 6 2 2 6 2 2 6 6 6 6
36739- 14 14 14 10 10 10 2 2 6 2 2 6
36740- 2 2 6 2 2 6 2 2 6 18 18 18
36741- 82 82 82 34 34 34 10 10 10 0 0 0
36742- 0 0 0 0 0 0 0 0 0 0 0 0
36743- 0 0 0 0 0 0 0 0 0 0 0 0
36744- 0 0 0 0 0 0 0 0 0 0 0 0
36745- 0 0 0 0 0 0 0 0 0 0 0 0
36746- 0 0 0 0 0 0 0 0 0 0 0 0
36747- 0 0 0 0 0 0 0 0 0 0 0 0
36748- 0 0 0 0 0 0 0 0 0 0 0 0
36749- 0 0 1 0 0 0 0 0 0 0 0 0
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 0 0 0
36754- 0 0 0 0 0 0 0 0 0 14 14 14
36755- 46 46 46 86 86 86 2 2 6 2 2 6
36756- 6 6 6 6 6 6 22 22 22 34 34 34
36757- 6 6 6 2 2 6 2 2 6 2 2 6
36758- 2 2 6 2 2 6 18 18 18 34 34 34
36759- 10 10 10 50 50 50 22 22 22 2 2 6
36760- 2 2 6 2 2 6 2 2 6 10 10 10
36761- 86 86 86 42 42 42 14 14 14 0 0 0
36762- 0 0 0 0 0 0 0 0 0 0 0 0
36763- 0 0 0 0 0 0 0 0 0 0 0 0
36764- 0 0 0 0 0 0 0 0 0 0 0 0
36765- 0 0 0 0 0 0 0 0 0 0 0 0
36766- 0 0 0 0 0 0 0 0 0 0 0 0
36767- 0 0 0 0 0 0 0 0 0 0 0 0
36768- 0 0 0 0 0 0 0 0 0 0 0 0
36769- 0 0 1 0 0 1 0 0 1 0 0 0
36770- 0 0 0 0 0 0 0 0 0 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 0 0 0 0
36773- 0 0 0 0 0 0 0 0 0 0 0 0
36774- 0 0 0 0 0 0 0 0 0 14 14 14
36775- 46 46 46 86 86 86 2 2 6 2 2 6
36776- 38 38 38 116 116 116 94 94 94 22 22 22
36777- 22 22 22 2 2 6 2 2 6 2 2 6
36778- 14 14 14 86 86 86 138 138 138 162 162 162
36779-154 154 154 38 38 38 26 26 26 6 6 6
36780- 2 2 6 2 2 6 2 2 6 2 2 6
36781- 86 86 86 46 46 46 14 14 14 0 0 0
36782- 0 0 0 0 0 0 0 0 0 0 0 0
36783- 0 0 0 0 0 0 0 0 0 0 0 0
36784- 0 0 0 0 0 0 0 0 0 0 0 0
36785- 0 0 0 0 0 0 0 0 0 0 0 0
36786- 0 0 0 0 0 0 0 0 0 0 0 0
36787- 0 0 0 0 0 0 0 0 0 0 0 0
36788- 0 0 0 0 0 0 0 0 0 0 0 0
36789- 0 0 0 0 0 0 0 0 0 0 0 0
36790- 0 0 0 0 0 0 0 0 0 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 0 0 0
36794- 0 0 0 0 0 0 0 0 0 14 14 14
36795- 46 46 46 86 86 86 2 2 6 14 14 14
36796-134 134 134 198 198 198 195 195 195 116 116 116
36797- 10 10 10 2 2 6 2 2 6 6 6 6
36798-101 98 89 187 187 187 210 210 210 218 218 218
36799-214 214 214 134 134 134 14 14 14 6 6 6
36800- 2 2 6 2 2 6 2 2 6 2 2 6
36801- 86 86 86 50 50 50 18 18 18 6 6 6
36802- 0 0 0 0 0 0 0 0 0 0 0 0
36803- 0 0 0 0 0 0 0 0 0 0 0 0
36804- 0 0 0 0 0 0 0 0 0 0 0 0
36805- 0 0 0 0 0 0 0 0 0 0 0 0
36806- 0 0 0 0 0 0 0 0 0 0 0 0
36807- 0 0 0 0 0 0 0 0 0 0 0 0
36808- 0 0 0 0 0 0 0 0 1 0 0 0
36809- 0 0 1 0 0 1 0 0 1 0 0 0
36810- 0 0 0 0 0 0 0 0 0 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 0 0 0 0
36813- 0 0 0 0 0 0 0 0 0 0 0 0
36814- 0 0 0 0 0 0 0 0 0 14 14 14
36815- 46 46 46 86 86 86 2 2 6 54 54 54
36816-218 218 218 195 195 195 226 226 226 246 246 246
36817- 58 58 58 2 2 6 2 2 6 30 30 30
36818-210 210 210 253 253 253 174 174 174 123 123 123
36819-221 221 221 234 234 234 74 74 74 2 2 6
36820- 2 2 6 2 2 6 2 2 6 2 2 6
36821- 70 70 70 58 58 58 22 22 22 6 6 6
36822- 0 0 0 0 0 0 0 0 0 0 0 0
36823- 0 0 0 0 0 0 0 0 0 0 0 0
36824- 0 0 0 0 0 0 0 0 0 0 0 0
36825- 0 0 0 0 0 0 0 0 0 0 0 0
36826- 0 0 0 0 0 0 0 0 0 0 0 0
36827- 0 0 0 0 0 0 0 0 0 0 0 0
36828- 0 0 0 0 0 0 0 0 0 0 0 0
36829- 0 0 0 0 0 0 0 0 0 0 0 0
36830- 0 0 0 0 0 0 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 0 0 0 0
36833- 0 0 0 0 0 0 0 0 0 0 0 0
36834- 0 0 0 0 0 0 0 0 0 14 14 14
36835- 46 46 46 82 82 82 2 2 6 106 106 106
36836-170 170 170 26 26 26 86 86 86 226 226 226
36837-123 123 123 10 10 10 14 14 14 46 46 46
36838-231 231 231 190 190 190 6 6 6 70 70 70
36839- 90 90 90 238 238 238 158 158 158 2 2 6
36840- 2 2 6 2 2 6 2 2 6 2 2 6
36841- 70 70 70 58 58 58 22 22 22 6 6 6
36842- 0 0 0 0 0 0 0 0 0 0 0 0
36843- 0 0 0 0 0 0 0 0 0 0 0 0
36844- 0 0 0 0 0 0 0 0 0 0 0 0
36845- 0 0 0 0 0 0 0 0 0 0 0 0
36846- 0 0 0 0 0 0 0 0 0 0 0 0
36847- 0 0 0 0 0 0 0 0 0 0 0 0
36848- 0 0 0 0 0 0 0 0 1 0 0 0
36849- 0 0 1 0 0 1 0 0 1 0 0 0
36850- 0 0 0 0 0 0 0 0 0 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 0 0 0 0
36853- 0 0 0 0 0 0 0 0 0 0 0 0
36854- 0 0 0 0 0 0 0 0 0 14 14 14
36855- 42 42 42 86 86 86 6 6 6 116 116 116
36856-106 106 106 6 6 6 70 70 70 149 149 149
36857-128 128 128 18 18 18 38 38 38 54 54 54
36858-221 221 221 106 106 106 2 2 6 14 14 14
36859- 46 46 46 190 190 190 198 198 198 2 2 6
36860- 2 2 6 2 2 6 2 2 6 2 2 6
36861- 74 74 74 62 62 62 22 22 22 6 6 6
36862- 0 0 0 0 0 0 0 0 0 0 0 0
36863- 0 0 0 0 0 0 0 0 0 0 0 0
36864- 0 0 0 0 0 0 0 0 0 0 0 0
36865- 0 0 0 0 0 0 0 0 0 0 0 0
36866- 0 0 0 0 0 0 0 0 0 0 0 0
36867- 0 0 0 0 0 0 0 0 0 0 0 0
36868- 0 0 0 0 0 0 0 0 1 0 0 0
36869- 0 0 1 0 0 0 0 0 1 0 0 0
36870- 0 0 0 0 0 0 0 0 0 0 0 0
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 0 0 0
36874- 0 0 0 0 0 0 0 0 0 14 14 14
36875- 42 42 42 94 94 94 14 14 14 101 101 101
36876-128 128 128 2 2 6 18 18 18 116 116 116
36877-118 98 46 121 92 8 121 92 8 98 78 10
36878-162 162 162 106 106 106 2 2 6 2 2 6
36879- 2 2 6 195 195 195 195 195 195 6 6 6
36880- 2 2 6 2 2 6 2 2 6 2 2 6
36881- 74 74 74 62 62 62 22 22 22 6 6 6
36882- 0 0 0 0 0 0 0 0 0 0 0 0
36883- 0 0 0 0 0 0 0 0 0 0 0 0
36884- 0 0 0 0 0 0 0 0 0 0 0 0
36885- 0 0 0 0 0 0 0 0 0 0 0 0
36886- 0 0 0 0 0 0 0 0 0 0 0 0
36887- 0 0 0 0 0 0 0 0 0 0 0 0
36888- 0 0 0 0 0 0 0 0 1 0 0 1
36889- 0 0 1 0 0 0 0 0 1 0 0 0
36890- 0 0 0 0 0 0 0 0 0 0 0 0
36891- 0 0 0 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 0 0 0
36894- 0 0 0 0 0 0 0 0 0 10 10 10
36895- 38 38 38 90 90 90 14 14 14 58 58 58
36896-210 210 210 26 26 26 54 38 6 154 114 10
36897-226 170 11 236 186 11 225 175 15 184 144 12
36898-215 174 15 175 146 61 37 26 9 2 2 6
36899- 70 70 70 246 246 246 138 138 138 2 2 6
36900- 2 2 6 2 2 6 2 2 6 2 2 6
36901- 70 70 70 66 66 66 26 26 26 6 6 6
36902- 0 0 0 0 0 0 0 0 0 0 0 0
36903- 0 0 0 0 0 0 0 0 0 0 0 0
36904- 0 0 0 0 0 0 0 0 0 0 0 0
36905- 0 0 0 0 0 0 0 0 0 0 0 0
36906- 0 0 0 0 0 0 0 0 0 0 0 0
36907- 0 0 0 0 0 0 0 0 0 0 0 0
36908- 0 0 0 0 0 0 0 0 0 0 0 0
36909- 0 0 0 0 0 0 0 0 0 0 0 0
36910- 0 0 0 0 0 0 0 0 0 0 0 0
36911- 0 0 0 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 0 0 0
36914- 0 0 0 0 0 0 0 0 0 10 10 10
36915- 38 38 38 86 86 86 14 14 14 10 10 10
36916-195 195 195 188 164 115 192 133 9 225 175 15
36917-239 182 13 234 190 10 232 195 16 232 200 30
36918-245 207 45 241 208 19 232 195 16 184 144 12
36919-218 194 134 211 206 186 42 42 42 2 2 6
36920- 2 2 6 2 2 6 2 2 6 2 2 6
36921- 50 50 50 74 74 74 30 30 30 6 6 6
36922- 0 0 0 0 0 0 0 0 0 0 0 0
36923- 0 0 0 0 0 0 0 0 0 0 0 0
36924- 0 0 0 0 0 0 0 0 0 0 0 0
36925- 0 0 0 0 0 0 0 0 0 0 0 0
36926- 0 0 0 0 0 0 0 0 0 0 0 0
36927- 0 0 0 0 0 0 0 0 0 0 0 0
36928- 0 0 0 0 0 0 0 0 0 0 0 0
36929- 0 0 0 0 0 0 0 0 0 0 0 0
36930- 0 0 0 0 0 0 0 0 0 0 0 0
36931- 0 0 0 0 0 0 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 0 0 0
36933- 0 0 0 0 0 0 0 0 0 0 0 0
36934- 0 0 0 0 0 0 0 0 0 10 10 10
36935- 34 34 34 86 86 86 14 14 14 2 2 6
36936-121 87 25 192 133 9 219 162 10 239 182 13
36937-236 186 11 232 195 16 241 208 19 244 214 54
36938-246 218 60 246 218 38 246 215 20 241 208 19
36939-241 208 19 226 184 13 121 87 25 2 2 6
36940- 2 2 6 2 2 6 2 2 6 2 2 6
36941- 50 50 50 82 82 82 34 34 34 10 10 10
36942- 0 0 0 0 0 0 0 0 0 0 0 0
36943- 0 0 0 0 0 0 0 0 0 0 0 0
36944- 0 0 0 0 0 0 0 0 0 0 0 0
36945- 0 0 0 0 0 0 0 0 0 0 0 0
36946- 0 0 0 0 0 0 0 0 0 0 0 0
36947- 0 0 0 0 0 0 0 0 0 0 0 0
36948- 0 0 0 0 0 0 0 0 0 0 0 0
36949- 0 0 0 0 0 0 0 0 0 0 0 0
36950- 0 0 0 0 0 0 0 0 0 0 0 0
36951- 0 0 0 0 0 0 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 0 0 0
36953- 0 0 0 0 0 0 0 0 0 0 0 0
36954- 0 0 0 0 0 0 0 0 0 10 10 10
36955- 34 34 34 82 82 82 30 30 30 61 42 6
36956-180 123 7 206 145 10 230 174 11 239 182 13
36957-234 190 10 238 202 15 241 208 19 246 218 74
36958-246 218 38 246 215 20 246 215 20 246 215 20
36959-226 184 13 215 174 15 184 144 12 6 6 6
36960- 2 2 6 2 2 6 2 2 6 2 2 6
36961- 26 26 26 94 94 94 42 42 42 14 14 14
36962- 0 0 0 0 0 0 0 0 0 0 0 0
36963- 0 0 0 0 0 0 0 0 0 0 0 0
36964- 0 0 0 0 0 0 0 0 0 0 0 0
36965- 0 0 0 0 0 0 0 0 0 0 0 0
36966- 0 0 0 0 0 0 0 0 0 0 0 0
36967- 0 0 0 0 0 0 0 0 0 0 0 0
36968- 0 0 0 0 0 0 0 0 0 0 0 0
36969- 0 0 0 0 0 0 0 0 0 0 0 0
36970- 0 0 0 0 0 0 0 0 0 0 0 0
36971- 0 0 0 0 0 0 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 0 0 0
36973- 0 0 0 0 0 0 0 0 0 0 0 0
36974- 0 0 0 0 0 0 0 0 0 10 10 10
36975- 30 30 30 78 78 78 50 50 50 104 69 6
36976-192 133 9 216 158 10 236 178 12 236 186 11
36977-232 195 16 241 208 19 244 214 54 245 215 43
36978-246 215 20 246 215 20 241 208 19 198 155 10
36979-200 144 11 216 158 10 156 118 10 2 2 6
36980- 2 2 6 2 2 6 2 2 6 2 2 6
36981- 6 6 6 90 90 90 54 54 54 18 18 18
36982- 6 6 6 0 0 0 0 0 0 0 0 0
36983- 0 0 0 0 0 0 0 0 0 0 0 0
36984- 0 0 0 0 0 0 0 0 0 0 0 0
36985- 0 0 0 0 0 0 0 0 0 0 0 0
36986- 0 0 0 0 0 0 0 0 0 0 0 0
36987- 0 0 0 0 0 0 0 0 0 0 0 0
36988- 0 0 0 0 0 0 0 0 0 0 0 0
36989- 0 0 0 0 0 0 0 0 0 0 0 0
36990- 0 0 0 0 0 0 0 0 0 0 0 0
36991- 0 0 0 0 0 0 0 0 0 0 0 0
36992- 0 0 0 0 0 0 0 0 0 0 0 0
36993- 0 0 0 0 0 0 0 0 0 0 0 0
36994- 0 0 0 0 0 0 0 0 0 10 10 10
36995- 30 30 30 78 78 78 46 46 46 22 22 22
36996-137 92 6 210 162 10 239 182 13 238 190 10
36997-238 202 15 241 208 19 246 215 20 246 215 20
36998-241 208 19 203 166 17 185 133 11 210 150 10
36999-216 158 10 210 150 10 102 78 10 2 2 6
37000- 6 6 6 54 54 54 14 14 14 2 2 6
37001- 2 2 6 62 62 62 74 74 74 30 30 30
37002- 10 10 10 0 0 0 0 0 0 0 0 0
37003- 0 0 0 0 0 0 0 0 0 0 0 0
37004- 0 0 0 0 0 0 0 0 0 0 0 0
37005- 0 0 0 0 0 0 0 0 0 0 0 0
37006- 0 0 0 0 0 0 0 0 0 0 0 0
37007- 0 0 0 0 0 0 0 0 0 0 0 0
37008- 0 0 0 0 0 0 0 0 0 0 0 0
37009- 0 0 0 0 0 0 0 0 0 0 0 0
37010- 0 0 0 0 0 0 0 0 0 0 0 0
37011- 0 0 0 0 0 0 0 0 0 0 0 0
37012- 0 0 0 0 0 0 0 0 0 0 0 0
37013- 0 0 0 0 0 0 0 0 0 0 0 0
37014- 0 0 0 0 0 0 0 0 0 10 10 10
37015- 34 34 34 78 78 78 50 50 50 6 6 6
37016- 94 70 30 139 102 15 190 146 13 226 184 13
37017-232 200 30 232 195 16 215 174 15 190 146 13
37018-168 122 10 192 133 9 210 150 10 213 154 11
37019-202 150 34 182 157 106 101 98 89 2 2 6
37020- 2 2 6 78 78 78 116 116 116 58 58 58
37021- 2 2 6 22 22 22 90 90 90 46 46 46
37022- 18 18 18 6 6 6 0 0 0 0 0 0
37023- 0 0 0 0 0 0 0 0 0 0 0 0
37024- 0 0 0 0 0 0 0 0 0 0 0 0
37025- 0 0 0 0 0 0 0 0 0 0 0 0
37026- 0 0 0 0 0 0 0 0 0 0 0 0
37027- 0 0 0 0 0 0 0 0 0 0 0 0
37028- 0 0 0 0 0 0 0 0 0 0 0 0
37029- 0 0 0 0 0 0 0 0 0 0 0 0
37030- 0 0 0 0 0 0 0 0 0 0 0 0
37031- 0 0 0 0 0 0 0 0 0 0 0 0
37032- 0 0 0 0 0 0 0 0 0 0 0 0
37033- 0 0 0 0 0 0 0 0 0 0 0 0
37034- 0 0 0 0 0 0 0 0 0 10 10 10
37035- 38 38 38 86 86 86 50 50 50 6 6 6
37036-128 128 128 174 154 114 156 107 11 168 122 10
37037-198 155 10 184 144 12 197 138 11 200 144 11
37038-206 145 10 206 145 10 197 138 11 188 164 115
37039-195 195 195 198 198 198 174 174 174 14 14 14
37040- 2 2 6 22 22 22 116 116 116 116 116 116
37041- 22 22 22 2 2 6 74 74 74 70 70 70
37042- 30 30 30 10 10 10 0 0 0 0 0 0
37043- 0 0 0 0 0 0 0 0 0 0 0 0
37044- 0 0 0 0 0 0 0 0 0 0 0 0
37045- 0 0 0 0 0 0 0 0 0 0 0 0
37046- 0 0 0 0 0 0 0 0 0 0 0 0
37047- 0 0 0 0 0 0 0 0 0 0 0 0
37048- 0 0 0 0 0 0 0 0 0 0 0 0
37049- 0 0 0 0 0 0 0 0 0 0 0 0
37050- 0 0 0 0 0 0 0 0 0 0 0 0
37051- 0 0 0 0 0 0 0 0 0 0 0 0
37052- 0 0 0 0 0 0 0 0 0 0 0 0
37053- 0 0 0 0 0 0 0 0 0 0 0 0
37054- 0 0 0 0 0 0 6 6 6 18 18 18
37055- 50 50 50 101 101 101 26 26 26 10 10 10
37056-138 138 138 190 190 190 174 154 114 156 107 11
37057-197 138 11 200 144 11 197 138 11 192 133 9
37058-180 123 7 190 142 34 190 178 144 187 187 187
37059-202 202 202 221 221 221 214 214 214 66 66 66
37060- 2 2 6 2 2 6 50 50 50 62 62 62
37061- 6 6 6 2 2 6 10 10 10 90 90 90
37062- 50 50 50 18 18 18 6 6 6 0 0 0
37063- 0 0 0 0 0 0 0 0 0 0 0 0
37064- 0 0 0 0 0 0 0 0 0 0 0 0
37065- 0 0 0 0 0 0 0 0 0 0 0 0
37066- 0 0 0 0 0 0 0 0 0 0 0 0
37067- 0 0 0 0 0 0 0 0 0 0 0 0
37068- 0 0 0 0 0 0 0 0 0 0 0 0
37069- 0 0 0 0 0 0 0 0 0 0 0 0
37070- 0 0 0 0 0 0 0 0 0 0 0 0
37071- 0 0 0 0 0 0 0 0 0 0 0 0
37072- 0 0 0 0 0 0 0 0 0 0 0 0
37073- 0 0 0 0 0 0 0 0 0 0 0 0
37074- 0 0 0 0 0 0 10 10 10 34 34 34
37075- 74 74 74 74 74 74 2 2 6 6 6 6
37076-144 144 144 198 198 198 190 190 190 178 166 146
37077-154 121 60 156 107 11 156 107 11 168 124 44
37078-174 154 114 187 187 187 190 190 190 210 210 210
37079-246 246 246 253 253 253 253 253 253 182 182 182
37080- 6 6 6 2 2 6 2 2 6 2 2 6
37081- 2 2 6 2 2 6 2 2 6 62 62 62
37082- 74 74 74 34 34 34 14 14 14 0 0 0
37083- 0 0 0 0 0 0 0 0 0 0 0 0
37084- 0 0 0 0 0 0 0 0 0 0 0 0
37085- 0 0 0 0 0 0 0 0 0 0 0 0
37086- 0 0 0 0 0 0 0 0 0 0 0 0
37087- 0 0 0 0 0 0 0 0 0 0 0 0
37088- 0 0 0 0 0 0 0 0 0 0 0 0
37089- 0 0 0 0 0 0 0 0 0 0 0 0
37090- 0 0 0 0 0 0 0 0 0 0 0 0
37091- 0 0 0 0 0 0 0 0 0 0 0 0
37092- 0 0 0 0 0 0 0 0 0 0 0 0
37093- 0 0 0 0 0 0 0 0 0 0 0 0
37094- 0 0 0 10 10 10 22 22 22 54 54 54
37095- 94 94 94 18 18 18 2 2 6 46 46 46
37096-234 234 234 221 221 221 190 190 190 190 190 190
37097-190 190 190 187 187 187 187 187 187 190 190 190
37098-190 190 190 195 195 195 214 214 214 242 242 242
37099-253 253 253 253 253 253 253 253 253 253 253 253
37100- 82 82 82 2 2 6 2 2 6 2 2 6
37101- 2 2 6 2 2 6 2 2 6 14 14 14
37102- 86 86 86 54 54 54 22 22 22 6 6 6
37103- 0 0 0 0 0 0 0 0 0 0 0 0
37104- 0 0 0 0 0 0 0 0 0 0 0 0
37105- 0 0 0 0 0 0 0 0 0 0 0 0
37106- 0 0 0 0 0 0 0 0 0 0 0 0
37107- 0 0 0 0 0 0 0 0 0 0 0 0
37108- 0 0 0 0 0 0 0 0 0 0 0 0
37109- 0 0 0 0 0 0 0 0 0 0 0 0
37110- 0 0 0 0 0 0 0 0 0 0 0 0
37111- 0 0 0 0 0 0 0 0 0 0 0 0
37112- 0 0 0 0 0 0 0 0 0 0 0 0
37113- 0 0 0 0 0 0 0 0 0 0 0 0
37114- 6 6 6 18 18 18 46 46 46 90 90 90
37115- 46 46 46 18 18 18 6 6 6 182 182 182
37116-253 253 253 246 246 246 206 206 206 190 190 190
37117-190 190 190 190 190 190 190 190 190 190 190 190
37118-206 206 206 231 231 231 250 250 250 253 253 253
37119-253 253 253 253 253 253 253 253 253 253 253 253
37120-202 202 202 14 14 14 2 2 6 2 2 6
37121- 2 2 6 2 2 6 2 2 6 2 2 6
37122- 42 42 42 86 86 86 42 42 42 18 18 18
37123- 6 6 6 0 0 0 0 0 0 0 0 0
37124- 0 0 0 0 0 0 0 0 0 0 0 0
37125- 0 0 0 0 0 0 0 0 0 0 0 0
37126- 0 0 0 0 0 0 0 0 0 0 0 0
37127- 0 0 0 0 0 0 0 0 0 0 0 0
37128- 0 0 0 0 0 0 0 0 0 0 0 0
37129- 0 0 0 0 0 0 0 0 0 0 0 0
37130- 0 0 0 0 0 0 0 0 0 0 0 0
37131- 0 0 0 0 0 0 0 0 0 0 0 0
37132- 0 0 0 0 0 0 0 0 0 0 0 0
37133- 0 0 0 0 0 0 0 0 0 6 6 6
37134- 14 14 14 38 38 38 74 74 74 66 66 66
37135- 2 2 6 6 6 6 90 90 90 250 250 250
37136-253 253 253 253 253 253 238 238 238 198 198 198
37137-190 190 190 190 190 190 195 195 195 221 221 221
37138-246 246 246 253 253 253 253 253 253 253 253 253
37139-253 253 253 253 253 253 253 253 253 253 253 253
37140-253 253 253 82 82 82 2 2 6 2 2 6
37141- 2 2 6 2 2 6 2 2 6 2 2 6
37142- 2 2 6 78 78 78 70 70 70 34 34 34
37143- 14 14 14 6 6 6 0 0 0 0 0 0
37144- 0 0 0 0 0 0 0 0 0 0 0 0
37145- 0 0 0 0 0 0 0 0 0 0 0 0
37146- 0 0 0 0 0 0 0 0 0 0 0 0
37147- 0 0 0 0 0 0 0 0 0 0 0 0
37148- 0 0 0 0 0 0 0 0 0 0 0 0
37149- 0 0 0 0 0 0 0 0 0 0 0 0
37150- 0 0 0 0 0 0 0 0 0 0 0 0
37151- 0 0 0 0 0 0 0 0 0 0 0 0
37152- 0 0 0 0 0 0 0 0 0 0 0 0
37153- 0 0 0 0 0 0 0 0 0 14 14 14
37154- 34 34 34 66 66 66 78 78 78 6 6 6
37155- 2 2 6 18 18 18 218 218 218 253 253 253
37156-253 253 253 253 253 253 253 253 253 246 246 246
37157-226 226 226 231 231 231 246 246 246 253 253 253
37158-253 253 253 253 253 253 253 253 253 253 253 253
37159-253 253 253 253 253 253 253 253 253 253 253 253
37160-253 253 253 178 178 178 2 2 6 2 2 6
37161- 2 2 6 2 2 6 2 2 6 2 2 6
37162- 2 2 6 18 18 18 90 90 90 62 62 62
37163- 30 30 30 10 10 10 0 0 0 0 0 0
37164- 0 0 0 0 0 0 0 0 0 0 0 0
37165- 0 0 0 0 0 0 0 0 0 0 0 0
37166- 0 0 0 0 0 0 0 0 0 0 0 0
37167- 0 0 0 0 0 0 0 0 0 0 0 0
37168- 0 0 0 0 0 0 0 0 0 0 0 0
37169- 0 0 0 0 0 0 0 0 0 0 0 0
37170- 0 0 0 0 0 0 0 0 0 0 0 0
37171- 0 0 0 0 0 0 0 0 0 0 0 0
37172- 0 0 0 0 0 0 0 0 0 0 0 0
37173- 0 0 0 0 0 0 10 10 10 26 26 26
37174- 58 58 58 90 90 90 18 18 18 2 2 6
37175- 2 2 6 110 110 110 253 253 253 253 253 253
37176-253 253 253 253 253 253 253 253 253 253 253 253
37177-250 250 250 253 253 253 253 253 253 253 253 253
37178-253 253 253 253 253 253 253 253 253 253 253 253
37179-253 253 253 253 253 253 253 253 253 253 253 253
37180-253 253 253 231 231 231 18 18 18 2 2 6
37181- 2 2 6 2 2 6 2 2 6 2 2 6
37182- 2 2 6 2 2 6 18 18 18 94 94 94
37183- 54 54 54 26 26 26 10 10 10 0 0 0
37184- 0 0 0 0 0 0 0 0 0 0 0 0
37185- 0 0 0 0 0 0 0 0 0 0 0 0
37186- 0 0 0 0 0 0 0 0 0 0 0 0
37187- 0 0 0 0 0 0 0 0 0 0 0 0
37188- 0 0 0 0 0 0 0 0 0 0 0 0
37189- 0 0 0 0 0 0 0 0 0 0 0 0
37190- 0 0 0 0 0 0 0 0 0 0 0 0
37191- 0 0 0 0 0 0 0 0 0 0 0 0
37192- 0 0 0 0 0 0 0 0 0 0 0 0
37193- 0 0 0 6 6 6 22 22 22 50 50 50
37194- 90 90 90 26 26 26 2 2 6 2 2 6
37195- 14 14 14 195 195 195 250 250 250 253 253 253
37196-253 253 253 253 253 253 253 253 253 253 253 253
37197-253 253 253 253 253 253 253 253 253 253 253 253
37198-253 253 253 253 253 253 253 253 253 253 253 253
37199-253 253 253 253 253 253 253 253 253 253 253 253
37200-250 250 250 242 242 242 54 54 54 2 2 6
37201- 2 2 6 2 2 6 2 2 6 2 2 6
37202- 2 2 6 2 2 6 2 2 6 38 38 38
37203- 86 86 86 50 50 50 22 22 22 6 6 6
37204- 0 0 0 0 0 0 0 0 0 0 0 0
37205- 0 0 0 0 0 0 0 0 0 0 0 0
37206- 0 0 0 0 0 0 0 0 0 0 0 0
37207- 0 0 0 0 0 0 0 0 0 0 0 0
37208- 0 0 0 0 0 0 0 0 0 0 0 0
37209- 0 0 0 0 0 0 0 0 0 0 0 0
37210- 0 0 0 0 0 0 0 0 0 0 0 0
37211- 0 0 0 0 0 0 0 0 0 0 0 0
37212- 0 0 0 0 0 0 0 0 0 0 0 0
37213- 6 6 6 14 14 14 38 38 38 82 82 82
37214- 34 34 34 2 2 6 2 2 6 2 2 6
37215- 42 42 42 195 195 195 246 246 246 253 253 253
37216-253 253 253 253 253 253 253 253 253 250 250 250
37217-242 242 242 242 242 242 250 250 250 253 253 253
37218-253 253 253 253 253 253 253 253 253 253 253 253
37219-253 253 253 250 250 250 246 246 246 238 238 238
37220-226 226 226 231 231 231 101 101 101 6 6 6
37221- 2 2 6 2 2 6 2 2 6 2 2 6
37222- 2 2 6 2 2 6 2 2 6 2 2 6
37223- 38 38 38 82 82 82 42 42 42 14 14 14
37224- 6 6 6 0 0 0 0 0 0 0 0 0
37225- 0 0 0 0 0 0 0 0 0 0 0 0
37226- 0 0 0 0 0 0 0 0 0 0 0 0
37227- 0 0 0 0 0 0 0 0 0 0 0 0
37228- 0 0 0 0 0 0 0 0 0 0 0 0
37229- 0 0 0 0 0 0 0 0 0 0 0 0
37230- 0 0 0 0 0 0 0 0 0 0 0 0
37231- 0 0 0 0 0 0 0 0 0 0 0 0
37232- 0 0 0 0 0 0 0 0 0 0 0 0
37233- 10 10 10 26 26 26 62 62 62 66 66 66
37234- 2 2 6 2 2 6 2 2 6 6 6 6
37235- 70 70 70 170 170 170 206 206 206 234 234 234
37236-246 246 246 250 250 250 250 250 250 238 238 238
37237-226 226 226 231 231 231 238 238 238 250 250 250
37238-250 250 250 250 250 250 246 246 246 231 231 231
37239-214 214 214 206 206 206 202 202 202 202 202 202
37240-198 198 198 202 202 202 182 182 182 18 18 18
37241- 2 2 6 2 2 6 2 2 6 2 2 6
37242- 2 2 6 2 2 6 2 2 6 2 2 6
37243- 2 2 6 62 62 62 66 66 66 30 30 30
37244- 10 10 10 0 0 0 0 0 0 0 0 0
37245- 0 0 0 0 0 0 0 0 0 0 0 0
37246- 0 0 0 0 0 0 0 0 0 0 0 0
37247- 0 0 0 0 0 0 0 0 0 0 0 0
37248- 0 0 0 0 0 0 0 0 0 0 0 0
37249- 0 0 0 0 0 0 0 0 0 0 0 0
37250- 0 0 0 0 0 0 0 0 0 0 0 0
37251- 0 0 0 0 0 0 0 0 0 0 0 0
37252- 0 0 0 0 0 0 0 0 0 0 0 0
37253- 14 14 14 42 42 42 82 82 82 18 18 18
37254- 2 2 6 2 2 6 2 2 6 10 10 10
37255- 94 94 94 182 182 182 218 218 218 242 242 242
37256-250 250 250 253 253 253 253 253 253 250 250 250
37257-234 234 234 253 253 253 253 253 253 253 253 253
37258-253 253 253 253 253 253 253 253 253 246 246 246
37259-238 238 238 226 226 226 210 210 210 202 202 202
37260-195 195 195 195 195 195 210 210 210 158 158 158
37261- 6 6 6 14 14 14 50 50 50 14 14 14
37262- 2 2 6 2 2 6 2 2 6 2 2 6
37263- 2 2 6 6 6 6 86 86 86 46 46 46
37264- 18 18 18 6 6 6 0 0 0 0 0 0
37265- 0 0 0 0 0 0 0 0 0 0 0 0
37266- 0 0 0 0 0 0 0 0 0 0 0 0
37267- 0 0 0 0 0 0 0 0 0 0 0 0
37268- 0 0 0 0 0 0 0 0 0 0 0 0
37269- 0 0 0 0 0 0 0 0 0 0 0 0
37270- 0 0 0 0 0 0 0 0 0 0 0 0
37271- 0 0 0 0 0 0 0 0 0 0 0 0
37272- 0 0 0 0 0 0 0 0 0 6 6 6
37273- 22 22 22 54 54 54 70 70 70 2 2 6
37274- 2 2 6 10 10 10 2 2 6 22 22 22
37275-166 166 166 231 231 231 250 250 250 253 253 253
37276-253 253 253 253 253 253 253 253 253 250 250 250
37277-242 242 242 253 253 253 253 253 253 253 253 253
37278-253 253 253 253 253 253 253 253 253 253 253 253
37279-253 253 253 253 253 253 253 253 253 246 246 246
37280-231 231 231 206 206 206 198 198 198 226 226 226
37281- 94 94 94 2 2 6 6 6 6 38 38 38
37282- 30 30 30 2 2 6 2 2 6 2 2 6
37283- 2 2 6 2 2 6 62 62 62 66 66 66
37284- 26 26 26 10 10 10 0 0 0 0 0 0
37285- 0 0 0 0 0 0 0 0 0 0 0 0
37286- 0 0 0 0 0 0 0 0 0 0 0 0
37287- 0 0 0 0 0 0 0 0 0 0 0 0
37288- 0 0 0 0 0 0 0 0 0 0 0 0
37289- 0 0 0 0 0 0 0 0 0 0 0 0
37290- 0 0 0 0 0 0 0 0 0 0 0 0
37291- 0 0 0 0 0 0 0 0 0 0 0 0
37292- 0 0 0 0 0 0 0 0 0 10 10 10
37293- 30 30 30 74 74 74 50 50 50 2 2 6
37294- 26 26 26 26 26 26 2 2 6 106 106 106
37295-238 238 238 253 253 253 253 253 253 253 253 253
37296-253 253 253 253 253 253 253 253 253 253 253 253
37297-253 253 253 253 253 253 253 253 253 253 253 253
37298-253 253 253 253 253 253 253 253 253 253 253 253
37299-253 253 253 253 253 253 253 253 253 253 253 253
37300-253 253 253 246 246 246 218 218 218 202 202 202
37301-210 210 210 14 14 14 2 2 6 2 2 6
37302- 30 30 30 22 22 22 2 2 6 2 2 6
37303- 2 2 6 2 2 6 18 18 18 86 86 86
37304- 42 42 42 14 14 14 0 0 0 0 0 0
37305- 0 0 0 0 0 0 0 0 0 0 0 0
37306- 0 0 0 0 0 0 0 0 0 0 0 0
37307- 0 0 0 0 0 0 0 0 0 0 0 0
37308- 0 0 0 0 0 0 0 0 0 0 0 0
37309- 0 0 0 0 0 0 0 0 0 0 0 0
37310- 0 0 0 0 0 0 0 0 0 0 0 0
37311- 0 0 0 0 0 0 0 0 0 0 0 0
37312- 0 0 0 0 0 0 0 0 0 14 14 14
37313- 42 42 42 90 90 90 22 22 22 2 2 6
37314- 42 42 42 2 2 6 18 18 18 218 218 218
37315-253 253 253 253 253 253 253 253 253 253 253 253
37316-253 253 253 253 253 253 253 253 253 253 253 253
37317-253 253 253 253 253 253 253 253 253 253 253 253
37318-253 253 253 253 253 253 253 253 253 253 253 253
37319-253 253 253 253 253 253 253 253 253 253 253 253
37320-253 253 253 253 253 253 250 250 250 221 221 221
37321-218 218 218 101 101 101 2 2 6 14 14 14
37322- 18 18 18 38 38 38 10 10 10 2 2 6
37323- 2 2 6 2 2 6 2 2 6 78 78 78
37324- 58 58 58 22 22 22 6 6 6 0 0 0
37325- 0 0 0 0 0 0 0 0 0 0 0 0
37326- 0 0 0 0 0 0 0 0 0 0 0 0
37327- 0 0 0 0 0 0 0 0 0 0 0 0
37328- 0 0 0 0 0 0 0 0 0 0 0 0
37329- 0 0 0 0 0 0 0 0 0 0 0 0
37330- 0 0 0 0 0 0 0 0 0 0 0 0
37331- 0 0 0 0 0 0 0 0 0 0 0 0
37332- 0 0 0 0 0 0 6 6 6 18 18 18
37333- 54 54 54 82 82 82 2 2 6 26 26 26
37334- 22 22 22 2 2 6 123 123 123 253 253 253
37335-253 253 253 253 253 253 253 253 253 253 253 253
37336-253 253 253 253 253 253 253 253 253 253 253 253
37337-253 253 253 253 253 253 253 253 253 253 253 253
37338-253 253 253 253 253 253 253 253 253 253 253 253
37339-253 253 253 253 253 253 253 253 253 253 253 253
37340-253 253 253 253 253 253 253 253 253 250 250 250
37341-238 238 238 198 198 198 6 6 6 38 38 38
37342- 58 58 58 26 26 26 38 38 38 2 2 6
37343- 2 2 6 2 2 6 2 2 6 46 46 46
37344- 78 78 78 30 30 30 10 10 10 0 0 0
37345- 0 0 0 0 0 0 0 0 0 0 0 0
37346- 0 0 0 0 0 0 0 0 0 0 0 0
37347- 0 0 0 0 0 0 0 0 0 0 0 0
37348- 0 0 0 0 0 0 0 0 0 0 0 0
37349- 0 0 0 0 0 0 0 0 0 0 0 0
37350- 0 0 0 0 0 0 0 0 0 0 0 0
37351- 0 0 0 0 0 0 0 0 0 0 0 0
37352- 0 0 0 0 0 0 10 10 10 30 30 30
37353- 74 74 74 58 58 58 2 2 6 42 42 42
37354- 2 2 6 22 22 22 231 231 231 253 253 253
37355-253 253 253 253 253 253 253 253 253 253 253 253
37356-253 253 253 253 253 253 253 253 253 250 250 250
37357-253 253 253 253 253 253 253 253 253 253 253 253
37358-253 253 253 253 253 253 253 253 253 253 253 253
37359-253 253 253 253 253 253 253 253 253 253 253 253
37360-253 253 253 253 253 253 253 253 253 253 253 253
37361-253 253 253 246 246 246 46 46 46 38 38 38
37362- 42 42 42 14 14 14 38 38 38 14 14 14
37363- 2 2 6 2 2 6 2 2 6 6 6 6
37364- 86 86 86 46 46 46 14 14 14 0 0 0
37365- 0 0 0 0 0 0 0 0 0 0 0 0
37366- 0 0 0 0 0 0 0 0 0 0 0 0
37367- 0 0 0 0 0 0 0 0 0 0 0 0
37368- 0 0 0 0 0 0 0 0 0 0 0 0
37369- 0 0 0 0 0 0 0 0 0 0 0 0
37370- 0 0 0 0 0 0 0 0 0 0 0 0
37371- 0 0 0 0 0 0 0 0 0 0 0 0
37372- 0 0 0 6 6 6 14 14 14 42 42 42
37373- 90 90 90 18 18 18 18 18 18 26 26 26
37374- 2 2 6 116 116 116 253 253 253 253 253 253
37375-253 253 253 253 253 253 253 253 253 253 253 253
37376-253 253 253 253 253 253 250 250 250 238 238 238
37377-253 253 253 253 253 253 253 253 253 253 253 253
37378-253 253 253 253 253 253 253 253 253 253 253 253
37379-253 253 253 253 253 253 253 253 253 253 253 253
37380-253 253 253 253 253 253 253 253 253 253 253 253
37381-253 253 253 253 253 253 94 94 94 6 6 6
37382- 2 2 6 2 2 6 10 10 10 34 34 34
37383- 2 2 6 2 2 6 2 2 6 2 2 6
37384- 74 74 74 58 58 58 22 22 22 6 6 6
37385- 0 0 0 0 0 0 0 0 0 0 0 0
37386- 0 0 0 0 0 0 0 0 0 0 0 0
37387- 0 0 0 0 0 0 0 0 0 0 0 0
37388- 0 0 0 0 0 0 0 0 0 0 0 0
37389- 0 0 0 0 0 0 0 0 0 0 0 0
37390- 0 0 0 0 0 0 0 0 0 0 0 0
37391- 0 0 0 0 0 0 0 0 0 0 0 0
37392- 0 0 0 10 10 10 26 26 26 66 66 66
37393- 82 82 82 2 2 6 38 38 38 6 6 6
37394- 14 14 14 210 210 210 253 253 253 253 253 253
37395-253 253 253 253 253 253 253 253 253 253 253 253
37396-253 253 253 253 253 253 246 246 246 242 242 242
37397-253 253 253 253 253 253 253 253 253 253 253 253
37398-253 253 253 253 253 253 253 253 253 253 253 253
37399-253 253 253 253 253 253 253 253 253 253 253 253
37400-253 253 253 253 253 253 253 253 253 253 253 253
37401-253 253 253 253 253 253 144 144 144 2 2 6
37402- 2 2 6 2 2 6 2 2 6 46 46 46
37403- 2 2 6 2 2 6 2 2 6 2 2 6
37404- 42 42 42 74 74 74 30 30 30 10 10 10
37405- 0 0 0 0 0 0 0 0 0 0 0 0
37406- 0 0 0 0 0 0 0 0 0 0 0 0
37407- 0 0 0 0 0 0 0 0 0 0 0 0
37408- 0 0 0 0 0 0 0 0 0 0 0 0
37409- 0 0 0 0 0 0 0 0 0 0 0 0
37410- 0 0 0 0 0 0 0 0 0 0 0 0
37411- 0 0 0 0 0 0 0 0 0 0 0 0
37412- 6 6 6 14 14 14 42 42 42 90 90 90
37413- 26 26 26 6 6 6 42 42 42 2 2 6
37414- 74 74 74 250 250 250 253 253 253 253 253 253
37415-253 253 253 253 253 253 253 253 253 253 253 253
37416-253 253 253 253 253 253 242 242 242 242 242 242
37417-253 253 253 253 253 253 253 253 253 253 253 253
37418-253 253 253 253 253 253 253 253 253 253 253 253
37419-253 253 253 253 253 253 253 253 253 253 253 253
37420-253 253 253 253 253 253 253 253 253 253 253 253
37421-253 253 253 253 253 253 182 182 182 2 2 6
37422- 2 2 6 2 2 6 2 2 6 46 46 46
37423- 2 2 6 2 2 6 2 2 6 2 2 6
37424- 10 10 10 86 86 86 38 38 38 10 10 10
37425- 0 0 0 0 0 0 0 0 0 0 0 0
37426- 0 0 0 0 0 0 0 0 0 0 0 0
37427- 0 0 0 0 0 0 0 0 0 0 0 0
37428- 0 0 0 0 0 0 0 0 0 0 0 0
37429- 0 0 0 0 0 0 0 0 0 0 0 0
37430- 0 0 0 0 0 0 0 0 0 0 0 0
37431- 0 0 0 0 0 0 0 0 0 0 0 0
37432- 10 10 10 26 26 26 66 66 66 82 82 82
37433- 2 2 6 22 22 22 18 18 18 2 2 6
37434-149 149 149 253 253 253 253 253 253 253 253 253
37435-253 253 253 253 253 253 253 253 253 253 253 253
37436-253 253 253 253 253 253 234 234 234 242 242 242
37437-253 253 253 253 253 253 253 253 253 253 253 253
37438-253 253 253 253 253 253 253 253 253 253 253 253
37439-253 253 253 253 253 253 253 253 253 253 253 253
37440-253 253 253 253 253 253 253 253 253 253 253 253
37441-253 253 253 253 253 253 206 206 206 2 2 6
37442- 2 2 6 2 2 6 2 2 6 38 38 38
37443- 2 2 6 2 2 6 2 2 6 2 2 6
37444- 6 6 6 86 86 86 46 46 46 14 14 14
37445- 0 0 0 0 0 0 0 0 0 0 0 0
37446- 0 0 0 0 0 0 0 0 0 0 0 0
37447- 0 0 0 0 0 0 0 0 0 0 0 0
37448- 0 0 0 0 0 0 0 0 0 0 0 0
37449- 0 0 0 0 0 0 0 0 0 0 0 0
37450- 0 0 0 0 0 0 0 0 0 0 0 0
37451- 0 0 0 0 0 0 0 0 0 6 6 6
37452- 18 18 18 46 46 46 86 86 86 18 18 18
37453- 2 2 6 34 34 34 10 10 10 6 6 6
37454-210 210 210 253 253 253 253 253 253 253 253 253
37455-253 253 253 253 253 253 253 253 253 253 253 253
37456-253 253 253 253 253 253 234 234 234 242 242 242
37457-253 253 253 253 253 253 253 253 253 253 253 253
37458-253 253 253 253 253 253 253 253 253 253 253 253
37459-253 253 253 253 253 253 253 253 253 253 253 253
37460-253 253 253 253 253 253 253 253 253 253 253 253
37461-253 253 253 253 253 253 221 221 221 6 6 6
37462- 2 2 6 2 2 6 6 6 6 30 30 30
37463- 2 2 6 2 2 6 2 2 6 2 2 6
37464- 2 2 6 82 82 82 54 54 54 18 18 18
37465- 6 6 6 0 0 0 0 0 0 0 0 0
37466- 0 0 0 0 0 0 0 0 0 0 0 0
37467- 0 0 0 0 0 0 0 0 0 0 0 0
37468- 0 0 0 0 0 0 0 0 0 0 0 0
37469- 0 0 0 0 0 0 0 0 0 0 0 0
37470- 0 0 0 0 0 0 0 0 0 0 0 0
37471- 0 0 0 0 0 0 0 0 0 10 10 10
37472- 26 26 26 66 66 66 62 62 62 2 2 6
37473- 2 2 6 38 38 38 10 10 10 26 26 26
37474-238 238 238 253 253 253 253 253 253 253 253 253
37475-253 253 253 253 253 253 253 253 253 253 253 253
37476-253 253 253 253 253 253 231 231 231 238 238 238
37477-253 253 253 253 253 253 253 253 253 253 253 253
37478-253 253 253 253 253 253 253 253 253 253 253 253
37479-253 253 253 253 253 253 253 253 253 253 253 253
37480-253 253 253 253 253 253 253 253 253 253 253 253
37481-253 253 253 253 253 253 231 231 231 6 6 6
37482- 2 2 6 2 2 6 10 10 10 30 30 30
37483- 2 2 6 2 2 6 2 2 6 2 2 6
37484- 2 2 6 66 66 66 58 58 58 22 22 22
37485- 6 6 6 0 0 0 0 0 0 0 0 0
37486- 0 0 0 0 0 0 0 0 0 0 0 0
37487- 0 0 0 0 0 0 0 0 0 0 0 0
37488- 0 0 0 0 0 0 0 0 0 0 0 0
37489- 0 0 0 0 0 0 0 0 0 0 0 0
37490- 0 0 0 0 0 0 0 0 0 0 0 0
37491- 0 0 0 0 0 0 0 0 0 10 10 10
37492- 38 38 38 78 78 78 6 6 6 2 2 6
37493- 2 2 6 46 46 46 14 14 14 42 42 42
37494-246 246 246 253 253 253 253 253 253 253 253 253
37495-253 253 253 253 253 253 253 253 253 253 253 253
37496-253 253 253 253 253 253 231 231 231 242 242 242
37497-253 253 253 253 253 253 253 253 253 253 253 253
37498-253 253 253 253 253 253 253 253 253 253 253 253
37499-253 253 253 253 253 253 253 253 253 253 253 253
37500-253 253 253 253 253 253 253 253 253 253 253 253
37501-253 253 253 253 253 253 234 234 234 10 10 10
37502- 2 2 6 2 2 6 22 22 22 14 14 14
37503- 2 2 6 2 2 6 2 2 6 2 2 6
37504- 2 2 6 66 66 66 62 62 62 22 22 22
37505- 6 6 6 0 0 0 0 0 0 0 0 0
37506- 0 0 0 0 0 0 0 0 0 0 0 0
37507- 0 0 0 0 0 0 0 0 0 0 0 0
37508- 0 0 0 0 0 0 0 0 0 0 0 0
37509- 0 0 0 0 0 0 0 0 0 0 0 0
37510- 0 0 0 0 0 0 0 0 0 0 0 0
37511- 0 0 0 0 0 0 6 6 6 18 18 18
37512- 50 50 50 74 74 74 2 2 6 2 2 6
37513- 14 14 14 70 70 70 34 34 34 62 62 62
37514-250 250 250 253 253 253 253 253 253 253 253 253
37515-253 253 253 253 253 253 253 253 253 253 253 253
37516-253 253 253 253 253 253 231 231 231 246 246 246
37517-253 253 253 253 253 253 253 253 253 253 253 253
37518-253 253 253 253 253 253 253 253 253 253 253 253
37519-253 253 253 253 253 253 253 253 253 253 253 253
37520-253 253 253 253 253 253 253 253 253 253 253 253
37521-253 253 253 253 253 253 234 234 234 14 14 14
37522- 2 2 6 2 2 6 30 30 30 2 2 6
37523- 2 2 6 2 2 6 2 2 6 2 2 6
37524- 2 2 6 66 66 66 62 62 62 22 22 22
37525- 6 6 6 0 0 0 0 0 0 0 0 0
37526- 0 0 0 0 0 0 0 0 0 0 0 0
37527- 0 0 0 0 0 0 0 0 0 0 0 0
37528- 0 0 0 0 0 0 0 0 0 0 0 0
37529- 0 0 0 0 0 0 0 0 0 0 0 0
37530- 0 0 0 0 0 0 0 0 0 0 0 0
37531- 0 0 0 0 0 0 6 6 6 18 18 18
37532- 54 54 54 62 62 62 2 2 6 2 2 6
37533- 2 2 6 30 30 30 46 46 46 70 70 70
37534-250 250 250 253 253 253 253 253 253 253 253 253
37535-253 253 253 253 253 253 253 253 253 253 253 253
37536-253 253 253 253 253 253 231 231 231 246 246 246
37537-253 253 253 253 253 253 253 253 253 253 253 253
37538-253 253 253 253 253 253 253 253 253 253 253 253
37539-253 253 253 253 253 253 253 253 253 253 253 253
37540-253 253 253 253 253 253 253 253 253 253 253 253
37541-253 253 253 253 253 253 226 226 226 10 10 10
37542- 2 2 6 6 6 6 30 30 30 2 2 6
37543- 2 2 6 2 2 6 2 2 6 2 2 6
37544- 2 2 6 66 66 66 58 58 58 22 22 22
37545- 6 6 6 0 0 0 0 0 0 0 0 0
37546- 0 0 0 0 0 0 0 0 0 0 0 0
37547- 0 0 0 0 0 0 0 0 0 0 0 0
37548- 0 0 0 0 0 0 0 0 0 0 0 0
37549- 0 0 0 0 0 0 0 0 0 0 0 0
37550- 0 0 0 0 0 0 0 0 0 0 0 0
37551- 0 0 0 0 0 0 6 6 6 22 22 22
37552- 58 58 58 62 62 62 2 2 6 2 2 6
37553- 2 2 6 2 2 6 30 30 30 78 78 78
37554-250 250 250 253 253 253 253 253 253 253 253 253
37555-253 253 253 253 253 253 253 253 253 253 253 253
37556-253 253 253 253 253 253 231 231 231 246 246 246
37557-253 253 253 253 253 253 253 253 253 253 253 253
37558-253 253 253 253 253 253 253 253 253 253 253 253
37559-253 253 253 253 253 253 253 253 253 253 253 253
37560-253 253 253 253 253 253 253 253 253 253 253 253
37561-253 253 253 253 253 253 206 206 206 2 2 6
37562- 22 22 22 34 34 34 18 14 6 22 22 22
37563- 26 26 26 18 18 18 6 6 6 2 2 6
37564- 2 2 6 82 82 82 54 54 54 18 18 18
37565- 6 6 6 0 0 0 0 0 0 0 0 0
37566- 0 0 0 0 0 0 0 0 0 0 0 0
37567- 0 0 0 0 0 0 0 0 0 0 0 0
37568- 0 0 0 0 0 0 0 0 0 0 0 0
37569- 0 0 0 0 0 0 0 0 0 0 0 0
37570- 0 0 0 0 0 0 0 0 0 0 0 0
37571- 0 0 0 0 0 0 6 6 6 26 26 26
37572- 62 62 62 106 106 106 74 54 14 185 133 11
37573-210 162 10 121 92 8 6 6 6 62 62 62
37574-238 238 238 253 253 253 253 253 253 253 253 253
37575-253 253 253 253 253 253 253 253 253 253 253 253
37576-253 253 253 253 253 253 231 231 231 246 246 246
37577-253 253 253 253 253 253 253 253 253 253 253 253
37578-253 253 253 253 253 253 253 253 253 253 253 253
37579-253 253 253 253 253 253 253 253 253 253 253 253
37580-253 253 253 253 253 253 253 253 253 253 253 253
37581-253 253 253 253 253 253 158 158 158 18 18 18
37582- 14 14 14 2 2 6 2 2 6 2 2 6
37583- 6 6 6 18 18 18 66 66 66 38 38 38
37584- 6 6 6 94 94 94 50 50 50 18 18 18
37585- 6 6 6 0 0 0 0 0 0 0 0 0
37586- 0 0 0 0 0 0 0 0 0 0 0 0
37587- 0 0 0 0 0 0 0 0 0 0 0 0
37588- 0 0 0 0 0 0 0 0 0 0 0 0
37589- 0 0 0 0 0 0 0 0 0 0 0 0
37590- 0 0 0 0 0 0 0 0 0 6 6 6
37591- 10 10 10 10 10 10 18 18 18 38 38 38
37592- 78 78 78 142 134 106 216 158 10 242 186 14
37593-246 190 14 246 190 14 156 118 10 10 10 10
37594- 90 90 90 238 238 238 253 253 253 253 253 253
37595-253 253 253 253 253 253 253 253 253 253 253 253
37596-253 253 253 253 253 253 231 231 231 250 250 250
37597-253 253 253 253 253 253 253 253 253 253 253 253
37598-253 253 253 253 253 253 253 253 253 253 253 253
37599-253 253 253 253 253 253 253 253 253 253 253 253
37600-253 253 253 253 253 253 253 253 253 246 230 190
37601-238 204 91 238 204 91 181 142 44 37 26 9
37602- 2 2 6 2 2 6 2 2 6 2 2 6
37603- 2 2 6 2 2 6 38 38 38 46 46 46
37604- 26 26 26 106 106 106 54 54 54 18 18 18
37605- 6 6 6 0 0 0 0 0 0 0 0 0
37606- 0 0 0 0 0 0 0 0 0 0 0 0
37607- 0 0 0 0 0 0 0 0 0 0 0 0
37608- 0 0 0 0 0 0 0 0 0 0 0 0
37609- 0 0 0 0 0 0 0 0 0 0 0 0
37610- 0 0 0 6 6 6 14 14 14 22 22 22
37611- 30 30 30 38 38 38 50 50 50 70 70 70
37612-106 106 106 190 142 34 226 170 11 242 186 14
37613-246 190 14 246 190 14 246 190 14 154 114 10
37614- 6 6 6 74 74 74 226 226 226 253 253 253
37615-253 253 253 253 253 253 253 253 253 253 253 253
37616-253 253 253 253 253 253 231 231 231 250 250 250
37617-253 253 253 253 253 253 253 253 253 253 253 253
37618-253 253 253 253 253 253 253 253 253 253 253 253
37619-253 253 253 253 253 253 253 253 253 253 253 253
37620-253 253 253 253 253 253 253 253 253 228 184 62
37621-241 196 14 241 208 19 232 195 16 38 30 10
37622- 2 2 6 2 2 6 2 2 6 2 2 6
37623- 2 2 6 6 6 6 30 30 30 26 26 26
37624-203 166 17 154 142 90 66 66 66 26 26 26
37625- 6 6 6 0 0 0 0 0 0 0 0 0
37626- 0 0 0 0 0 0 0 0 0 0 0 0
37627- 0 0 0 0 0 0 0 0 0 0 0 0
37628- 0 0 0 0 0 0 0 0 0 0 0 0
37629- 0 0 0 0 0 0 0 0 0 0 0 0
37630- 6 6 6 18 18 18 38 38 38 58 58 58
37631- 78 78 78 86 86 86 101 101 101 123 123 123
37632-175 146 61 210 150 10 234 174 13 246 186 14
37633-246 190 14 246 190 14 246 190 14 238 190 10
37634-102 78 10 2 2 6 46 46 46 198 198 198
37635-253 253 253 253 253 253 253 253 253 253 253 253
37636-253 253 253 253 253 253 234 234 234 242 242 242
37637-253 253 253 253 253 253 253 253 253 253 253 253
37638-253 253 253 253 253 253 253 253 253 253 253 253
37639-253 253 253 253 253 253 253 253 253 253 253 253
37640-253 253 253 253 253 253 253 253 253 224 178 62
37641-242 186 14 241 196 14 210 166 10 22 18 6
37642- 2 2 6 2 2 6 2 2 6 2 2 6
37643- 2 2 6 2 2 6 6 6 6 121 92 8
37644-238 202 15 232 195 16 82 82 82 34 34 34
37645- 10 10 10 0 0 0 0 0 0 0 0 0
37646- 0 0 0 0 0 0 0 0 0 0 0 0
37647- 0 0 0 0 0 0 0 0 0 0 0 0
37648- 0 0 0 0 0 0 0 0 0 0 0 0
37649- 0 0 0 0 0 0 0 0 0 0 0 0
37650- 14 14 14 38 38 38 70 70 70 154 122 46
37651-190 142 34 200 144 11 197 138 11 197 138 11
37652-213 154 11 226 170 11 242 186 14 246 190 14
37653-246 190 14 246 190 14 246 190 14 246 190 14
37654-225 175 15 46 32 6 2 2 6 22 22 22
37655-158 158 158 250 250 250 253 253 253 253 253 253
37656-253 253 253 253 253 253 253 253 253 253 253 253
37657-253 253 253 253 253 253 253 253 253 253 253 253
37658-253 253 253 253 253 253 253 253 253 253 253 253
37659-253 253 253 253 253 253 253 253 253 253 253 253
37660-253 253 253 250 250 250 242 242 242 224 178 62
37661-239 182 13 236 186 11 213 154 11 46 32 6
37662- 2 2 6 2 2 6 2 2 6 2 2 6
37663- 2 2 6 2 2 6 61 42 6 225 175 15
37664-238 190 10 236 186 11 112 100 78 42 42 42
37665- 14 14 14 0 0 0 0 0 0 0 0 0
37666- 0 0 0 0 0 0 0 0 0 0 0 0
37667- 0 0 0 0 0 0 0 0 0 0 0 0
37668- 0 0 0 0 0 0 0 0 0 0 0 0
37669- 0 0 0 0 0 0 0 0 0 6 6 6
37670- 22 22 22 54 54 54 154 122 46 213 154 11
37671-226 170 11 230 174 11 226 170 11 226 170 11
37672-236 178 12 242 186 14 246 190 14 246 190 14
37673-246 190 14 246 190 14 246 190 14 246 190 14
37674-241 196 14 184 144 12 10 10 10 2 2 6
37675- 6 6 6 116 116 116 242 242 242 253 253 253
37676-253 253 253 253 253 253 253 253 253 253 253 253
37677-253 253 253 253 253 253 253 253 253 253 253 253
37678-253 253 253 253 253 253 253 253 253 253 253 253
37679-253 253 253 253 253 253 253 253 253 253 253 253
37680-253 253 253 231 231 231 198 198 198 214 170 54
37681-236 178 12 236 178 12 210 150 10 137 92 6
37682- 18 14 6 2 2 6 2 2 6 2 2 6
37683- 6 6 6 70 47 6 200 144 11 236 178 12
37684-239 182 13 239 182 13 124 112 88 58 58 58
37685- 22 22 22 6 6 6 0 0 0 0 0 0
37686- 0 0 0 0 0 0 0 0 0 0 0 0
37687- 0 0 0 0 0 0 0 0 0 0 0 0
37688- 0 0 0 0 0 0 0 0 0 0 0 0
37689- 0 0 0 0 0 0 0 0 0 10 10 10
37690- 30 30 30 70 70 70 180 133 36 226 170 11
37691-239 182 13 242 186 14 242 186 14 246 186 14
37692-246 190 14 246 190 14 246 190 14 246 190 14
37693-246 190 14 246 190 14 246 190 14 246 190 14
37694-246 190 14 232 195 16 98 70 6 2 2 6
37695- 2 2 6 2 2 6 66 66 66 221 221 221
37696-253 253 253 253 253 253 253 253 253 253 253 253
37697-253 253 253 253 253 253 253 253 253 253 253 253
37698-253 253 253 253 253 253 253 253 253 253 253 253
37699-253 253 253 253 253 253 253 253 253 253 253 253
37700-253 253 253 206 206 206 198 198 198 214 166 58
37701-230 174 11 230 174 11 216 158 10 192 133 9
37702-163 110 8 116 81 8 102 78 10 116 81 8
37703-167 114 7 197 138 11 226 170 11 239 182 13
37704-242 186 14 242 186 14 162 146 94 78 78 78
37705- 34 34 34 14 14 14 6 6 6 0 0 0
37706- 0 0 0 0 0 0 0 0 0 0 0 0
37707- 0 0 0 0 0 0 0 0 0 0 0 0
37708- 0 0 0 0 0 0 0 0 0 0 0 0
37709- 0 0 0 0 0 0 0 0 0 6 6 6
37710- 30 30 30 78 78 78 190 142 34 226 170 11
37711-239 182 13 246 190 14 246 190 14 246 190 14
37712-246 190 14 246 190 14 246 190 14 246 190 14
37713-246 190 14 246 190 14 246 190 14 246 190 14
37714-246 190 14 241 196 14 203 166 17 22 18 6
37715- 2 2 6 2 2 6 2 2 6 38 38 38
37716-218 218 218 253 253 253 253 253 253 253 253 253
37717-253 253 253 253 253 253 253 253 253 253 253 253
37718-253 253 253 253 253 253 253 253 253 253 253 253
37719-253 253 253 253 253 253 253 253 253 253 253 253
37720-250 250 250 206 206 206 198 198 198 202 162 69
37721-226 170 11 236 178 12 224 166 10 210 150 10
37722-200 144 11 197 138 11 192 133 9 197 138 11
37723-210 150 10 226 170 11 242 186 14 246 190 14
37724-246 190 14 246 186 14 225 175 15 124 112 88
37725- 62 62 62 30 30 30 14 14 14 6 6 6
37726- 0 0 0 0 0 0 0 0 0 0 0 0
37727- 0 0 0 0 0 0 0 0 0 0 0 0
37728- 0 0 0 0 0 0 0 0 0 0 0 0
37729- 0 0 0 0 0 0 0 0 0 10 10 10
37730- 30 30 30 78 78 78 174 135 50 224 166 10
37731-239 182 13 246 190 14 246 190 14 246 190 14
37732-246 190 14 246 190 14 246 190 14 246 190 14
37733-246 190 14 246 190 14 246 190 14 246 190 14
37734-246 190 14 246 190 14 241 196 14 139 102 15
37735- 2 2 6 2 2 6 2 2 6 2 2 6
37736- 78 78 78 250 250 250 253 253 253 253 253 253
37737-253 253 253 253 253 253 253 253 253 253 253 253
37738-253 253 253 253 253 253 253 253 253 253 253 253
37739-253 253 253 253 253 253 253 253 253 253 253 253
37740-250 250 250 214 214 214 198 198 198 190 150 46
37741-219 162 10 236 178 12 234 174 13 224 166 10
37742-216 158 10 213 154 11 213 154 11 216 158 10
37743-226 170 11 239 182 13 246 190 14 246 190 14
37744-246 190 14 246 190 14 242 186 14 206 162 42
37745-101 101 101 58 58 58 30 30 30 14 14 14
37746- 6 6 6 0 0 0 0 0 0 0 0 0
37747- 0 0 0 0 0 0 0 0 0 0 0 0
37748- 0 0 0 0 0 0 0 0 0 0 0 0
37749- 0 0 0 0 0 0 0 0 0 10 10 10
37750- 30 30 30 74 74 74 174 135 50 216 158 10
37751-236 178 12 246 190 14 246 190 14 246 190 14
37752-246 190 14 246 190 14 246 190 14 246 190 14
37753-246 190 14 246 190 14 246 190 14 246 190 14
37754-246 190 14 246 190 14 241 196 14 226 184 13
37755- 61 42 6 2 2 6 2 2 6 2 2 6
37756- 22 22 22 238 238 238 253 253 253 253 253 253
37757-253 253 253 253 253 253 253 253 253 253 253 253
37758-253 253 253 253 253 253 253 253 253 253 253 253
37759-253 253 253 253 253 253 253 253 253 253 253 253
37760-253 253 253 226 226 226 187 187 187 180 133 36
37761-216 158 10 236 178 12 239 182 13 236 178 12
37762-230 174 11 226 170 11 226 170 11 230 174 11
37763-236 178 12 242 186 14 246 190 14 246 190 14
37764-246 190 14 246 190 14 246 186 14 239 182 13
37765-206 162 42 106 106 106 66 66 66 34 34 34
37766- 14 14 14 6 6 6 0 0 0 0 0 0
37767- 0 0 0 0 0 0 0 0 0 0 0 0
37768- 0 0 0 0 0 0 0 0 0 0 0 0
37769- 0 0 0 0 0 0 0 0 0 6 6 6
37770- 26 26 26 70 70 70 163 133 67 213 154 11
37771-236 178 12 246 190 14 246 190 14 246 190 14
37772-246 190 14 246 190 14 246 190 14 246 190 14
37773-246 190 14 246 190 14 246 190 14 246 190 14
37774-246 190 14 246 190 14 246 190 14 241 196 14
37775-190 146 13 18 14 6 2 2 6 2 2 6
37776- 46 46 46 246 246 246 253 253 253 253 253 253
37777-253 253 253 253 253 253 253 253 253 253 253 253
37778-253 253 253 253 253 253 253 253 253 253 253 253
37779-253 253 253 253 253 253 253 253 253 253 253 253
37780-253 253 253 221 221 221 86 86 86 156 107 11
37781-216 158 10 236 178 12 242 186 14 246 186 14
37782-242 186 14 239 182 13 239 182 13 242 186 14
37783-242 186 14 246 186 14 246 190 14 246 190 14
37784-246 190 14 246 190 14 246 190 14 246 190 14
37785-242 186 14 225 175 15 142 122 72 66 66 66
37786- 30 30 30 10 10 10 0 0 0 0 0 0
37787- 0 0 0 0 0 0 0 0 0 0 0 0
37788- 0 0 0 0 0 0 0 0 0 0 0 0
37789- 0 0 0 0 0 0 0 0 0 6 6 6
37790- 26 26 26 70 70 70 163 133 67 210 150 10
37791-236 178 12 246 190 14 246 190 14 246 190 14
37792-246 190 14 246 190 14 246 190 14 246 190 14
37793-246 190 14 246 190 14 246 190 14 246 190 14
37794-246 190 14 246 190 14 246 190 14 246 190 14
37795-232 195 16 121 92 8 34 34 34 106 106 106
37796-221 221 221 253 253 253 253 253 253 253 253 253
37797-253 253 253 253 253 253 253 253 253 253 253 253
37798-253 253 253 253 253 253 253 253 253 253 253 253
37799-253 253 253 253 253 253 253 253 253 253 253 253
37800-242 242 242 82 82 82 18 14 6 163 110 8
37801-216 158 10 236 178 12 242 186 14 246 190 14
37802-246 190 14 246 190 14 246 190 14 246 190 14
37803-246 190 14 246 190 14 246 190 14 246 190 14
37804-246 190 14 246 190 14 246 190 14 246 190 14
37805-246 190 14 246 190 14 242 186 14 163 133 67
37806- 46 46 46 18 18 18 6 6 6 0 0 0
37807- 0 0 0 0 0 0 0 0 0 0 0 0
37808- 0 0 0 0 0 0 0 0 0 0 0 0
37809- 0 0 0 0 0 0 0 0 0 10 10 10
37810- 30 30 30 78 78 78 163 133 67 210 150 10
37811-236 178 12 246 186 14 246 190 14 246 190 14
37812-246 190 14 246 190 14 246 190 14 246 190 14
37813-246 190 14 246 190 14 246 190 14 246 190 14
37814-246 190 14 246 190 14 246 190 14 246 190 14
37815-241 196 14 215 174 15 190 178 144 253 253 253
37816-253 253 253 253 253 253 253 253 253 253 253 253
37817-253 253 253 253 253 253 253 253 253 253 253 253
37818-253 253 253 253 253 253 253 253 253 253 253 253
37819-253 253 253 253 253 253 253 253 253 218 218 218
37820- 58 58 58 2 2 6 22 18 6 167 114 7
37821-216 158 10 236 178 12 246 186 14 246 190 14
37822-246 190 14 246 190 14 246 190 14 246 190 14
37823-246 190 14 246 190 14 246 190 14 246 190 14
37824-246 190 14 246 190 14 246 190 14 246 190 14
37825-246 190 14 246 186 14 242 186 14 190 150 46
37826- 54 54 54 22 22 22 6 6 6 0 0 0
37827- 0 0 0 0 0 0 0 0 0 0 0 0
37828- 0 0 0 0 0 0 0 0 0 0 0 0
37829- 0 0 0 0 0 0 0 0 0 14 14 14
37830- 38 38 38 86 86 86 180 133 36 213 154 11
37831-236 178 12 246 186 14 246 190 14 246 190 14
37832-246 190 14 246 190 14 246 190 14 246 190 14
37833-246 190 14 246 190 14 246 190 14 246 190 14
37834-246 190 14 246 190 14 246 190 14 246 190 14
37835-246 190 14 232 195 16 190 146 13 214 214 214
37836-253 253 253 253 253 253 253 253 253 253 253 253
37837-253 253 253 253 253 253 253 253 253 253 253 253
37838-253 253 253 253 253 253 253 253 253 253 253 253
37839-253 253 253 250 250 250 170 170 170 26 26 26
37840- 2 2 6 2 2 6 37 26 9 163 110 8
37841-219 162 10 239 182 13 246 186 14 246 190 14
37842-246 190 14 246 190 14 246 190 14 246 190 14
37843-246 190 14 246 190 14 246 190 14 246 190 14
37844-246 190 14 246 190 14 246 190 14 246 190 14
37845-246 186 14 236 178 12 224 166 10 142 122 72
37846- 46 46 46 18 18 18 6 6 6 0 0 0
37847- 0 0 0 0 0 0 0 0 0 0 0 0
37848- 0 0 0 0 0 0 0 0 0 0 0 0
37849- 0 0 0 0 0 0 6 6 6 18 18 18
37850- 50 50 50 109 106 95 192 133 9 224 166 10
37851-242 186 14 246 190 14 246 190 14 246 190 14
37852-246 190 14 246 190 14 246 190 14 246 190 14
37853-246 190 14 246 190 14 246 190 14 246 190 14
37854-246 190 14 246 190 14 246 190 14 246 190 14
37855-242 186 14 226 184 13 210 162 10 142 110 46
37856-226 226 226 253 253 253 253 253 253 253 253 253
37857-253 253 253 253 253 253 253 253 253 253 253 253
37858-253 253 253 253 253 253 253 253 253 253 253 253
37859-198 198 198 66 66 66 2 2 6 2 2 6
37860- 2 2 6 2 2 6 50 34 6 156 107 11
37861-219 162 10 239 182 13 246 186 14 246 190 14
37862-246 190 14 246 190 14 246 190 14 246 190 14
37863-246 190 14 246 190 14 246 190 14 246 190 14
37864-246 190 14 246 190 14 246 190 14 242 186 14
37865-234 174 13 213 154 11 154 122 46 66 66 66
37866- 30 30 30 10 10 10 0 0 0 0 0 0
37867- 0 0 0 0 0 0 0 0 0 0 0 0
37868- 0 0 0 0 0 0 0 0 0 0 0 0
37869- 0 0 0 0 0 0 6 6 6 22 22 22
37870- 58 58 58 154 121 60 206 145 10 234 174 13
37871-242 186 14 246 186 14 246 190 14 246 190 14
37872-246 190 14 246 190 14 246 190 14 246 190 14
37873-246 190 14 246 190 14 246 190 14 246 190 14
37874-246 190 14 246 190 14 246 190 14 246 190 14
37875-246 186 14 236 178 12 210 162 10 163 110 8
37876- 61 42 6 138 138 138 218 218 218 250 250 250
37877-253 253 253 253 253 253 253 253 253 250 250 250
37878-242 242 242 210 210 210 144 144 144 66 66 66
37879- 6 6 6 2 2 6 2 2 6 2 2 6
37880- 2 2 6 2 2 6 61 42 6 163 110 8
37881-216 158 10 236 178 12 246 190 14 246 190 14
37882-246 190 14 246 190 14 246 190 14 246 190 14
37883-246 190 14 246 190 14 246 190 14 246 190 14
37884-246 190 14 239 182 13 230 174 11 216 158 10
37885-190 142 34 124 112 88 70 70 70 38 38 38
37886- 18 18 18 6 6 6 0 0 0 0 0 0
37887- 0 0 0 0 0 0 0 0 0 0 0 0
37888- 0 0 0 0 0 0 0 0 0 0 0 0
37889- 0 0 0 0 0 0 6 6 6 22 22 22
37890- 62 62 62 168 124 44 206 145 10 224 166 10
37891-236 178 12 239 182 13 242 186 14 242 186 14
37892-246 186 14 246 190 14 246 190 14 246 190 14
37893-246 190 14 246 190 14 246 190 14 246 190 14
37894-246 190 14 246 190 14 246 190 14 246 190 14
37895-246 190 14 236 178 12 216 158 10 175 118 6
37896- 80 54 7 2 2 6 6 6 6 30 30 30
37897- 54 54 54 62 62 62 50 50 50 38 38 38
37898- 14 14 14 2 2 6 2 2 6 2 2 6
37899- 2 2 6 2 2 6 2 2 6 2 2 6
37900- 2 2 6 6 6 6 80 54 7 167 114 7
37901-213 154 11 236 178 12 246 190 14 246 190 14
37902-246 190 14 246 190 14 246 190 14 246 190 14
37903-246 190 14 242 186 14 239 182 13 239 182 13
37904-230 174 11 210 150 10 174 135 50 124 112 88
37905- 82 82 82 54 54 54 34 34 34 18 18 18
37906- 6 6 6 0 0 0 0 0 0 0 0 0
37907- 0 0 0 0 0 0 0 0 0 0 0 0
37908- 0 0 0 0 0 0 0 0 0 0 0 0
37909- 0 0 0 0 0 0 6 6 6 18 18 18
37910- 50 50 50 158 118 36 192 133 9 200 144 11
37911-216 158 10 219 162 10 224 166 10 226 170 11
37912-230 174 11 236 178 12 239 182 13 239 182 13
37913-242 186 14 246 186 14 246 190 14 246 190 14
37914-246 190 14 246 190 14 246 190 14 246 190 14
37915-246 186 14 230 174 11 210 150 10 163 110 8
37916-104 69 6 10 10 10 2 2 6 2 2 6
37917- 2 2 6 2 2 6 2 2 6 2 2 6
37918- 2 2 6 2 2 6 2 2 6 2 2 6
37919- 2 2 6 2 2 6 2 2 6 2 2 6
37920- 2 2 6 6 6 6 91 60 6 167 114 7
37921-206 145 10 230 174 11 242 186 14 246 190 14
37922-246 190 14 246 190 14 246 186 14 242 186 14
37923-239 182 13 230 174 11 224 166 10 213 154 11
37924-180 133 36 124 112 88 86 86 86 58 58 58
37925- 38 38 38 22 22 22 10 10 10 6 6 6
37926- 0 0 0 0 0 0 0 0 0 0 0 0
37927- 0 0 0 0 0 0 0 0 0 0 0 0
37928- 0 0 0 0 0 0 0 0 0 0 0 0
37929- 0 0 0 0 0 0 0 0 0 14 14 14
37930- 34 34 34 70 70 70 138 110 50 158 118 36
37931-167 114 7 180 123 7 192 133 9 197 138 11
37932-200 144 11 206 145 10 213 154 11 219 162 10
37933-224 166 10 230 174 11 239 182 13 242 186 14
37934-246 186 14 246 186 14 246 186 14 246 186 14
37935-239 182 13 216 158 10 185 133 11 152 99 6
37936-104 69 6 18 14 6 2 2 6 2 2 6
37937- 2 2 6 2 2 6 2 2 6 2 2 6
37938- 2 2 6 2 2 6 2 2 6 2 2 6
37939- 2 2 6 2 2 6 2 2 6 2 2 6
37940- 2 2 6 6 6 6 80 54 7 152 99 6
37941-192 133 9 219 162 10 236 178 12 239 182 13
37942-246 186 14 242 186 14 239 182 13 236 178 12
37943-224 166 10 206 145 10 192 133 9 154 121 60
37944- 94 94 94 62 62 62 42 42 42 22 22 22
37945- 14 14 14 6 6 6 0 0 0 0 0 0
37946- 0 0 0 0 0 0 0 0 0 0 0 0
37947- 0 0 0 0 0 0 0 0 0 0 0 0
37948- 0 0 0 0 0 0 0 0 0 0 0 0
37949- 0 0 0 0 0 0 0 0 0 6 6 6
37950- 18 18 18 34 34 34 58 58 58 78 78 78
37951-101 98 89 124 112 88 142 110 46 156 107 11
37952-163 110 8 167 114 7 175 118 6 180 123 7
37953-185 133 11 197 138 11 210 150 10 219 162 10
37954-226 170 11 236 178 12 236 178 12 234 174 13
37955-219 162 10 197 138 11 163 110 8 130 83 6
37956- 91 60 6 10 10 10 2 2 6 2 2 6
37957- 18 18 18 38 38 38 38 38 38 38 38 38
37958- 38 38 38 38 38 38 38 38 38 38 38 38
37959- 38 38 38 38 38 38 26 26 26 2 2 6
37960- 2 2 6 6 6 6 70 47 6 137 92 6
37961-175 118 6 200 144 11 219 162 10 230 174 11
37962-234 174 13 230 174 11 219 162 10 210 150 10
37963-192 133 9 163 110 8 124 112 88 82 82 82
37964- 50 50 50 30 30 30 14 14 14 6 6 6
37965- 0 0 0 0 0 0 0 0 0 0 0 0
37966- 0 0 0 0 0 0 0 0 0 0 0 0
37967- 0 0 0 0 0 0 0 0 0 0 0 0
37968- 0 0 0 0 0 0 0 0 0 0 0 0
37969- 0 0 0 0 0 0 0 0 0 0 0 0
37970- 6 6 6 14 14 14 22 22 22 34 34 34
37971- 42 42 42 58 58 58 74 74 74 86 86 86
37972-101 98 89 122 102 70 130 98 46 121 87 25
37973-137 92 6 152 99 6 163 110 8 180 123 7
37974-185 133 11 197 138 11 206 145 10 200 144 11
37975-180 123 7 156 107 11 130 83 6 104 69 6
37976- 50 34 6 54 54 54 110 110 110 101 98 89
37977- 86 86 86 82 82 82 78 78 78 78 78 78
37978- 78 78 78 78 78 78 78 78 78 78 78 78
37979- 78 78 78 82 82 82 86 86 86 94 94 94
37980-106 106 106 101 101 101 86 66 34 124 80 6
37981-156 107 11 180 123 7 192 133 9 200 144 11
37982-206 145 10 200 144 11 192 133 9 175 118 6
37983-139 102 15 109 106 95 70 70 70 42 42 42
37984- 22 22 22 10 10 10 0 0 0 0 0 0
37985- 0 0 0 0 0 0 0 0 0 0 0 0
37986- 0 0 0 0 0 0 0 0 0 0 0 0
37987- 0 0 0 0 0 0 0 0 0 0 0 0
37988- 0 0 0 0 0 0 0 0 0 0 0 0
37989- 0 0 0 0 0 0 0 0 0 0 0 0
37990- 0 0 0 0 0 0 6 6 6 10 10 10
37991- 14 14 14 22 22 22 30 30 30 38 38 38
37992- 50 50 50 62 62 62 74 74 74 90 90 90
37993-101 98 89 112 100 78 121 87 25 124 80 6
37994-137 92 6 152 99 6 152 99 6 152 99 6
37995-138 86 6 124 80 6 98 70 6 86 66 30
37996-101 98 89 82 82 82 58 58 58 46 46 46
37997- 38 38 38 34 34 34 34 34 34 34 34 34
37998- 34 34 34 34 34 34 34 34 34 34 34 34
37999- 34 34 34 34 34 34 38 38 38 42 42 42
38000- 54 54 54 82 82 82 94 86 76 91 60 6
38001-134 86 6 156 107 11 167 114 7 175 118 6
38002-175 118 6 167 114 7 152 99 6 121 87 25
38003-101 98 89 62 62 62 34 34 34 18 18 18
38004- 6 6 6 0 0 0 0 0 0 0 0 0
38005- 0 0 0 0 0 0 0 0 0 0 0 0
38006- 0 0 0 0 0 0 0 0 0 0 0 0
38007- 0 0 0 0 0 0 0 0 0 0 0 0
38008- 0 0 0 0 0 0 0 0 0 0 0 0
38009- 0 0 0 0 0 0 0 0 0 0 0 0
38010- 0 0 0 0 0 0 0 0 0 0 0 0
38011- 0 0 0 6 6 6 6 6 6 10 10 10
38012- 18 18 18 22 22 22 30 30 30 42 42 42
38013- 50 50 50 66 66 66 86 86 86 101 98 89
38014-106 86 58 98 70 6 104 69 6 104 69 6
38015-104 69 6 91 60 6 82 62 34 90 90 90
38016- 62 62 62 38 38 38 22 22 22 14 14 14
38017- 10 10 10 10 10 10 10 10 10 10 10 10
38018- 10 10 10 10 10 10 6 6 6 10 10 10
38019- 10 10 10 10 10 10 10 10 10 14 14 14
38020- 22 22 22 42 42 42 70 70 70 89 81 66
38021- 80 54 7 104 69 6 124 80 6 137 92 6
38022-134 86 6 116 81 8 100 82 52 86 86 86
38023- 58 58 58 30 30 30 14 14 14 6 6 6
38024- 0 0 0 0 0 0 0 0 0 0 0 0
38025- 0 0 0 0 0 0 0 0 0 0 0 0
38026- 0 0 0 0 0 0 0 0 0 0 0 0
38027- 0 0 0 0 0 0 0 0 0 0 0 0
38028- 0 0 0 0 0 0 0 0 0 0 0 0
38029- 0 0 0 0 0 0 0 0 0 0 0 0
38030- 0 0 0 0 0 0 0 0 0 0 0 0
38031- 0 0 0 0 0 0 0 0 0 0 0 0
38032- 0 0 0 6 6 6 10 10 10 14 14 14
38033- 18 18 18 26 26 26 38 38 38 54 54 54
38034- 70 70 70 86 86 86 94 86 76 89 81 66
38035- 89 81 66 86 86 86 74 74 74 50 50 50
38036- 30 30 30 14 14 14 6 6 6 0 0 0
38037- 0 0 0 0 0 0 0 0 0 0 0 0
38038- 0 0 0 0 0 0 0 0 0 0 0 0
38039- 0 0 0 0 0 0 0 0 0 0 0 0
38040- 6 6 6 18 18 18 34 34 34 58 58 58
38041- 82 82 82 89 81 66 89 81 66 89 81 66
38042- 94 86 66 94 86 76 74 74 74 50 50 50
38043- 26 26 26 14 14 14 6 6 6 0 0 0
38044- 0 0 0 0 0 0 0 0 0 0 0 0
38045- 0 0 0 0 0 0 0 0 0 0 0 0
38046- 0 0 0 0 0 0 0 0 0 0 0 0
38047- 0 0 0 0 0 0 0 0 0 0 0 0
38048- 0 0 0 0 0 0 0 0 0 0 0 0
38049- 0 0 0 0 0 0 0 0 0 0 0 0
38050- 0 0 0 0 0 0 0 0 0 0 0 0
38051- 0 0 0 0 0 0 0 0 0 0 0 0
38052- 0 0 0 0 0 0 0 0 0 0 0 0
38053- 6 6 6 6 6 6 14 14 14 18 18 18
38054- 30 30 30 38 38 38 46 46 46 54 54 54
38055- 50 50 50 42 42 42 30 30 30 18 18 18
38056- 10 10 10 0 0 0 0 0 0 0 0 0
38057- 0 0 0 0 0 0 0 0 0 0 0 0
38058- 0 0 0 0 0 0 0 0 0 0 0 0
38059- 0 0 0 0 0 0 0 0 0 0 0 0
38060- 0 0 0 6 6 6 14 14 14 26 26 26
38061- 38 38 38 50 50 50 58 58 58 58 58 58
38062- 54 54 54 42 42 42 30 30 30 18 18 18
38063- 10 10 10 0 0 0 0 0 0 0 0 0
38064- 0 0 0 0 0 0 0 0 0 0 0 0
38065- 0 0 0 0 0 0 0 0 0 0 0 0
38066- 0 0 0 0 0 0 0 0 0 0 0 0
38067- 0 0 0 0 0 0 0 0 0 0 0 0
38068- 0 0 0 0 0 0 0 0 0 0 0 0
38069- 0 0 0 0 0 0 0 0 0 0 0 0
38070- 0 0 0 0 0 0 0 0 0 0 0 0
38071- 0 0 0 0 0 0 0 0 0 0 0 0
38072- 0 0 0 0 0 0 0 0 0 0 0 0
38073- 0 0 0 0 0 0 0 0 0 6 6 6
38074- 6 6 6 10 10 10 14 14 14 18 18 18
38075- 18 18 18 14 14 14 10 10 10 6 6 6
38076- 0 0 0 0 0 0 0 0 0 0 0 0
38077- 0 0 0 0 0 0 0 0 0 0 0 0
38078- 0 0 0 0 0 0 0 0 0 0 0 0
38079- 0 0 0 0 0 0 0 0 0 0 0 0
38080- 0 0 0 0 0 0 0 0 0 6 6 6
38081- 14 14 14 18 18 18 22 22 22 22 22 22
38082- 18 18 18 14 14 14 10 10 10 6 6 6
38083- 0 0 0 0 0 0 0 0 0 0 0 0
38084- 0 0 0 0 0 0 0 0 0 0 0 0
38085- 0 0 0 0 0 0 0 0 0 0 0 0
38086- 0 0 0 0 0 0 0 0 0 0 0 0
38087- 0 0 0 0 0 0 0 0 0 0 0 0
38088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101+4 4 4 4 4 4
38102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38115+4 4 4 4 4 4
38116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38129+4 4 4 4 4 4
38130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143+4 4 4 4 4 4
38144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38157+4 4 4 4 4 4
38158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38171+4 4 4 4 4 4
38172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38176+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38177+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38181+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38182+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38183+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38185+4 4 4 4 4 4
38186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38190+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38191+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38192+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38195+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38196+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38197+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38198+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38199+4 4 4 4 4 4
38200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38204+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38205+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38206+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38209+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38210+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38211+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38212+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38213+4 4 4 4 4 4
38214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38217+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38218+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38219+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38220+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38222+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38223+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38224+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38225+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38226+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38227+4 4 4 4 4 4
38228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38231+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38232+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38233+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38234+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38235+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38236+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38237+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38238+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38239+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38240+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38241+4 4 4 4 4 4
38242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38245+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38246+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38247+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38248+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
[... raw PPM image pixel data (RGB colour triplets, lines added by the patch) continues for several hundred more lines; omitted here for brevity ...]
39208diff -urNp linux-3.1.1/drivers/video/udlfb.c linux-3.1.1/drivers/video/udlfb.c
39209--- linux-3.1.1/drivers/video/udlfb.c 2011-11-11 15:19:27.000000000 -0500
39210+++ linux-3.1.1/drivers/video/udlfb.c 2011-11-16 18:39:08.000000000 -0500
39211@@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data
39212 dlfb_urb_completion(urb);
39213
39214 error:
39215- atomic_add(bytes_sent, &dev->bytes_sent);
39216- atomic_add(bytes_identical, &dev->bytes_identical);
39217- atomic_add(width*height*2, &dev->bytes_rendered);
39218+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39219+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39220+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39221 end_cycles = get_cycles();
39222- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39223+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39224 >> 10)), /* Kcycles */
39225 &dev->cpu_kcycles_used);
39226
39227@@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct
39228 dlfb_urb_completion(urb);
39229
39230 error:
39231- atomic_add(bytes_sent, &dev->bytes_sent);
39232- atomic_add(bytes_identical, &dev->bytes_identical);
39233- atomic_add(bytes_rendered, &dev->bytes_rendered);
39234+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39235+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39236+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39237 end_cycles = get_cycles();
39238- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39239+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39240 >> 10)), /* Kcycles */
39241 &dev->cpu_kcycles_used);
39242 }
39243@@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_sh
39244 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39245 struct dlfb_data *dev = fb_info->par;
39246 return snprintf(buf, PAGE_SIZE, "%u\n",
39247- atomic_read(&dev->bytes_rendered));
39248+ atomic_read_unchecked(&dev->bytes_rendered));
39249 }
39250
39251 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39252@@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_s
39253 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39254 struct dlfb_data *dev = fb_info->par;
39255 return snprintf(buf, PAGE_SIZE, "%u\n",
39256- atomic_read(&dev->bytes_identical));
39257+ atomic_read_unchecked(&dev->bytes_identical));
39258 }
39259
39260 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39261@@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(s
39262 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39263 struct dlfb_data *dev = fb_info->par;
39264 return snprintf(buf, PAGE_SIZE, "%u\n",
39265- atomic_read(&dev->bytes_sent));
39266+ atomic_read_unchecked(&dev->bytes_sent));
39267 }
39268
39269 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39270@@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_
39271 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39272 struct dlfb_data *dev = fb_info->par;
39273 return snprintf(buf, PAGE_SIZE, "%u\n",
39274- atomic_read(&dev->cpu_kcycles_used));
39275+ atomic_read_unchecked(&dev->cpu_kcycles_used));
39276 }
39277
39278 static ssize_t edid_show(
39279@@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struc
39280 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39281 struct dlfb_data *dev = fb_info->par;
39282
39283- atomic_set(&dev->bytes_rendered, 0);
39284- atomic_set(&dev->bytes_identical, 0);
39285- atomic_set(&dev->bytes_sent, 0);
39286- atomic_set(&dev->cpu_kcycles_used, 0);
39287+ atomic_set_unchecked(&dev->bytes_rendered, 0);
39288+ atomic_set_unchecked(&dev->bytes_identical, 0);
39289+ atomic_set_unchecked(&dev->bytes_sent, 0);
39290+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39291
39292 return count;
39293 }
39294diff -urNp linux-3.1.1/drivers/video/uvesafb.c linux-3.1.1/drivers/video/uvesafb.c
39295--- linux-3.1.1/drivers/video/uvesafb.c 2011-11-11 15:19:27.000000000 -0500
39296+++ linux-3.1.1/drivers/video/uvesafb.c 2011-11-16 18:39:08.000000000 -0500
39297@@ -19,6 +19,7 @@
39298 #include <linux/io.h>
39299 #include <linux/mutex.h>
39300 #include <linux/slab.h>
39301+#include <linux/moduleloader.h>
39302 #include <video/edid.h>
39303 #include <video/uvesafb.h>
39304 #ifdef CONFIG_X86
39305@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39306 NULL,
39307 };
39308
39309- return call_usermodehelper(v86d_path, argv, envp, 1);
39310+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39311 }
39312
39313 /*
39314@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
39315 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39316 par->pmi_setpal = par->ypan = 0;
39317 } else {
39318+
39319+#ifdef CONFIG_PAX_KERNEXEC
39320+#ifdef CONFIG_MODULES
39321+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39322+#endif
39323+ if (!par->pmi_code) {
39324+ par->pmi_setpal = par->ypan = 0;
39325+ return 0;
39326+ }
39327+#endif
39328+
39329 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39330 + task->t.regs.edi);
39331+
39332+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39333+ pax_open_kernel();
39334+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39335+ pax_close_kernel();
39336+
39337+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39338+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39339+#else
39340 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39341 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39342+#endif
39343+
39344 printk(KERN_INFO "uvesafb: protected mode interface info at "
39345 "%04x:%04x\n",
39346 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39347@@ -1821,6 +1844,11 @@ out:
39348 if (par->vbe_modes)
39349 kfree(par->vbe_modes);
39350
39351+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39352+ if (par->pmi_code)
39353+ module_free_exec(NULL, par->pmi_code);
39354+#endif
39355+
39356 framebuffer_release(info);
39357 return err;
39358 }
39359@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
39360 kfree(par->vbe_state_orig);
39361 if (par->vbe_state_saved)
39362 kfree(par->vbe_state_saved);
39363+
39364+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39365+ if (par->pmi_code)
39366+ module_free_exec(NULL, par->pmi_code);
39367+#endif
39368+
39369 }
39370
39371 framebuffer_release(info);
39372diff -urNp linux-3.1.1/drivers/video/vesafb.c linux-3.1.1/drivers/video/vesafb.c
39373--- linux-3.1.1/drivers/video/vesafb.c 2011-11-11 15:19:27.000000000 -0500
39374+++ linux-3.1.1/drivers/video/vesafb.c 2011-11-16 18:39:08.000000000 -0500
39375@@ -9,6 +9,7 @@
39376 */
39377
39378 #include <linux/module.h>
39379+#include <linux/moduleloader.h>
39380 #include <linux/kernel.h>
39381 #include <linux/errno.h>
39382 #include <linux/string.h>
39383@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
39384 static int vram_total __initdata; /* Set total amount of memory */
39385 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39386 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39387-static void (*pmi_start)(void) __read_mostly;
39388-static void (*pmi_pal) (void) __read_mostly;
39389+static void (*pmi_start)(void) __read_only;
39390+static void (*pmi_pal) (void) __read_only;
39391 static int depth __read_mostly;
39392 static int vga_compat __read_mostly;
39393 /* --------------------------------------------------------------------- */
39394@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
39395 unsigned int size_vmode;
39396 unsigned int size_remap;
39397 unsigned int size_total;
39398+ void *pmi_code = NULL;
39399
39400 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39401 return -ENODEV;
39402@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
39403 size_remap = size_total;
39404 vesafb_fix.smem_len = size_remap;
39405
39406-#ifndef __i386__
39407- screen_info.vesapm_seg = 0;
39408-#endif
39409-
39410 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39411 printk(KERN_WARNING
39412 "vesafb: cannot reserve video memory at 0x%lx\n",
39413@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
39414 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39415 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39416
39417+#ifdef __i386__
39418+
39419+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39420+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39421+ if (!pmi_code)
39422+#elif !defined(CONFIG_PAX_KERNEXEC)
39423+ if (0)
39424+#endif
39425+
39426+#endif
39427+ screen_info.vesapm_seg = 0;
39428+
39429 if (screen_info.vesapm_seg) {
39430- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39431- screen_info.vesapm_seg,screen_info.vesapm_off);
39432+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39433+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39434 }
39435
39436 if (screen_info.vesapm_seg < 0xc000)
39437@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
39438
39439 if (ypan || pmi_setpal) {
39440 unsigned short *pmi_base;
39441+
39442 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39443- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39444- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39445+
39446+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39447+ pax_open_kernel();
39448+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39449+#else
39450+ pmi_code = pmi_base;
39451+#endif
39452+
39453+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39454+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39455+
39456+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39457+ pmi_start = ktva_ktla(pmi_start);
39458+ pmi_pal = ktva_ktla(pmi_pal);
39459+ pax_close_kernel();
39460+#endif
39461+
39462 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39463 if (pmi_base[3]) {
39464 printk(KERN_INFO "vesafb: pmi: ports = ");
39465@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
39466 info->node, info->fix.id);
39467 return 0;
39468 err:
39469+
39470+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39471+ module_free_exec(NULL, pmi_code);
39472+#endif
39473+
39474 if (info->screen_base)
39475 iounmap(info->screen_base);
39476 framebuffer_release(info);
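
The vesafb changes above route the BIOS protected-mode interface through a dedicated executable allocation when KERNEXEC is active: the PMI blob is copied into module_alloc_exec() memory inside a pax_open_kernel()/pax_close_kernel() window, pmi_start and pmi_pal are resolved inside that copy, and the pointers themselves become __read_only. As a rough user-space analogue of the copy-then-execute step (not kernel code; assumes an x86-64 Linux host and a hand-written six-byte stub), a W^X-friendly sequence looks like this:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* x86-64 machine code for: mov eax, 42; ret */
static const unsigned char stub[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc6 ^ 0 | 0xc3 & 0xc3 };

int main(void)
{
	/* Stage 1: a writable, non-executable scratch mapping. */
	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memcpy(buf, stub, sizeof(stub));

	/* Stage 2: flip the mapping to read+execute before calling into it. */
	if (mprotect(buf, 4096, PROT_READ | PROT_EXEC) != 0)
		return 1;

	int (*fn)(void) = (int (*)(void))buf;
	printf("stub returned %d\n", fn());

	munmap(buf, 4096);
	return 0;
}

The mapping is never writable and executable at the same time, which is the kind of W^X discipline the KERNEXEC-aware path above preserves for the copied PMI code.
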
39477diff -urNp linux-3.1.1/drivers/video/via/via_clock.h linux-3.1.1/drivers/video/via/via_clock.h
39478--- linux-3.1.1/drivers/video/via/via_clock.h 2011-11-11 15:19:27.000000000 -0500
39479+++ linux-3.1.1/drivers/video/via/via_clock.h 2011-11-16 18:39:08.000000000 -0500
39480@@ -56,7 +56,7 @@ struct via_clock {
39481
39482 void (*set_engine_pll_state)(u8 state);
39483 void (*set_engine_pll)(struct via_pll_config config);
39484-};
39485+} __no_const;
39486
39487
39488 static inline u32 get_pll_internal_frequency(u32 ref_freq,
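
The __no_const added to struct via_clock (and to the xen-pciback config_field accessors further down) is the opt-out for PaX/grsecurity constification: ops-style structures made up of function pointers are normally forced into read-only memory, but a structure whose members are filled in at run time has to stay writable. The distinction, in plain user-space C with invented names, is simply:

#include <stdio.h>

struct ops {
	void (*run)(void);
};

static void impl(void) { puts("run"); }

/* Members known at build time: the table can live in read-only memory. */
static const struct ops fixed_ops = { .run = impl };

/* Members assigned at probe/run time: the table must stay writable,
 * which is what the __no_const marker communicates. */
static struct ops runtime_ops;

int main(void)
{
	runtime_ops.run = impl;
	fixed_ops.run();
	runtime_ops.run();
	return 0;
}
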
39489diff -urNp linux-3.1.1/drivers/virtio/virtio_balloon.c linux-3.1.1/drivers/virtio/virtio_balloon.c
39490--- linux-3.1.1/drivers/virtio/virtio_balloon.c 2011-11-11 15:19:27.000000000 -0500
39491+++ linux-3.1.1/drivers/virtio/virtio_balloon.c 2011-11-16 18:40:29.000000000 -0500
39492@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
39493 struct sysinfo i;
39494 int idx = 0;
39495
39496+ pax_track_stack();
39497+
39498 all_vm_events(events);
39499 si_meminfo(&i);
39500
39501diff -urNp linux-3.1.1/drivers/xen/xen-pciback/conf_space.h linux-3.1.1/drivers/xen/xen-pciback/conf_space.h
39502--- linux-3.1.1/drivers/xen/xen-pciback/conf_space.h 2011-11-11 15:19:27.000000000 -0500
39503+++ linux-3.1.1/drivers/xen/xen-pciback/conf_space.h 2011-11-16 18:39:08.000000000 -0500
39504@@ -44,15 +44,15 @@ struct config_field {
39505 struct {
39506 conf_dword_write write;
39507 conf_dword_read read;
39508- } dw;
39509+ } __no_const dw;
39510 struct {
39511 conf_word_write write;
39512 conf_word_read read;
39513- } w;
39514+ } __no_const w;
39515 struct {
39516 conf_byte_write write;
39517 conf_byte_read read;
39518- } b;
39519+ } __no_const b;
39520 } u;
39521 struct list_head list;
39522 };
39523diff -urNp linux-3.1.1/fs/9p/vfs_inode.c linux-3.1.1/fs/9p/vfs_inode.c
39524--- linux-3.1.1/fs/9p/vfs_inode.c 2011-11-11 15:19:27.000000000 -0500
39525+++ linux-3.1.1/fs/9p/vfs_inode.c 2011-11-16 18:39:08.000000000 -0500
39526@@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct
39527 void
39528 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39529 {
39530- char *s = nd_get_link(nd);
39531+ const char *s = nd_get_link(nd);
39532
39533 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39534 IS_ERR(s) ? "<error>" : s);
39535diff -urNp linux-3.1.1/fs/aio.c linux-3.1.1/fs/aio.c
39536--- linux-3.1.1/fs/aio.c 2011-11-11 15:19:27.000000000 -0500
39537+++ linux-3.1.1/fs/aio.c 2011-11-16 18:40:29.000000000 -0500
39538@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
39539 size += sizeof(struct io_event) * nr_events;
39540 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39541
39542- if (nr_pages < 0)
39543+ if (nr_pages <= 0)
39544 return -EINVAL;
39545
39546 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39547@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
39548 struct aio_timeout to;
39549 int retry = 0;
39550
39551+ pax_track_stack();
39552+
39553 /* needed to zero any padding within an entry (there shouldn't be
39554 * any, but C is fun!
39555 */
39556@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
39557 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39558 {
39559 ssize_t ret;
39560+ struct iovec iovstack;
39561
39562 #ifdef CONFIG_COMPAT
39563 if (compat)
39564 ret = compat_rw_copy_check_uvector(type,
39565 (struct compat_iovec __user *)kiocb->ki_buf,
39566- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39567+ kiocb->ki_nbytes, 1, &iovstack,
39568 &kiocb->ki_iovec);
39569 else
39570 #endif
39571 ret = rw_copy_check_uvector(type,
39572 (struct iovec __user *)kiocb->ki_buf,
39573- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39574+ kiocb->ki_nbytes, 1, &iovstack,
39575 &kiocb->ki_iovec);
39576 if (ret < 0)
39577 goto out;
39578
39579+ if (kiocb->ki_iovec == &iovstack) {
39580+ kiocb->ki_inline_vec = iovstack;
39581+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39582+ }
39583 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39584 kiocb->ki_cur_seg = 0;
39585 /* ki_nbytes/left now reflect bytes instead of segs */
39586diff -urNp linux-3.1.1/fs/attr.c linux-3.1.1/fs/attr.c
39587--- linux-3.1.1/fs/attr.c 2011-11-11 15:19:27.000000000 -0500
39588+++ linux-3.1.1/fs/attr.c 2011-11-16 18:40:29.000000000 -0500
39589@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
39590 unsigned long limit;
39591
39592 limit = rlimit(RLIMIT_FSIZE);
39593+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39594 if (limit != RLIM_INFINITY && offset > limit)
39595 goto out_sig;
39596 if (offset > inode->i_sb->s_maxbytes)
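
The inode_newsize_ok() hunk only adds a gr_learn_resource() call so grsecurity's learning mode can record the requested size before the existing RLIMIT_FSIZE comparison runs; the check itself is unchanged. Its user-space mirror image is the ordinary getrlimit() test (sketch with a hypothetical helper name):

#include <stdio.h>
#include <sys/resource.h>

/* Hypothetical helper: does a file of 'size' bytes fit under RLIMIT_FSIZE? */
static int fits_fsize_limit(unsigned long long size)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_FSIZE, &rl) != 0)
		return 0;

	return rl.rlim_cur == RLIM_INFINITY || size <= rl.rlim_cur;
}

int main(void)
{
	printf("4 GiB file allowed: %s\n",
	       fits_fsize_limit(4ULL << 30) ? "yes" : "no");
	return 0;
}
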
39597diff -urNp linux-3.1.1/fs/autofs4/waitq.c linux-3.1.1/fs/autofs4/waitq.c
39598--- linux-3.1.1/fs/autofs4/waitq.c 2011-11-11 15:19:27.000000000 -0500
39599+++ linux-3.1.1/fs/autofs4/waitq.c 2011-11-16 18:39:08.000000000 -0500
39600@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
39601 {
39602 unsigned long sigpipe, flags;
39603 mm_segment_t fs;
39604- const char *data = (const char *)addr;
39605+ const char __user *data = (const char __force_user *)addr;
39606 ssize_t wr = 0;
39607
39608 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39609diff -urNp linux-3.1.1/fs/befs/linuxvfs.c linux-3.1.1/fs/befs/linuxvfs.c
39610--- linux-3.1.1/fs/befs/linuxvfs.c 2011-11-11 15:19:27.000000000 -0500
39611+++ linux-3.1.1/fs/befs/linuxvfs.c 2011-11-16 18:39:08.000000000 -0500
39612@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
39613 {
39614 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39615 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39616- char *link = nd_get_link(nd);
39617+ const char *link = nd_get_link(nd);
39618 if (!IS_ERR(link))
39619 kfree(link);
39620 }
39621diff -urNp linux-3.1.1/fs/binfmt_aout.c linux-3.1.1/fs/binfmt_aout.c
39622--- linux-3.1.1/fs/binfmt_aout.c 2011-11-11 15:19:27.000000000 -0500
39623+++ linux-3.1.1/fs/binfmt_aout.c 2011-11-16 18:40:29.000000000 -0500
39624@@ -16,6 +16,7 @@
39625 #include <linux/string.h>
39626 #include <linux/fs.h>
39627 #include <linux/file.h>
39628+#include <linux/security.h>
39629 #include <linux/stat.h>
39630 #include <linux/fcntl.h>
39631 #include <linux/ptrace.h>
39632@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
39633 #endif
39634 # define START_STACK(u) ((void __user *)u.start_stack)
39635
39636+ memset(&dump, 0, sizeof(dump));
39637+
39638 fs = get_fs();
39639 set_fs(KERNEL_DS);
39640 has_dumped = 1;
39641@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
39642
39643 /* If the size of the dump file exceeds the rlimit, then see what would happen
39644 if we wrote the stack, but not the data area. */
39645+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39646 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39647 dump.u_dsize = 0;
39648
39649 /* Make sure we have enough room to write the stack and data areas. */
39650+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39651 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39652 dump.u_ssize = 0;
39653
39654@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
39655 rlim = rlimit(RLIMIT_DATA);
39656 if (rlim >= RLIM_INFINITY)
39657 rlim = ~0;
39658+
39659+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39660 if (ex.a_data + ex.a_bss > rlim)
39661 return -ENOMEM;
39662
39663@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
39664 install_exec_creds(bprm);
39665 current->flags &= ~PF_FORKNOEXEC;
39666
39667+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39668+ current->mm->pax_flags = 0UL;
39669+#endif
39670+
39671+#ifdef CONFIG_PAX_PAGEEXEC
39672+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39673+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39674+
39675+#ifdef CONFIG_PAX_EMUTRAMP
39676+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39677+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39678+#endif
39679+
39680+#ifdef CONFIG_PAX_MPROTECT
39681+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39682+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39683+#endif
39684+
39685+ }
39686+#endif
39687+
39688 if (N_MAGIC(ex) == OMAGIC) {
39689 unsigned long text_addr, map_size;
39690 loff_t pos;
39691@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
39692
39693 down_write(&current->mm->mmap_sem);
39694 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39695- PROT_READ | PROT_WRITE | PROT_EXEC,
39696+ PROT_READ | PROT_WRITE,
39697 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39698 fd_offset + ex.a_text);
39699 up_write(&current->mm->mmap_sem);
39700diff -urNp linux-3.1.1/fs/binfmt_elf.c linux-3.1.1/fs/binfmt_elf.c
39701--- linux-3.1.1/fs/binfmt_elf.c 2011-11-11 15:19:27.000000000 -0500
39702+++ linux-3.1.1/fs/binfmt_elf.c 2011-11-16 18:40:29.000000000 -0500
39703@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
39704 #define elf_core_dump NULL
39705 #endif
39706
39707+#ifdef CONFIG_PAX_MPROTECT
39708+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39709+#endif
39710+
39711 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39712 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39713 #else
39714@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
39715 .load_binary = load_elf_binary,
39716 .load_shlib = load_elf_library,
39717 .core_dump = elf_core_dump,
39718+
39719+#ifdef CONFIG_PAX_MPROTECT
39720+ .handle_mprotect= elf_handle_mprotect,
39721+#endif
39722+
39723 .min_coredump = ELF_EXEC_PAGESIZE,
39724 };
39725
39726@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39727
39728 static int set_brk(unsigned long start, unsigned long end)
39729 {
39730+ unsigned long e = end;
39731+
39732 start = ELF_PAGEALIGN(start);
39733 end = ELF_PAGEALIGN(end);
39734 if (end > start) {
39735@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39736 if (BAD_ADDR(addr))
39737 return addr;
39738 }
39739- current->mm->start_brk = current->mm->brk = end;
39740+ current->mm->start_brk = current->mm->brk = e;
39741 return 0;
39742 }
39743
39744@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39745 elf_addr_t __user *u_rand_bytes;
39746 const char *k_platform = ELF_PLATFORM;
39747 const char *k_base_platform = ELF_BASE_PLATFORM;
39748- unsigned char k_rand_bytes[16];
39749+ u32 k_rand_bytes[4];
39750 int items;
39751 elf_addr_t *elf_info;
39752 int ei_index = 0;
39753 const struct cred *cred = current_cred();
39754 struct vm_area_struct *vma;
39755+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39756+
39757+ pax_track_stack();
39758
39759 /*
39760 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39761@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39762 * Generate 16 random bytes for userspace PRNG seeding.
39763 */
39764 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39765- u_rand_bytes = (elf_addr_t __user *)
39766- STACK_ALLOC(p, sizeof(k_rand_bytes));
39767+ srandom32(k_rand_bytes[0] ^ random32());
39768+ srandom32(k_rand_bytes[1] ^ random32());
39769+ srandom32(k_rand_bytes[2] ^ random32());
39770+ srandom32(k_rand_bytes[3] ^ random32());
39771+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39772+ u_rand_bytes = (elf_addr_t __user *) p;
39773 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39774 return -EFAULT;
39775
39776@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39777 return -EFAULT;
39778 current->mm->env_end = p;
39779
39780+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39781+
39782 /* Put the elf_info on the stack in the right place. */
39783 sp = (elf_addr_t __user *)envp + 1;
39784- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39785+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39786 return -EFAULT;
39787 return 0;
39788 }
39789@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
39790 {
39791 struct elf_phdr *elf_phdata;
39792 struct elf_phdr *eppnt;
39793- unsigned long load_addr = 0;
39794+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39795 int load_addr_set = 0;
39796 unsigned long last_bss = 0, elf_bss = 0;
39797- unsigned long error = ~0UL;
39798+ unsigned long error = -EINVAL;
39799 unsigned long total_size;
39800 int retval, i, size;
39801
39802@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
39803 goto out_close;
39804 }
39805
39806+#ifdef CONFIG_PAX_SEGMEXEC
39807+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39808+ pax_task_size = SEGMEXEC_TASK_SIZE;
39809+#endif
39810+
39811 eppnt = elf_phdata;
39812 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39813 if (eppnt->p_type == PT_LOAD) {
39814@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
39815 k = load_addr + eppnt->p_vaddr;
39816 if (BAD_ADDR(k) ||
39817 eppnt->p_filesz > eppnt->p_memsz ||
39818- eppnt->p_memsz > TASK_SIZE ||
39819- TASK_SIZE - eppnt->p_memsz < k) {
39820+ eppnt->p_memsz > pax_task_size ||
39821+ pax_task_size - eppnt->p_memsz < k) {
39822 error = -ENOMEM;
39823 goto out_close;
39824 }
39825@@ -528,6 +553,193 @@ out:
39826 return error;
39827 }
39828
39829+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39830+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39831+{
39832+ unsigned long pax_flags = 0UL;
39833+
39834+#ifdef CONFIG_PAX_PAGEEXEC
39835+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39836+ pax_flags |= MF_PAX_PAGEEXEC;
39837+#endif
39838+
39839+#ifdef CONFIG_PAX_SEGMEXEC
39840+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39841+ pax_flags |= MF_PAX_SEGMEXEC;
39842+#endif
39843+
39844+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39845+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39846+ if ((__supported_pte_mask & _PAGE_NX))
39847+ pax_flags &= ~MF_PAX_SEGMEXEC;
39848+ else
39849+ pax_flags &= ~MF_PAX_PAGEEXEC;
39850+ }
39851+#endif
39852+
39853+#ifdef CONFIG_PAX_EMUTRAMP
39854+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39855+ pax_flags |= MF_PAX_EMUTRAMP;
39856+#endif
39857+
39858+#ifdef CONFIG_PAX_MPROTECT
39859+ if (elf_phdata->p_flags & PF_MPROTECT)
39860+ pax_flags |= MF_PAX_MPROTECT;
39861+#endif
39862+
39863+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39864+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39865+ pax_flags |= MF_PAX_RANDMMAP;
39866+#endif
39867+
39868+ return pax_flags;
39869+}
39870+#endif
39871+
39872+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39873+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39874+{
39875+ unsigned long pax_flags = 0UL;
39876+
39877+#ifdef CONFIG_PAX_PAGEEXEC
39878+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39879+ pax_flags |= MF_PAX_PAGEEXEC;
39880+#endif
39881+
39882+#ifdef CONFIG_PAX_SEGMEXEC
39883+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39884+ pax_flags |= MF_PAX_SEGMEXEC;
39885+#endif
39886+
39887+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39888+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39889+ if ((__supported_pte_mask & _PAGE_NX))
39890+ pax_flags &= ~MF_PAX_SEGMEXEC;
39891+ else
39892+ pax_flags &= ~MF_PAX_PAGEEXEC;
39893+ }
39894+#endif
39895+
39896+#ifdef CONFIG_PAX_EMUTRAMP
39897+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39898+ pax_flags |= MF_PAX_EMUTRAMP;
39899+#endif
39900+
39901+#ifdef CONFIG_PAX_MPROTECT
39902+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39903+ pax_flags |= MF_PAX_MPROTECT;
39904+#endif
39905+
39906+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39907+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39908+ pax_flags |= MF_PAX_RANDMMAP;
39909+#endif
39910+
39911+ return pax_flags;
39912+}
39913+#endif
39914+
39915+#ifdef CONFIG_PAX_EI_PAX
39916+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39917+{
39918+ unsigned long pax_flags = 0UL;
39919+
39920+#ifdef CONFIG_PAX_PAGEEXEC
39921+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39922+ pax_flags |= MF_PAX_PAGEEXEC;
39923+#endif
39924+
39925+#ifdef CONFIG_PAX_SEGMEXEC
39926+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39927+ pax_flags |= MF_PAX_SEGMEXEC;
39928+#endif
39929+
39930+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39931+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39932+ if ((__supported_pte_mask & _PAGE_NX))
39933+ pax_flags &= ~MF_PAX_SEGMEXEC;
39934+ else
39935+ pax_flags &= ~MF_PAX_PAGEEXEC;
39936+ }
39937+#endif
39938+
39939+#ifdef CONFIG_PAX_EMUTRAMP
39940+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39941+ pax_flags |= MF_PAX_EMUTRAMP;
39942+#endif
39943+
39944+#ifdef CONFIG_PAX_MPROTECT
39945+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39946+ pax_flags |= MF_PAX_MPROTECT;
39947+#endif
39948+
39949+#ifdef CONFIG_PAX_ASLR
39950+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39951+ pax_flags |= MF_PAX_RANDMMAP;
39952+#endif
39953+
39954+ return pax_flags;
39955+}
39956+#endif
39957+
39958+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39959+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39960+{
39961+ unsigned long pax_flags = 0UL;
39962+
39963+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39964+ unsigned long i;
39965+ int found_flags = 0;
39966+#endif
39967+
39968+#ifdef CONFIG_PAX_EI_PAX
39969+ pax_flags = pax_parse_ei_pax(elf_ex);
39970+#endif
39971+
39972+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39973+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39974+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39975+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39976+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39977+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39978+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39979+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39980+ return -EINVAL;
39981+
39982+#ifdef CONFIG_PAX_SOFTMODE
39983+ if (pax_softmode)
39984+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
39985+ else
39986+#endif
39987+
39988+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39989+ found_flags = 1;
39990+ break;
39991+ }
39992+#endif
39993+
39994+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39995+ if (found_flags == 0) {
39996+ struct elf_phdr phdr;
39997+ memset(&phdr, 0, sizeof(phdr));
39998+ phdr.p_flags = PF_NOEMUTRAMP;
39999+#ifdef CONFIG_PAX_SOFTMODE
40000+ if (pax_softmode)
40001+ pax_flags = pax_parse_softmode(&phdr);
40002+ else
40003+#endif
40004+ pax_flags = pax_parse_hardmode(&phdr);
40005+ }
40006+#endif
40007+
40008+ if (0 > pax_check_flags(&pax_flags))
40009+ return -EINVAL;
40010+
40011+ current->mm->pax_flags = pax_flags;
40012+ return 0;
40013+}
40014+#endif
40015+
40016 /*
40017 * These are the functions used to load ELF style executables and shared
40018 * libraries. There is no binary dependent code anywhere else.
40019@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
40020 {
40021 unsigned int random_variable = 0;
40022
40023+#ifdef CONFIG_PAX_RANDUSTACK
40024+ if (randomize_va_space)
40025+ return stack_top - current->mm->delta_stack;
40026+#endif
40027+
40028 if ((current->flags & PF_RANDOMIZE) &&
40029 !(current->personality & ADDR_NO_RANDOMIZE)) {
40030 random_variable = get_random_int() & STACK_RND_MASK;
40031@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
40032 unsigned long load_addr = 0, load_bias = 0;
40033 int load_addr_set = 0;
40034 char * elf_interpreter = NULL;
40035- unsigned long error;
40036+ unsigned long error = 0;
40037 struct elf_phdr *elf_ppnt, *elf_phdata;
40038 unsigned long elf_bss, elf_brk;
40039 int retval, i;
40040@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
40041 unsigned long start_code, end_code, start_data, end_data;
40042 unsigned long reloc_func_desc __maybe_unused = 0;
40043 int executable_stack = EXSTACK_DEFAULT;
40044- unsigned long def_flags = 0;
40045 struct {
40046 struct elfhdr elf_ex;
40047 struct elfhdr interp_elf_ex;
40048 } *loc;
40049+ unsigned long pax_task_size = TASK_SIZE;
40050
40051 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40052 if (!loc) {
40053@@ -713,11 +930,81 @@ static int load_elf_binary(struct linux_
40054
40055 /* OK, This is the point of no return */
40056 current->flags &= ~PF_FORKNOEXEC;
40057- current->mm->def_flags = def_flags;
40058+
40059+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40060+ current->mm->pax_flags = 0UL;
40061+#endif
40062+
40063+#ifdef CONFIG_PAX_DLRESOLVE
40064+ current->mm->call_dl_resolve = 0UL;
40065+#endif
40066+
40067+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40068+ current->mm->call_syscall = 0UL;
40069+#endif
40070+
40071+#ifdef CONFIG_PAX_ASLR
40072+ current->mm->delta_mmap = 0UL;
40073+ current->mm->delta_stack = 0UL;
40074+#endif
40075+
40076+ current->mm->def_flags = 0;
40077+
40078+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
40079+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
40080+ send_sig(SIGKILL, current, 0);
40081+ goto out_free_dentry;
40082+ }
40083+#endif
40084+
40085+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40086+ pax_set_initial_flags(bprm);
40087+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40088+ if (pax_set_initial_flags_func)
40089+ (pax_set_initial_flags_func)(bprm);
40090+#endif
40091+
40092+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40093+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40094+ current->mm->context.user_cs_limit = PAGE_SIZE;
40095+ current->mm->def_flags |= VM_PAGEEXEC;
40096+ }
40097+#endif
40098+
40099+#ifdef CONFIG_PAX_SEGMEXEC
40100+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40101+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40102+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40103+ pax_task_size = SEGMEXEC_TASK_SIZE;
40104+ current->mm->def_flags |= VM_NOHUGEPAGE;
40105+ }
40106+#endif
40107+
40108+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40109+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40110+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40111+ put_cpu();
40112+ }
40113+#endif
40114
40115 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40116 may depend on the personality. */
40117 SET_PERSONALITY(loc->elf_ex);
40118+
40119+#ifdef CONFIG_PAX_ASLR
40120+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40121+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40122+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40123+ }
40124+#endif
40125+
40126+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40127+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40128+ executable_stack = EXSTACK_DISABLE_X;
40129+ current->personality &= ~READ_IMPLIES_EXEC;
40130+ } else
40131+#endif
40132+
40133 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40134 current->personality |= READ_IMPLIES_EXEC;
40135
40136@@ -808,6 +1095,20 @@ static int load_elf_binary(struct linux_
40137 #else
40138 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40139 #endif
40140+
40141+#ifdef CONFIG_PAX_RANDMMAP
40142+ /* PaX: randomize base address at the default exe base if requested */
40143+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40144+#ifdef CONFIG_SPARC64
40145+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40146+#else
40147+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40148+#endif
40149+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40150+ elf_flags |= MAP_FIXED;
40151+ }
40152+#endif
40153+
40154 }
40155
40156 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40157@@ -840,9 +1141,9 @@ static int load_elf_binary(struct linux_
40158 * allowed task size. Note that p_filesz must always be
40159 * <= p_memsz so it is only necessary to check p_memsz.
40160 */
40161- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40162- elf_ppnt->p_memsz > TASK_SIZE ||
40163- TASK_SIZE - elf_ppnt->p_memsz < k) {
40164+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40165+ elf_ppnt->p_memsz > pax_task_size ||
40166+ pax_task_size - elf_ppnt->p_memsz < k) {
40167 /* set_brk can never work. Avoid overflows. */
40168 send_sig(SIGKILL, current, 0);
40169 retval = -EINVAL;
40170@@ -870,6 +1171,11 @@ static int load_elf_binary(struct linux_
40171 start_data += load_bias;
40172 end_data += load_bias;
40173
40174+#ifdef CONFIG_PAX_RANDMMAP
40175+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40176+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40177+#endif
40178+
40179 /* Calling set_brk effectively mmaps the pages that we need
40180 * for the bss and break sections. We must do this before
40181 * mapping in the interpreter, to make sure it doesn't wind
40182@@ -881,9 +1187,11 @@ static int load_elf_binary(struct linux_
40183 goto out_free_dentry;
40184 }
40185 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40186- send_sig(SIGSEGV, current, 0);
40187- retval = -EFAULT; /* Nobody gets to see this, but.. */
40188- goto out_free_dentry;
40189+ /*
40190+ * This bss-zeroing can fail if the ELF
40191+ * file specifies odd protections. So
40192+ * we don't check the return value
40193+ */
40194 }
40195
40196 if (elf_interpreter) {
40197@@ -1098,7 +1406,7 @@ out:
40198 * Decide what to dump of a segment, part, all or none.
40199 */
40200 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40201- unsigned long mm_flags)
40202+ unsigned long mm_flags, long signr)
40203 {
40204 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40205
40206@@ -1132,7 +1440,7 @@ static unsigned long vma_dump_size(struc
40207 if (vma->vm_file == NULL)
40208 return 0;
40209
40210- if (FILTER(MAPPED_PRIVATE))
40211+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40212 goto whole;
40213
40214 /*
40215@@ -1354,9 +1662,9 @@ static void fill_auxv_note(struct memelf
40216 {
40217 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40218 int i = 0;
40219- do
40220+ do {
40221 i += 2;
40222- while (auxv[i - 2] != AT_NULL);
40223+ } while (auxv[i - 2] != AT_NULL);
40224 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40225 }
40226
40227@@ -1862,14 +2170,14 @@ static void fill_extnum_info(struct elfh
40228 }
40229
40230 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40231- unsigned long mm_flags)
40232+ struct coredump_params *cprm)
40233 {
40234 struct vm_area_struct *vma;
40235 size_t size = 0;
40236
40237 for (vma = first_vma(current, gate_vma); vma != NULL;
40238 vma = next_vma(vma, gate_vma))
40239- size += vma_dump_size(vma, mm_flags);
40240+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40241 return size;
40242 }
40243
40244@@ -1963,7 +2271,7 @@ static int elf_core_dump(struct coredump
40245
40246 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40247
40248- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40249+ offset += elf_core_vma_data_size(gate_vma, cprm);
40250 offset += elf_core_extra_data_size();
40251 e_shoff = offset;
40252
40253@@ -1977,10 +2285,12 @@ static int elf_core_dump(struct coredump
40254 offset = dataoff;
40255
40256 size += sizeof(*elf);
40257+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40258 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40259 goto end_coredump;
40260
40261 size += sizeof(*phdr4note);
40262+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40263 if (size > cprm->limit
40264 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40265 goto end_coredump;
40266@@ -1994,7 +2304,7 @@ static int elf_core_dump(struct coredump
40267 phdr.p_offset = offset;
40268 phdr.p_vaddr = vma->vm_start;
40269 phdr.p_paddr = 0;
40270- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40271+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40272 phdr.p_memsz = vma->vm_end - vma->vm_start;
40273 offset += phdr.p_filesz;
40274 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40275@@ -2005,6 +2315,7 @@ static int elf_core_dump(struct coredump
40276 phdr.p_align = ELF_EXEC_PAGESIZE;
40277
40278 size += sizeof(phdr);
40279+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40280 if (size > cprm->limit
40281 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40282 goto end_coredump;
40283@@ -2029,7 +2340,7 @@ static int elf_core_dump(struct coredump
40284 unsigned long addr;
40285 unsigned long end;
40286
40287- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40288+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40289
40290 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40291 struct page *page;
40292@@ -2038,6 +2349,7 @@ static int elf_core_dump(struct coredump
40293 page = get_dump_page(addr);
40294 if (page) {
40295 void *kaddr = kmap(page);
40296+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40297 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40298 !dump_write(cprm->file, kaddr,
40299 PAGE_SIZE);
40300@@ -2055,6 +2367,7 @@ static int elf_core_dump(struct coredump
40301
40302 if (e_phnum == PN_XNUM) {
40303 size += sizeof(*shdr4extnum);
40304+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40305 if (size > cprm->limit
40306 || !dump_write(cprm->file, shdr4extnum,
40307 sizeof(*shdr4extnum)))
40308@@ -2075,6 +2388,97 @@ out:
40309
40310 #endif /* CONFIG_ELF_CORE */
40311
40312+#ifdef CONFIG_PAX_MPROTECT
40313+/* PaX: non-PIC ELF libraries need relocations on their executable segments
40314+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40315+ * we'll remove VM_MAYWRITE for good on RELRO segments.
40316+ *
40317+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40318+ * basis because we want to allow the common case and not the special ones.
40319+ */
40320+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40321+{
40322+ struct elfhdr elf_h;
40323+ struct elf_phdr elf_p;
40324+ unsigned long i;
40325+ unsigned long oldflags;
40326+ bool is_textrel_rw, is_textrel_rx, is_relro;
40327+
40328+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40329+ return;
40330+
40331+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40332+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40333+
40334+#ifdef CONFIG_PAX_ELFRELOCS
40335+ /* possible TEXTREL */
40336+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40337+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40338+#else
40339+ is_textrel_rw = false;
40340+ is_textrel_rx = false;
40341+#endif
40342+
40343+ /* possible RELRO */
40344+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40345+
40346+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40347+ return;
40348+
40349+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40350+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40351+
40352+#ifdef CONFIG_PAX_ETEXECRELOCS
40353+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40354+#else
40355+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40356+#endif
40357+
40358+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40359+ !elf_check_arch(&elf_h) ||
40360+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40361+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40362+ return;
40363+
40364+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40365+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40366+ return;
40367+ switch (elf_p.p_type) {
40368+ case PT_DYNAMIC:
40369+ if (!is_textrel_rw && !is_textrel_rx)
40370+ continue;
40371+ i = 0UL;
40372+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40373+ elf_dyn dyn;
40374+
40375+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40376+ return;
40377+ if (dyn.d_tag == DT_NULL)
40378+ return;
40379+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40380+ gr_log_textrel(vma);
40381+ if (is_textrel_rw)
40382+ vma->vm_flags |= VM_MAYWRITE;
40383+ else
40384+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40385+ vma->vm_flags &= ~VM_MAYWRITE;
40386+ return;
40387+ }
40388+ i++;
40389+ }
40390+ return;
40391+
40392+ case PT_GNU_RELRO:
40393+ if (!is_relro)
40394+ continue;
40395+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40396+ vma->vm_flags &= ~VM_MAYWRITE;
40397+ return;
40398+ }
40399+ }
40400+}
40401+#endif
40402+
40403 static int __init init_elf_binfmt(void)
40404 {
40405 return register_binfmt(&elf_format);
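
Most of the binfmt_elf.c additions implement pax_parse_elf_flags(): PaX markings are read either from the EI_PAX byte in e_ident or from a PT_PAX_FLAGS program header, conflicting bits are resolved against the available hardware (NX) support, and the result lands in mm->pax_flags before any segment is mapped. A minimal user-space sketch of locating such a program header (hypothetical inspection tool, 64-bit ELF only, no validation of the ELF magic; the PT_PAX_FLAGS value is the one used by the PaX ELF additions) could look like:

#include <elf.h>
#include <stdio.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS 0x65041580	/* value used by the PaX ELF additions */
#endif

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <64-bit ELF file>\n", argv[0]);
		return 1;
	}

	FILE *f = fopen(argv[1], "rb");
	if (!f)
		return 1;

	Elf64_Ehdr eh;
	if (fread(&eh, sizeof(eh), 1, f) != 1) {
		fclose(f);
		return 1;
	}

	for (unsigned i = 0; i < eh.e_phnum; i++) {
		Elf64_Phdr ph;

		if (fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * eh.e_phentsize), SEEK_SET) != 0 ||
		    fread(&ph, sizeof(ph), 1, f) != 1)
			break;
		if (ph.p_type == PT_PAX_FLAGS)
			printf("PT_PAX_FLAGS present, p_flags = 0x%x\n", ph.p_flags);
	}

	fclose(f);
	return 0;
}
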
40406diff -urNp linux-3.1.1/fs/binfmt_flat.c linux-3.1.1/fs/binfmt_flat.c
40407--- linux-3.1.1/fs/binfmt_flat.c 2011-11-11 15:19:27.000000000 -0500
40408+++ linux-3.1.1/fs/binfmt_flat.c 2011-11-16 18:39:08.000000000 -0500
40409@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
40410 realdatastart = (unsigned long) -ENOMEM;
40411 printk("Unable to allocate RAM for process data, errno %d\n",
40412 (int)-realdatastart);
40413+ down_write(&current->mm->mmap_sem);
40414 do_munmap(current->mm, textpos, text_len);
40415+ up_write(&current->mm->mmap_sem);
40416 ret = realdatastart;
40417 goto err;
40418 }
40419@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
40420 }
40421 if (IS_ERR_VALUE(result)) {
40422 printk("Unable to read data+bss, errno %d\n", (int)-result);
40423+ down_write(&current->mm->mmap_sem);
40424 do_munmap(current->mm, textpos, text_len);
40425 do_munmap(current->mm, realdatastart, len);
40426+ up_write(&current->mm->mmap_sem);
40427 ret = result;
40428 goto err;
40429 }
40430@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
40431 }
40432 if (IS_ERR_VALUE(result)) {
40433 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40434+ down_write(&current->mm->mmap_sem);
40435 do_munmap(current->mm, textpos, text_len + data_len + extra +
40436 MAX_SHARED_LIBS * sizeof(unsigned long));
40437+ up_write(&current->mm->mmap_sem);
40438 ret = result;
40439 goto err;
40440 }
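
The binfmt_flat hunks all apply one fix: the error paths call do_munmap(), which must run with mmap_sem held for writing, so each call is now bracketed by down_write()/up_write() on current->mm->mmap_sem. As a user-space analogue of taking the writer lock around a mutating cleanup call (pthread sketch, invented names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static char *mapping;

/* Stand-in for do_munmap(): must only run with the writer lock held. */
static void teardown_mapping(void)
{
	free(mapping);
	mapping = NULL;
}

static void load_failed_cleanup(void)
{
	/* Mirror of the patch: take the writer lock around the teardown
	 * call on the error path, release it before returning. */
	pthread_rwlock_wrlock(&map_lock);
	teardown_mapping();
	pthread_rwlock_unlock(&map_lock);
}

int main(void)
{
	mapping = malloc(16);
	load_failed_cleanup();
	printf("mapping torn down: %s\n", mapping == NULL ? "yes" : "no");
	return 0;
}

(Compile with -pthread.)
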
40441diff -urNp linux-3.1.1/fs/bio.c linux-3.1.1/fs/bio.c
40442--- linux-3.1.1/fs/bio.c 2011-11-11 15:19:27.000000000 -0500
40443+++ linux-3.1.1/fs/bio.c 2011-11-16 18:39:08.000000000 -0500
40444@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
40445 const int read = bio_data_dir(bio) == READ;
40446 struct bio_map_data *bmd = bio->bi_private;
40447 int i;
40448- char *p = bmd->sgvecs[0].iov_base;
40449+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40450
40451 __bio_for_each_segment(bvec, bio, i, 0) {
40452 char *addr = page_address(bvec->bv_page);
40453diff -urNp linux-3.1.1/fs/block_dev.c linux-3.1.1/fs/block_dev.c
40454--- linux-3.1.1/fs/block_dev.c 2011-11-11 15:19:27.000000000 -0500
40455+++ linux-3.1.1/fs/block_dev.c 2011-11-16 18:39:08.000000000 -0500
40456@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_de
40457 else if (bdev->bd_contains == bdev)
40458 return true; /* is a whole device which isn't held */
40459
40460- else if (whole->bd_holder == bd_may_claim)
40461+ else if (whole->bd_holder == (void *)bd_may_claim)
40462 return true; /* is a partition of a device that is being partitioned */
40463 else if (whole->bd_holder != NULL)
40464 return false; /* is a partition of a held device */
40465diff -urNp linux-3.1.1/fs/btrfs/ctree.c linux-3.1.1/fs/btrfs/ctree.c
40466--- linux-3.1.1/fs/btrfs/ctree.c 2011-11-11 15:19:27.000000000 -0500
40467+++ linux-3.1.1/fs/btrfs/ctree.c 2011-11-16 18:39:08.000000000 -0500
40468@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(st
40469 free_extent_buffer(buf);
40470 add_root_to_dirty_list(root);
40471 } else {
40472- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40473- parent_start = parent->start;
40474- else
40475+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40476+ if (parent)
40477+ parent_start = parent->start;
40478+ else
40479+ parent_start = 0;
40480+ } else
40481 parent_start = 0;
40482
40483 WARN_ON(trans->transid != btrfs_header_generation(parent));
40484diff -urNp linux-3.1.1/fs/btrfs/inode.c linux-3.1.1/fs/btrfs/inode.c
40485--- linux-3.1.1/fs/btrfs/inode.c 2011-11-11 15:19:27.000000000 -0500
40486+++ linux-3.1.1/fs/btrfs/inode.c 2011-11-17 18:12:11.000000000 -0500
40487@@ -6922,7 +6922,7 @@ fail:
40488 return -ENOMEM;
40489 }
40490
40491-static int btrfs_getattr(struct vfsmount *mnt,
40492+int btrfs_getattr(struct vfsmount *mnt,
40493 struct dentry *dentry, struct kstat *stat)
40494 {
40495 struct inode *inode = dentry->d_inode;
40496@@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount
40497 return 0;
40498 }
40499
40500+EXPORT_SYMBOL(btrfs_getattr);
40501+
40502+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40503+{
40504+ return BTRFS_I(inode)->root->anon_dev;
40505+}
40506+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40507+
40508 /*
40509 * If a file is moved, it will inherit the cow and compression flags of the new
40510 * directory.
40511diff -urNp linux-3.1.1/fs/btrfs/ioctl.c linux-3.1.1/fs/btrfs/ioctl.c
40512--- linux-3.1.1/fs/btrfs/ioctl.c 2011-11-11 15:19:27.000000000 -0500
40513+++ linux-3.1.1/fs/btrfs/ioctl.c 2011-11-16 18:40:29.000000000 -0500
40514@@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs
40515 for (i = 0; i < num_types; i++) {
40516 struct btrfs_space_info *tmp;
40517
40518+ /* Don't copy in more than we allocated */
40519 if (!slot_count)
40520 break;
40521
40522+ slot_count--;
40523+
40524 info = NULL;
40525 rcu_read_lock();
40526 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40527@@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs
40528 memcpy(dest, &space, sizeof(space));
40529 dest++;
40530 space_args.total_spaces++;
40531- slot_count--;
40532 }
40533- if (!slot_count)
40534- break;
40535 }
40536 up_read(&info->groups_sem);
40537 }
40538
40539- user_dest = (struct btrfs_ioctl_space_info *)
40540+ user_dest = (struct btrfs_ioctl_space_info __user *)
40541 (arg + sizeof(struct btrfs_ioctl_space_args));
40542
40543 if (copy_to_user(user_dest, dest_orig, alloc_size))
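
The btrfs_ioctl_space_info() change moves the slot_count decrement ahead of the copy loop so the number of entries written can never exceed what was allocated for dest_orig, and it also adds the missing __user annotation on user_dest. The bounded copy-out pattern, in a toy user-space form with an invented record type, is simply:

#include <stddef.h>
#include <stdio.h>

struct rec {
	int type;
	int value;
};

/* Copy at most 'cap' records from src into dst and return how many were
 * copied.  Spending the budget before each copy mirrors the ioctl fix:
 * the destination can never be overrun. */
static size_t copy_bounded(struct rec *dst, size_t cap,
			   const struct rec *src, size_t n)
{
	size_t copied = 0;

	for (size_t i = 0; i < n; i++) {
		if (!cap)
			break;
		cap--;
		dst[copied++] = src[i];
	}
	return copied;
}

int main(void)
{
	struct rec src[5] = { {1, 10}, {2, 20}, {3, 30}, {4, 40}, {5, 50} };
	struct rec dst[3];

	printf("copied %zu of 5 records\n", copy_bounded(dst, 3, src, 5));
	return 0;
}
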
40544diff -urNp linux-3.1.1/fs/btrfs/relocation.c linux-3.1.1/fs/btrfs/relocation.c
40545--- linux-3.1.1/fs/btrfs/relocation.c 2011-11-11 15:19:27.000000000 -0500
40546+++ linux-3.1.1/fs/btrfs/relocation.c 2011-11-16 18:39:08.000000000 -0500
40547@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
40548 }
40549 spin_unlock(&rc->reloc_root_tree.lock);
40550
40551- BUG_ON((struct btrfs_root *)node->data != root);
40552+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40553
40554 if (!del) {
40555 spin_lock(&rc->reloc_root_tree.lock);
40556diff -urNp linux-3.1.1/fs/cachefiles/bind.c linux-3.1.1/fs/cachefiles/bind.c
40557--- linux-3.1.1/fs/cachefiles/bind.c 2011-11-11 15:19:27.000000000 -0500
40558+++ linux-3.1.1/fs/cachefiles/bind.c 2011-11-16 18:39:08.000000000 -0500
40559@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40560 args);
40561
40562 /* start by checking things over */
40563- ASSERT(cache->fstop_percent >= 0 &&
40564- cache->fstop_percent < cache->fcull_percent &&
40565+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40566 cache->fcull_percent < cache->frun_percent &&
40567 cache->frun_percent < 100);
40568
40569- ASSERT(cache->bstop_percent >= 0 &&
40570- cache->bstop_percent < cache->bcull_percent &&
40571+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40572 cache->bcull_percent < cache->brun_percent &&
40573 cache->brun_percent < 100);
40574
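
The cachefiles_daemon_bind() ASSERTs drop the 'x >= 0' half of each test: fstop_percent and the related fields are unsigned, so those comparisons are always true and only the ordering checks carry any information. The same issue shows up as a compiler diagnostic in ordinary C (GCC reports it under -Wtype-limits, which -Wextra enables):

#include <stdio.h>

int main(void)
{
	unsigned int fstop_percent = 0;

	/* "comparison of unsigned expression >= 0 is always true":
	 * the test adds nothing, which is why the patch keeps only the
	 * meaningful ordering checks. */
	if (fstop_percent >= 0)
		printf("always reached for unsigned values\n");

	return 0;
}
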
40575diff -urNp linux-3.1.1/fs/cachefiles/daemon.c linux-3.1.1/fs/cachefiles/daemon.c
40576--- linux-3.1.1/fs/cachefiles/daemon.c 2011-11-11 15:19:27.000000000 -0500
40577+++ linux-3.1.1/fs/cachefiles/daemon.c 2011-11-16 18:39:08.000000000 -0500
40578@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
40579 if (n > buflen)
40580 return -EMSGSIZE;
40581
40582- if (copy_to_user(_buffer, buffer, n) != 0)
40583+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40584 return -EFAULT;
40585
40586 return n;
40587@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
40588 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40589 return -EIO;
40590
40591- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40592+ if (datalen > PAGE_SIZE - 1)
40593 return -EOPNOTSUPP;
40594
40595 /* drag the command string into the kernel so we can parse it */
40596@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
40597 if (args[0] != '%' || args[1] != '\0')
40598 return -EINVAL;
40599
40600- if (fstop < 0 || fstop >= cache->fcull_percent)
40601+ if (fstop >= cache->fcull_percent)
40602 return cachefiles_daemon_range_error(cache, args);
40603
40604 cache->fstop_percent = fstop;
40605@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
40606 if (args[0] != '%' || args[1] != '\0')
40607 return -EINVAL;
40608
40609- if (bstop < 0 || bstop >= cache->bcull_percent)
40610+ if (bstop >= cache->bcull_percent)
40611 return cachefiles_daemon_range_error(cache, args);
40612
40613 cache->bstop_percent = bstop;
40614diff -urNp linux-3.1.1/fs/cachefiles/internal.h linux-3.1.1/fs/cachefiles/internal.h
40615--- linux-3.1.1/fs/cachefiles/internal.h 2011-11-11 15:19:27.000000000 -0500
40616+++ linux-3.1.1/fs/cachefiles/internal.h 2011-11-16 18:39:08.000000000 -0500
40617@@ -57,7 +57,7 @@ struct cachefiles_cache {
40618 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40619 struct rb_root active_nodes; /* active nodes (can't be culled) */
40620 rwlock_t active_lock; /* lock for active_nodes */
40621- atomic_t gravecounter; /* graveyard uniquifier */
40622+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40623 unsigned frun_percent; /* when to stop culling (% files) */
40624 unsigned fcull_percent; /* when to start culling (% files) */
40625 unsigned fstop_percent; /* when to stop allocating (% files) */
40626@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
40627 * proc.c
40628 */
40629 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40630-extern atomic_t cachefiles_lookup_histogram[HZ];
40631-extern atomic_t cachefiles_mkdir_histogram[HZ];
40632-extern atomic_t cachefiles_create_histogram[HZ];
40633+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40634+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40635+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40636
40637 extern int __init cachefiles_proc_init(void);
40638 extern void cachefiles_proc_cleanup(void);
40639 static inline
40640-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40641+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40642 {
40643 unsigned long jif = jiffies - start_jif;
40644 if (jif >= HZ)
40645 jif = HZ - 1;
40646- atomic_inc(&histogram[jif]);
40647+ atomic_inc_unchecked(&histogram[jif]);
40648 }
40649
40650 #else
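
The cachefiles counters move from atomic_t to atomic_unchecked_t because PAX_REFCOUNT instruments plain atomic_t operations to catch reference-count overflows; pure statistics such as these latency histograms are expected to wrap harmlessly, so they use the unchecked variant. A user-space analogue of the histogram update, with C11 atomics and relaxed ordering since only the totals matter, might be:

#include <stdatomic.h>
#include <stdio.h>

#define HZ 100

/* Plain statistics counters: wraparound is harmless, so there is no need
 * for the overflow detection PAX_REFCOUNT adds to reference counts. */
static atomic_uint lookup_histogram[HZ];

static void hist(atomic_uint histogram[], unsigned long start_jif,
		 unsigned long now_jif)
{
	unsigned long jif = now_jif - start_jif;

	if (jif >= HZ)
		jif = HZ - 1;
	atomic_fetch_add_explicit(&histogram[jif], 1, memory_order_relaxed);
}

int main(void)
{
	hist(lookup_histogram, 0, 5);
	hist(lookup_histogram, 0, 5);
	printf("bucket 5: %u\n",
	       atomic_load_explicit(&lookup_histogram[5], memory_order_relaxed));
	return 0;
}
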
40651diff -urNp linux-3.1.1/fs/cachefiles/namei.c linux-3.1.1/fs/cachefiles/namei.c
40652--- linux-3.1.1/fs/cachefiles/namei.c 2011-11-11 15:19:27.000000000 -0500
40653+++ linux-3.1.1/fs/cachefiles/namei.c 2011-11-16 18:39:08.000000000 -0500
40654@@ -318,7 +318,7 @@ try_again:
40655 /* first step is to make up a grave dentry in the graveyard */
40656 sprintf(nbuffer, "%08x%08x",
40657 (uint32_t) get_seconds(),
40658- (uint32_t) atomic_inc_return(&cache->gravecounter));
40659+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40660
40661 /* do the multiway lock magic */
40662 trap = lock_rename(cache->graveyard, dir);
40663diff -urNp linux-3.1.1/fs/cachefiles/proc.c linux-3.1.1/fs/cachefiles/proc.c
40664--- linux-3.1.1/fs/cachefiles/proc.c 2011-11-11 15:19:27.000000000 -0500
40665+++ linux-3.1.1/fs/cachefiles/proc.c 2011-11-16 18:39:08.000000000 -0500
40666@@ -14,9 +14,9 @@
40667 #include <linux/seq_file.h>
40668 #include "internal.h"
40669
40670-atomic_t cachefiles_lookup_histogram[HZ];
40671-atomic_t cachefiles_mkdir_histogram[HZ];
40672-atomic_t cachefiles_create_histogram[HZ];
40673+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40674+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40675+atomic_unchecked_t cachefiles_create_histogram[HZ];
40676
40677 /*
40678 * display the latency histogram
40679@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40680 return 0;
40681 default:
40682 index = (unsigned long) v - 3;
40683- x = atomic_read(&cachefiles_lookup_histogram[index]);
40684- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40685- z = atomic_read(&cachefiles_create_histogram[index]);
40686+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40687+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40688+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40689 if (x == 0 && y == 0 && z == 0)
40690 return 0;
40691
40692diff -urNp linux-3.1.1/fs/cachefiles/rdwr.c linux-3.1.1/fs/cachefiles/rdwr.c
40693--- linux-3.1.1/fs/cachefiles/rdwr.c 2011-11-11 15:19:27.000000000 -0500
40694+++ linux-3.1.1/fs/cachefiles/rdwr.c 2011-11-16 18:39:08.000000000 -0500
40695@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
40696 old_fs = get_fs();
40697 set_fs(KERNEL_DS);
40698 ret = file->f_op->write(
40699- file, (const void __user *) data, len, &pos);
40700+ file, (const void __force_user *) data, len, &pos);
40701 set_fs(old_fs);
40702 kunmap(page);
40703 if (ret != len)
40704diff -urNp linux-3.1.1/fs/ceph/dir.c linux-3.1.1/fs/ceph/dir.c
40705--- linux-3.1.1/fs/ceph/dir.c 2011-11-11 15:19:27.000000000 -0500
40706+++ linux-3.1.1/fs/ceph/dir.c 2011-11-16 18:39:08.000000000 -0500
40707@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *fil
40708 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40709 struct ceph_mds_client *mdsc = fsc->mdsc;
40710 unsigned frag = fpos_frag(filp->f_pos);
40711- int off = fpos_off(filp->f_pos);
40712+ unsigned int off = fpos_off(filp->f_pos);
40713 int err;
40714 u32 ftype;
40715 struct ceph_mds_reply_info_parsed *rinfo;
40716diff -urNp linux-3.1.1/fs/cifs/cifs_debug.c linux-3.1.1/fs/cifs/cifs_debug.c
40717--- linux-3.1.1/fs/cifs/cifs_debug.c 2011-11-11 15:19:27.000000000 -0500
40718+++ linux-3.1.1/fs/cifs/cifs_debug.c 2011-11-16 18:39:08.000000000 -0500
40719@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
40720
40721 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40722 #ifdef CONFIG_CIFS_STATS2
40723- atomic_set(&totBufAllocCount, 0);
40724- atomic_set(&totSmBufAllocCount, 0);
40725+ atomic_set_unchecked(&totBufAllocCount, 0);
40726+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40727 #endif /* CONFIG_CIFS_STATS2 */
40728 spin_lock(&cifs_tcp_ses_lock);
40729 list_for_each(tmp1, &cifs_tcp_ses_list) {
40730@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
40731 tcon = list_entry(tmp3,
40732 struct cifs_tcon,
40733 tcon_list);
40734- atomic_set(&tcon->num_smbs_sent, 0);
40735- atomic_set(&tcon->num_writes, 0);
40736- atomic_set(&tcon->num_reads, 0);
40737- atomic_set(&tcon->num_oplock_brks, 0);
40738- atomic_set(&tcon->num_opens, 0);
40739- atomic_set(&tcon->num_posixopens, 0);
40740- atomic_set(&tcon->num_posixmkdirs, 0);
40741- atomic_set(&tcon->num_closes, 0);
40742- atomic_set(&tcon->num_deletes, 0);
40743- atomic_set(&tcon->num_mkdirs, 0);
40744- atomic_set(&tcon->num_rmdirs, 0);
40745- atomic_set(&tcon->num_renames, 0);
40746- atomic_set(&tcon->num_t2renames, 0);
40747- atomic_set(&tcon->num_ffirst, 0);
40748- atomic_set(&tcon->num_fnext, 0);
40749- atomic_set(&tcon->num_fclose, 0);
40750- atomic_set(&tcon->num_hardlinks, 0);
40751- atomic_set(&tcon->num_symlinks, 0);
40752- atomic_set(&tcon->num_locks, 0);
40753+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40754+ atomic_set_unchecked(&tcon->num_writes, 0);
40755+ atomic_set_unchecked(&tcon->num_reads, 0);
40756+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40757+ atomic_set_unchecked(&tcon->num_opens, 0);
40758+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40759+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40760+ atomic_set_unchecked(&tcon->num_closes, 0);
40761+ atomic_set_unchecked(&tcon->num_deletes, 0);
40762+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40763+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40764+ atomic_set_unchecked(&tcon->num_renames, 0);
40765+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40766+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40767+ atomic_set_unchecked(&tcon->num_fnext, 0);
40768+ atomic_set_unchecked(&tcon->num_fclose, 0);
40769+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40770+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40771+ atomic_set_unchecked(&tcon->num_locks, 0);
40772 }
40773 }
40774 }
40775@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
40776 smBufAllocCount.counter, cifs_min_small);
40777 #ifdef CONFIG_CIFS_STATS2
40778 seq_printf(m, "Total Large %d Small %d Allocations\n",
40779- atomic_read(&totBufAllocCount),
40780- atomic_read(&totSmBufAllocCount));
40781+ atomic_read_unchecked(&totBufAllocCount),
40782+ atomic_read_unchecked(&totSmBufAllocCount));
40783 #endif /* CONFIG_CIFS_STATS2 */
40784
40785 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40786@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
40787 if (tcon->need_reconnect)
40788 seq_puts(m, "\tDISCONNECTED ");
40789 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40790- atomic_read(&tcon->num_smbs_sent),
40791- atomic_read(&tcon->num_oplock_brks));
40792+ atomic_read_unchecked(&tcon->num_smbs_sent),
40793+ atomic_read_unchecked(&tcon->num_oplock_brks));
40794 seq_printf(m, "\nReads: %d Bytes: %lld",
40795- atomic_read(&tcon->num_reads),
40796+ atomic_read_unchecked(&tcon->num_reads),
40797 (long long)(tcon->bytes_read));
40798 seq_printf(m, "\nWrites: %d Bytes: %lld",
40799- atomic_read(&tcon->num_writes),
40800+ atomic_read_unchecked(&tcon->num_writes),
40801 (long long)(tcon->bytes_written));
40802 seq_printf(m, "\nFlushes: %d",
40803- atomic_read(&tcon->num_flushes));
40804+ atomic_read_unchecked(&tcon->num_flushes));
40805 seq_printf(m, "\nLocks: %d HardLinks: %d "
40806 "Symlinks: %d",
40807- atomic_read(&tcon->num_locks),
40808- atomic_read(&tcon->num_hardlinks),
40809- atomic_read(&tcon->num_symlinks));
40810+ atomic_read_unchecked(&tcon->num_locks),
40811+ atomic_read_unchecked(&tcon->num_hardlinks),
40812+ atomic_read_unchecked(&tcon->num_symlinks));
40813 seq_printf(m, "\nOpens: %d Closes: %d "
40814 "Deletes: %d",
40815- atomic_read(&tcon->num_opens),
40816- atomic_read(&tcon->num_closes),
40817- atomic_read(&tcon->num_deletes));
40818+ atomic_read_unchecked(&tcon->num_opens),
40819+ atomic_read_unchecked(&tcon->num_closes),
40820+ atomic_read_unchecked(&tcon->num_deletes));
40821 seq_printf(m, "\nPosix Opens: %d "
40822 "Posix Mkdirs: %d",
40823- atomic_read(&tcon->num_posixopens),
40824- atomic_read(&tcon->num_posixmkdirs));
40825+ atomic_read_unchecked(&tcon->num_posixopens),
40826+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40827 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40828- atomic_read(&tcon->num_mkdirs),
40829- atomic_read(&tcon->num_rmdirs));
40830+ atomic_read_unchecked(&tcon->num_mkdirs),
40831+ atomic_read_unchecked(&tcon->num_rmdirs));
40832 seq_printf(m, "\nRenames: %d T2 Renames %d",
40833- atomic_read(&tcon->num_renames),
40834- atomic_read(&tcon->num_t2renames));
40835+ atomic_read_unchecked(&tcon->num_renames),
40836+ atomic_read_unchecked(&tcon->num_t2renames));
40837 seq_printf(m, "\nFindFirst: %d FNext %d "
40838 "FClose %d",
40839- atomic_read(&tcon->num_ffirst),
40840- atomic_read(&tcon->num_fnext),
40841- atomic_read(&tcon->num_fclose));
40842+ atomic_read_unchecked(&tcon->num_ffirst),
40843+ atomic_read_unchecked(&tcon->num_fnext),
40844+ atomic_read_unchecked(&tcon->num_fclose));
40845 }
40846 }
40847 }
40848diff -urNp linux-3.1.1/fs/cifs/cifsfs.c linux-3.1.1/fs/cifs/cifsfs.c
40849--- linux-3.1.1/fs/cifs/cifsfs.c 2011-11-11 15:19:27.000000000 -0500
40850+++ linux-3.1.1/fs/cifs/cifsfs.c 2011-11-16 18:39:08.000000000 -0500
40851@@ -981,7 +981,7 @@ cifs_init_request_bufs(void)
40852 cifs_req_cachep = kmem_cache_create("cifs_request",
40853 CIFSMaxBufSize +
40854 MAX_CIFS_HDR_SIZE, 0,
40855- SLAB_HWCACHE_ALIGN, NULL);
40856+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40857 if (cifs_req_cachep == NULL)
40858 return -ENOMEM;
40859
40860@@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void)
40861 efficient to alloc 1 per page off the slab compared to 17K (5page)
40862 alloc of large cifs buffers even when page debugging is on */
40863 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40864- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40865+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40866 NULL);
40867 if (cifs_sm_req_cachep == NULL) {
40868 mempool_destroy(cifs_req_poolp);
40869@@ -1093,8 +1093,8 @@ init_cifs(void)
40870 atomic_set(&bufAllocCount, 0);
40871 atomic_set(&smBufAllocCount, 0);
40872 #ifdef CONFIG_CIFS_STATS2
40873- atomic_set(&totBufAllocCount, 0);
40874- atomic_set(&totSmBufAllocCount, 0);
40875+ atomic_set_unchecked(&totBufAllocCount, 0);
40876+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40877 #endif /* CONFIG_CIFS_STATS2 */
40878
40879 atomic_set(&midCount, 0);
40880diff -urNp linux-3.1.1/fs/cifs/cifsglob.h linux-3.1.1/fs/cifs/cifsglob.h
40881--- linux-3.1.1/fs/cifs/cifsglob.h 2011-11-11 15:19:27.000000000 -0500
40882+++ linux-3.1.1/fs/cifs/cifsglob.h 2011-11-16 18:39:08.000000000 -0500
40883@@ -381,28 +381,28 @@ struct cifs_tcon {
40884 __u16 Flags; /* optional support bits */
40885 enum statusEnum tidStatus;
40886 #ifdef CONFIG_CIFS_STATS
40887- atomic_t num_smbs_sent;
40888- atomic_t num_writes;
40889- atomic_t num_reads;
40890- atomic_t num_flushes;
40891- atomic_t num_oplock_brks;
40892- atomic_t num_opens;
40893- atomic_t num_closes;
40894- atomic_t num_deletes;
40895- atomic_t num_mkdirs;
40896- atomic_t num_posixopens;
40897- atomic_t num_posixmkdirs;
40898- atomic_t num_rmdirs;
40899- atomic_t num_renames;
40900- atomic_t num_t2renames;
40901- atomic_t num_ffirst;
40902- atomic_t num_fnext;
40903- atomic_t num_fclose;
40904- atomic_t num_hardlinks;
40905- atomic_t num_symlinks;
40906- atomic_t num_locks;
40907- atomic_t num_acl_get;
40908- atomic_t num_acl_set;
40909+ atomic_unchecked_t num_smbs_sent;
40910+ atomic_unchecked_t num_writes;
40911+ atomic_unchecked_t num_reads;
40912+ atomic_unchecked_t num_flushes;
40913+ atomic_unchecked_t num_oplock_brks;
40914+ atomic_unchecked_t num_opens;
40915+ atomic_unchecked_t num_closes;
40916+ atomic_unchecked_t num_deletes;
40917+ atomic_unchecked_t num_mkdirs;
40918+ atomic_unchecked_t num_posixopens;
40919+ atomic_unchecked_t num_posixmkdirs;
40920+ atomic_unchecked_t num_rmdirs;
40921+ atomic_unchecked_t num_renames;
40922+ atomic_unchecked_t num_t2renames;
40923+ atomic_unchecked_t num_ffirst;
40924+ atomic_unchecked_t num_fnext;
40925+ atomic_unchecked_t num_fclose;
40926+ atomic_unchecked_t num_hardlinks;
40927+ atomic_unchecked_t num_symlinks;
40928+ atomic_unchecked_t num_locks;
40929+ atomic_unchecked_t num_acl_get;
40930+ atomic_unchecked_t num_acl_set;
40931 #ifdef CONFIG_CIFS_STATS2
40932 unsigned long long time_writes;
40933 unsigned long long time_reads;
40934@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
40935 }
40936
40937 #ifdef CONFIG_CIFS_STATS
40938-#define cifs_stats_inc atomic_inc
40939+#define cifs_stats_inc atomic_inc_unchecked
40940
40941 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40942 unsigned int bytes)
40943@@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40944 /* Various Debug counters */
40945 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40946 #ifdef CONFIG_CIFS_STATS2
40947-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40948-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40949+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40950+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40951 #endif
40952 GLOBAL_EXTERN atomic_t smBufAllocCount;
40953 GLOBAL_EXTERN atomic_t midCount;
40954diff -urNp linux-3.1.1/fs/cifs/link.c linux-3.1.1/fs/cifs/link.c
40955--- linux-3.1.1/fs/cifs/link.c 2011-11-11 15:19:27.000000000 -0500
40956+++ linux-3.1.1/fs/cifs/link.c 2011-11-16 18:39:08.000000000 -0500
40957@@ -593,7 +593,7 @@ symlink_exit:
40958
40959 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40960 {
40961- char *p = nd_get_link(nd);
40962+ const char *p = nd_get_link(nd);
40963 if (!IS_ERR(p))
40964 kfree(p);
40965 }
40966diff -urNp linux-3.1.1/fs/cifs/misc.c linux-3.1.1/fs/cifs/misc.c
40967--- linux-3.1.1/fs/cifs/misc.c 2011-11-11 15:19:27.000000000 -0500
40968+++ linux-3.1.1/fs/cifs/misc.c 2011-11-16 18:39:08.000000000 -0500
40969@@ -156,7 +156,7 @@ cifs_buf_get(void)
40970 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
40971 atomic_inc(&bufAllocCount);
40972 #ifdef CONFIG_CIFS_STATS2
40973- atomic_inc(&totBufAllocCount);
40974+ atomic_inc_unchecked(&totBufAllocCount);
40975 #endif /* CONFIG_CIFS_STATS2 */
40976 }
40977
40978@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
40979 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
40980 atomic_inc(&smBufAllocCount);
40981 #ifdef CONFIG_CIFS_STATS2
40982- atomic_inc(&totSmBufAllocCount);
40983+ atomic_inc_unchecked(&totSmBufAllocCount);
40984 #endif /* CONFIG_CIFS_STATS2 */
40985
40986 }
40987diff -urNp linux-3.1.1/fs/coda/cache.c linux-3.1.1/fs/coda/cache.c
40988--- linux-3.1.1/fs/coda/cache.c 2011-11-11 15:19:27.000000000 -0500
40989+++ linux-3.1.1/fs/coda/cache.c 2011-11-16 18:39:08.000000000 -0500
40990@@ -24,7 +24,7 @@
40991 #include "coda_linux.h"
40992 #include "coda_cache.h"
40993
40994-static atomic_t permission_epoch = ATOMIC_INIT(0);
40995+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40996
40997 /* replace or extend an acl cache hit */
40998 void coda_cache_enter(struct inode *inode, int mask)
40999@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
41000 struct coda_inode_info *cii = ITOC(inode);
41001
41002 spin_lock(&cii->c_lock);
41003- cii->c_cached_epoch = atomic_read(&permission_epoch);
41004+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41005 if (cii->c_uid != current_fsuid()) {
41006 cii->c_uid = current_fsuid();
41007 cii->c_cached_perm = mask;
41008@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
41009 {
41010 struct coda_inode_info *cii = ITOC(inode);
41011 spin_lock(&cii->c_lock);
41012- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41013+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41014 spin_unlock(&cii->c_lock);
41015 }
41016
41017 /* remove all acl caches */
41018 void coda_cache_clear_all(struct super_block *sb)
41019 {
41020- atomic_inc(&permission_epoch);
41021+ atomic_inc_unchecked(&permission_epoch);
41022 }
41023
41024
41025@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
41026 spin_lock(&cii->c_lock);
41027 hit = (mask & cii->c_cached_perm) == mask &&
41028 cii->c_uid == current_fsuid() &&
41029- cii->c_cached_epoch == atomic_read(&permission_epoch);
41030+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41031 spin_unlock(&cii->c_lock);
41032
41033 return hit;
41034diff -urNp linux-3.1.1/fs/compat_binfmt_elf.c linux-3.1.1/fs/compat_binfmt_elf.c
41035--- linux-3.1.1/fs/compat_binfmt_elf.c 2011-11-11 15:19:27.000000000 -0500
41036+++ linux-3.1.1/fs/compat_binfmt_elf.c 2011-11-16 18:39:08.000000000 -0500
41037@@ -30,11 +30,13 @@
41038 #undef elf_phdr
41039 #undef elf_shdr
41040 #undef elf_note
41041+#undef elf_dyn
41042 #undef elf_addr_t
41043 #define elfhdr elf32_hdr
41044 #define elf_phdr elf32_phdr
41045 #define elf_shdr elf32_shdr
41046 #define elf_note elf32_note
41047+#define elf_dyn Elf32_Dyn
41048 #define elf_addr_t Elf32_Addr
41049
41050 /*
41051diff -urNp linux-3.1.1/fs/compat.c linux-3.1.1/fs/compat.c
41052--- linux-3.1.1/fs/compat.c 2011-11-11 15:19:27.000000000 -0500
41053+++ linux-3.1.1/fs/compat.c 2011-11-16 18:40:29.000000000 -0500
41054@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
41055 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41056 {
41057 compat_ino_t ino = stat->ino;
41058- typeof(ubuf->st_uid) uid = 0;
41059- typeof(ubuf->st_gid) gid = 0;
41060+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41061+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41062 int err;
41063
41064 SET_UID(uid, stat->uid);
41065@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
41066
41067 set_fs(KERNEL_DS);
41068 /* The __user pointer cast is valid because of the set_fs() */
41069- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41070+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41071 set_fs(oldfs);
41072 /* truncating is ok because it's a user address */
41073 if (!ret)
41074@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
41075 goto out;
41076
41077 ret = -EINVAL;
41078- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41079+ if (nr_segs > UIO_MAXIOV)
41080 goto out;
41081 if (nr_segs > fast_segs) {
41082 ret = -ENOMEM;
41083@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
41084
41085 struct compat_readdir_callback {
41086 struct compat_old_linux_dirent __user *dirent;
41087+ struct file * file;
41088 int result;
41089 };
41090
41091@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
41092 buf->result = -EOVERFLOW;
41093 return -EOVERFLOW;
41094 }
41095+
41096+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41097+ return 0;
41098+
41099 buf->result++;
41100 dirent = buf->dirent;
41101 if (!access_ok(VERIFY_WRITE, dirent,
41102@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
41103
41104 buf.result = 0;
41105 buf.dirent = dirent;
41106+ buf.file = file;
41107
41108 error = vfs_readdir(file, compat_fillonedir, &buf);
41109 if (buf.result)
41110@@ -917,6 +923,7 @@ struct compat_linux_dirent {
41111 struct compat_getdents_callback {
41112 struct compat_linux_dirent __user *current_dir;
41113 struct compat_linux_dirent __user *previous;
41114+ struct file * file;
41115 int count;
41116 int error;
41117 };
41118@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
41119 buf->error = -EOVERFLOW;
41120 return -EOVERFLOW;
41121 }
41122+
41123+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41124+ return 0;
41125+
41126 dirent = buf->previous;
41127 if (dirent) {
41128 if (__put_user(offset, &dirent->d_off))
41129@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
41130 buf.previous = NULL;
41131 buf.count = count;
41132 buf.error = 0;
41133+ buf.file = file;
41134
41135 error = vfs_readdir(file, compat_filldir, &buf);
41136 if (error >= 0)
41137@@ -1006,6 +1018,7 @@ out:
41138 struct compat_getdents_callback64 {
41139 struct linux_dirent64 __user *current_dir;
41140 struct linux_dirent64 __user *previous;
41141+ struct file * file;
41142 int count;
41143 int error;
41144 };
41145@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
41146 buf->error = -EINVAL; /* only used if we fail.. */
41147 if (reclen > buf->count)
41148 return -EINVAL;
41149+
41150+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41151+ return 0;
41152+
41153 dirent = buf->previous;
41154
41155 if (dirent) {
41156@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
41157 buf.previous = NULL;
41158 buf.count = count;
41159 buf.error = 0;
41160+ buf.file = file;
41161
41162 error = vfs_readdir(file, compat_filldir64, &buf);
41163 if (error >= 0)
41164 error = buf.error;
41165 lastdirent = buf.previous;
41166 if (lastdirent) {
41167- typeof(lastdirent->d_off) d_off = file->f_pos;
41168+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41169 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41170 error = -EFAULT;
41171 else
41172@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
41173 struct fdtable *fdt;
41174 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
41175
41176+ pax_track_stack();
41177+
41178 if (n < 0)
41179 goto out_nofds;
41180
41181diff -urNp linux-3.1.1/fs/compat_ioctl.c linux-3.1.1/fs/compat_ioctl.c
41182--- linux-3.1.1/fs/compat_ioctl.c 2011-11-11 15:19:27.000000000 -0500
41183+++ linux-3.1.1/fs/compat_ioctl.c 2011-11-16 18:39:08.000000000 -0500
41184@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsi
41185
41186 err = get_user(palp, &up->palette);
41187 err |= get_user(length, &up->length);
41188+ if (err)
41189+ return -EFAULT;
41190
41191 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41192 err = put_user(compat_ptr(palp), &up_native->palette);
41193@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned
41194 return -EFAULT;
41195 if (__get_user(udata, &ss32->iomem_base))
41196 return -EFAULT;
41197- ss.iomem_base = compat_ptr(udata);
41198+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41199 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41200 __get_user(ss.port_high, &ss32->port_high))
41201 return -EFAULT;
41202@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(stru
41203 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41204 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41205 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41206- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41207+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41208 return -EFAULT;
41209
41210 return ioctl_preallocate(file, p);
41211@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigne
41212 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41213 {
41214 unsigned int a, b;
41215- a = *(unsigned int *)p;
41216- b = *(unsigned int *)q;
41217+ a = *(const unsigned int *)p;
41218+ b = *(const unsigned int *)q;
41219 if (a > b)
41220 return 1;
41221 if (a < b)
41222diff -urNp linux-3.1.1/fs/configfs/dir.c linux-3.1.1/fs/configfs/dir.c
41223--- linux-3.1.1/fs/configfs/dir.c 2011-11-11 15:19:27.000000000 -0500
41224+++ linux-3.1.1/fs/configfs/dir.c 2011-11-16 18:39:08.000000000 -0500
41225@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
41226 }
41227 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41228 struct configfs_dirent *next;
41229- const char * name;
41230+ const unsigned char * name;
41231+ char d_name[sizeof(next->s_dentry->d_iname)];
41232 int len;
41233 struct inode *inode = NULL;
41234
41235@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
41236 continue;
41237
41238 name = configfs_get_name(next);
41239- len = strlen(name);
41240+ if (next->s_dentry && name == next->s_dentry->d_iname) {
41241+ len = next->s_dentry->d_name.len;
41242+ memcpy(d_name, name, len);
41243+ name = d_name;
41244+ } else
41245+ len = strlen(name);
41246
41247 /*
41248 * We'll have a dentry and an inode for
41249diff -urNp linux-3.1.1/fs/dcache.c linux-3.1.1/fs/dcache.c
41250--- linux-3.1.1/fs/dcache.c 2011-11-11 15:19:27.000000000 -0500
41251+++ linux-3.1.1/fs/dcache.c 2011-11-16 18:39:08.000000000 -0500
41252@@ -2998,7 +2998,7 @@ void __init vfs_caches_init(unsigned lon
41253 mempages -= reserve;
41254
41255 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41256- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41257+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41258
41259 dcache_init();
41260 inode_init();
41261diff -urNp linux-3.1.1/fs/ecryptfs/inode.c linux-3.1.1/fs/ecryptfs/inode.c
41262--- linux-3.1.1/fs/ecryptfs/inode.c 2011-11-11 15:19:27.000000000 -0500
41263+++ linux-3.1.1/fs/ecryptfs/inode.c 2011-11-16 18:39:08.000000000 -0500
41264@@ -681,7 +681,7 @@ static int ecryptfs_readlink_lower(struc
41265 old_fs = get_fs();
41266 set_fs(get_ds());
41267 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41268- (char __user *)lower_buf,
41269+ (char __force_user *)lower_buf,
41270 lower_bufsiz);
41271 set_fs(old_fs);
41272 if (rc < 0)
41273@@ -727,7 +727,7 @@ static void *ecryptfs_follow_link(struct
41274 }
41275 old_fs = get_fs();
41276 set_fs(get_ds());
41277- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41278+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41279 set_fs(old_fs);
41280 if (rc < 0) {
41281 kfree(buf);
41282@@ -742,7 +742,7 @@ out:
41283 static void
41284 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41285 {
41286- char *buf = nd_get_link(nd);
41287+ const char *buf = nd_get_link(nd);
41288 if (!IS_ERR(buf)) {
41289 /* Free the char* */
41290 kfree(buf);
41291diff -urNp linux-3.1.1/fs/ecryptfs/miscdev.c linux-3.1.1/fs/ecryptfs/miscdev.c
41292--- linux-3.1.1/fs/ecryptfs/miscdev.c 2011-11-11 15:19:27.000000000 -0500
41293+++ linux-3.1.1/fs/ecryptfs/miscdev.c 2011-11-16 18:39:08.000000000 -0500
41294@@ -328,7 +328,7 @@ check_list:
41295 goto out_unlock_msg_ctx;
41296 i = 5;
41297 if (msg_ctx->msg) {
41298- if (copy_to_user(&buf[i], packet_length, packet_length_size))
41299+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41300 goto out_unlock_msg_ctx;
41301 i += packet_length_size;
41302 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41303diff -urNp linux-3.1.1/fs/ecryptfs/read_write.c linux-3.1.1/fs/ecryptfs/read_write.c
41304--- linux-3.1.1/fs/ecryptfs/read_write.c 2011-11-11 15:19:27.000000000 -0500
41305+++ linux-3.1.1/fs/ecryptfs/read_write.c 2011-11-16 18:39:08.000000000 -0500
41306@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
41307 return -EIO;
41308 fs_save = get_fs();
41309 set_fs(get_ds());
41310- rc = vfs_write(lower_file, data, size, &offset);
41311+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41312 set_fs(fs_save);
41313 mark_inode_dirty_sync(ecryptfs_inode);
41314 return rc;
41315@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
41316 return -EIO;
41317 fs_save = get_fs();
41318 set_fs(get_ds());
41319- rc = vfs_read(lower_file, data, size, &offset);
41320+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41321 set_fs(fs_save);
41322 return rc;
41323 }
41324diff -urNp linux-3.1.1/fs/exec.c linux-3.1.1/fs/exec.c
41325--- linux-3.1.1/fs/exec.c 2011-11-11 15:19:27.000000000 -0500
41326+++ linux-3.1.1/fs/exec.c 2011-11-17 18:40:47.000000000 -0500
41327@@ -55,12 +55,24 @@
41328 #include <linux/pipe_fs_i.h>
41329 #include <linux/oom.h>
41330 #include <linux/compat.h>
41331+#include <linux/random.h>
41332+#include <linux/seq_file.h>
41333+
41334+#ifdef CONFIG_PAX_REFCOUNT
41335+#include <linux/kallsyms.h>
41336+#include <linux/kdebug.h>
41337+#endif
41338
41339 #include <asm/uaccess.h>
41340 #include <asm/mmu_context.h>
41341 #include <asm/tlb.h>
41342 #include "internal.h"
41343
41344+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41345+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41346+EXPORT_SYMBOL(pax_set_initial_flags_func);
41347+#endif
41348+
41349 int core_uses_pid;
41350 char core_pattern[CORENAME_MAX_SIZE] = "core";
41351 unsigned int core_pipe_limit;
41352@@ -70,7 +82,7 @@ struct core_name {
41353 char *corename;
41354 int used, size;
41355 };
41356-static atomic_t call_count = ATOMIC_INIT(1);
41357+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41358
41359 /* The maximal length of core_pattern is also specified in sysctl.c */
41360
41361@@ -188,18 +200,10 @@ static struct page *get_arg_page(struct
41362 int write)
41363 {
41364 struct page *page;
41365- int ret;
41366
41367-#ifdef CONFIG_STACK_GROWSUP
41368- if (write) {
41369- ret = expand_downwards(bprm->vma, pos);
41370- if (ret < 0)
41371- return NULL;
41372- }
41373-#endif
41374- ret = get_user_pages(current, bprm->mm, pos,
41375- 1, write, 1, &page, NULL);
41376- if (ret <= 0)
41377+ if (0 > expand_downwards(bprm->vma, pos))
41378+ return NULL;
41379+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41380 return NULL;
41381
41382 if (write) {
41383@@ -274,6 +278,11 @@ static int __bprm_mm_init(struct linux_b
41384 vma->vm_end = STACK_TOP_MAX;
41385 vma->vm_start = vma->vm_end - PAGE_SIZE;
41386 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41387+
41388+#ifdef CONFIG_PAX_SEGMEXEC
41389+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41390+#endif
41391+
41392 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41393 INIT_LIST_HEAD(&vma->anon_vma_chain);
41394
41395@@ -288,6 +297,12 @@ static int __bprm_mm_init(struct linux_b
41396 mm->stack_vm = mm->total_vm = 1;
41397 up_write(&mm->mmap_sem);
41398 bprm->p = vma->vm_end - sizeof(void *);
41399+
41400+#ifdef CONFIG_PAX_RANDUSTACK
41401+ if (randomize_va_space)
41402+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41403+#endif
41404+
41405 return 0;
41406 err:
41407 up_write(&mm->mmap_sem);
41408@@ -396,19 +411,7 @@ err:
41409 return err;
41410 }
41411
41412-struct user_arg_ptr {
41413-#ifdef CONFIG_COMPAT
41414- bool is_compat;
41415-#endif
41416- union {
41417- const char __user *const __user *native;
41418-#ifdef CONFIG_COMPAT
41419- compat_uptr_t __user *compat;
41420-#endif
41421- } ptr;
41422-};
41423-
41424-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41425+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41426 {
41427 const char __user *native;
41428
41429@@ -417,14 +420,14 @@ static const char __user *get_user_arg_p
41430 compat_uptr_t compat;
41431
41432 if (get_user(compat, argv.ptr.compat + nr))
41433- return ERR_PTR(-EFAULT);
41434+ return (const char __force_user *)ERR_PTR(-EFAULT);
41435
41436 return compat_ptr(compat);
41437 }
41438 #endif
41439
41440 if (get_user(native, argv.ptr.native + nr))
41441- return ERR_PTR(-EFAULT);
41442+ return (const char __force_user *)ERR_PTR(-EFAULT);
41443
41444 return native;
41445 }
41446@@ -443,7 +446,7 @@ static int count(struct user_arg_ptr arg
41447 if (!p)
41448 break;
41449
41450- if (IS_ERR(p))
41451+ if (IS_ERR((const char __force_kernel *)p))
41452 return -EFAULT;
41453
41454 if (i++ >= max)
41455@@ -477,7 +480,7 @@ static int copy_strings(int argc, struct
41456
41457 ret = -EFAULT;
41458 str = get_user_arg_ptr(argv, argc);
41459- if (IS_ERR(str))
41460+ if (IS_ERR((const char __force_kernel *)str))
41461 goto out;
41462
41463 len = strnlen_user(str, MAX_ARG_STRLEN);
41464@@ -559,7 +562,7 @@ int copy_strings_kernel(int argc, const
41465 int r;
41466 mm_segment_t oldfs = get_fs();
41467 struct user_arg_ptr argv = {
41468- .ptr.native = (const char __user *const __user *)__argv,
41469+ .ptr.native = (const char __force_user *const __force_user *)__argv,
41470 };
41471
41472 set_fs(KERNEL_DS);
41473@@ -594,7 +597,8 @@ static int shift_arg_pages(struct vm_are
41474 unsigned long new_end = old_end - shift;
41475 struct mmu_gather tlb;
41476
41477- BUG_ON(new_start > new_end);
41478+ if (new_start >= new_end || new_start < mmap_min_addr)
41479+ return -ENOMEM;
41480
41481 /*
41482 * ensure there are no vmas between where we want to go
41483@@ -603,6 +607,10 @@ static int shift_arg_pages(struct vm_are
41484 if (vma != find_vma(mm, new_start))
41485 return -EFAULT;
41486
41487+#ifdef CONFIG_PAX_SEGMEXEC
41488+ BUG_ON(pax_find_mirror_vma(vma));
41489+#endif
41490+
41491 /*
41492 * cover the whole range: [new_start, old_end)
41493 */
41494@@ -683,10 +691,6 @@ int setup_arg_pages(struct linux_binprm
41495 stack_top = arch_align_stack(stack_top);
41496 stack_top = PAGE_ALIGN(stack_top);
41497
41498- if (unlikely(stack_top < mmap_min_addr) ||
41499- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41500- return -ENOMEM;
41501-
41502 stack_shift = vma->vm_end - stack_top;
41503
41504 bprm->p -= stack_shift;
41505@@ -698,8 +702,28 @@ int setup_arg_pages(struct linux_binprm
41506 bprm->exec -= stack_shift;
41507
41508 down_write(&mm->mmap_sem);
41509+
41510+ /* Move stack pages down in memory. */
41511+ if (stack_shift) {
41512+ ret = shift_arg_pages(vma, stack_shift);
41513+ if (ret)
41514+ goto out_unlock;
41515+ }
41516+
41517 vm_flags = VM_STACK_FLAGS;
41518
41519+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41520+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41521+ vm_flags &= ~VM_EXEC;
41522+
41523+#ifdef CONFIG_PAX_MPROTECT
41524+ if (mm->pax_flags & MF_PAX_MPROTECT)
41525+ vm_flags &= ~VM_MAYEXEC;
41526+#endif
41527+
41528+ }
41529+#endif
41530+
41531 /*
41532 * Adjust stack execute permissions; explicitly enable for
41533 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41534@@ -718,13 +742,6 @@ int setup_arg_pages(struct linux_binprm
41535 goto out_unlock;
41536 BUG_ON(prev != vma);
41537
41538- /* Move stack pages down in memory. */
41539- if (stack_shift) {
41540- ret = shift_arg_pages(vma, stack_shift);
41541- if (ret)
41542- goto out_unlock;
41543- }
41544-
41545 /* mprotect_fixup is overkill to remove the temporary stack flags */
41546 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41547
41548@@ -805,7 +822,7 @@ int kernel_read(struct file *file, loff_
41549 old_fs = get_fs();
41550 set_fs(get_ds());
41551 /* The cast to a user pointer is valid due to the set_fs() */
41552- result = vfs_read(file, (void __user *)addr, count, &pos);
41553+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41554 set_fs(old_fs);
41555 return result;
41556 }
41557@@ -1251,7 +1268,7 @@ int check_unsafe_exec(struct linux_binpr
41558 }
41559 rcu_read_unlock();
41560
41561- if (p->fs->users > n_fs) {
41562+ if (atomic_read(&p->fs->users) > n_fs) {
41563 bprm->unsafe |= LSM_UNSAFE_SHARE;
41564 } else {
41565 res = -EAGAIN;
41566@@ -1454,6 +1471,11 @@ static int do_execve_common(const char *
41567 struct user_arg_ptr envp,
41568 struct pt_regs *regs)
41569 {
41570+#ifdef CONFIG_GRKERNSEC
41571+ struct file *old_exec_file;
41572+ struct acl_subject_label *old_acl;
41573+ struct rlimit old_rlim[RLIM_NLIMITS];
41574+#endif
41575 struct linux_binprm *bprm;
41576 struct file *file;
41577 struct files_struct *displaced;
41578@@ -1461,6 +1483,8 @@ static int do_execve_common(const char *
41579 int retval;
41580 const struct cred *cred = current_cred();
41581
41582+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41583+
41584 /*
41585 * We move the actual failure in case of RLIMIT_NPROC excess from
41586 * set*uid() to execve() because too many poorly written programs
41587@@ -1507,6 +1531,16 @@ static int do_execve_common(const char *
41588 bprm->filename = filename;
41589 bprm->interp = filename;
41590
41591+ if (gr_process_user_ban()) {
41592+ retval = -EPERM;
41593+ goto out_file;
41594+ }
41595+
41596+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41597+ retval = -EACCES;
41598+ goto out_file;
41599+ }
41600+
41601 retval = bprm_mm_init(bprm);
41602 if (retval)
41603 goto out_file;
41604@@ -1536,9 +1570,40 @@ static int do_execve_common(const char *
41605 if (retval < 0)
41606 goto out;
41607
41608+ if (!gr_tpe_allow(file)) {
41609+ retval = -EACCES;
41610+ goto out;
41611+ }
41612+
41613+ if (gr_check_crash_exec(file)) {
41614+ retval = -EACCES;
41615+ goto out;
41616+ }
41617+
41618+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41619+
41620+ gr_handle_exec_args(bprm, argv);
41621+
41622+#ifdef CONFIG_GRKERNSEC
41623+ old_acl = current->acl;
41624+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41625+ old_exec_file = current->exec_file;
41626+ get_file(file);
41627+ current->exec_file = file;
41628+#endif
41629+
41630+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41631+ bprm->unsafe & LSM_UNSAFE_SHARE);
41632+ if (retval < 0)
41633+ goto out_fail;
41634+
41635 retval = search_binary_handler(bprm,regs);
41636 if (retval < 0)
41637- goto out;
41638+ goto out_fail;
41639+#ifdef CONFIG_GRKERNSEC
41640+ if (old_exec_file)
41641+ fput(old_exec_file);
41642+#endif
41643
41644 /* execve succeeded */
41645 current->fs->in_exec = 0;
41646@@ -1549,6 +1614,14 @@ static int do_execve_common(const char *
41647 put_files_struct(displaced);
41648 return retval;
41649
41650+out_fail:
41651+#ifdef CONFIG_GRKERNSEC
41652+ current->acl = old_acl;
41653+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41654+ fput(current->exec_file);
41655+ current->exec_file = old_exec_file;
41656+#endif
41657+
41658 out:
41659 if (bprm->mm) {
41660 acct_arg_size(bprm, 0);
41661@@ -1622,7 +1695,7 @@ static int expand_corename(struct core_n
41662 {
41663 char *old_corename = cn->corename;
41664
41665- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41666+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41667 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41668
41669 if (!cn->corename) {
41670@@ -1719,7 +1792,7 @@ static int format_corename(struct core_n
41671 int pid_in_pattern = 0;
41672 int err = 0;
41673
41674- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41675+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41676 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41677 cn->used = 0;
41678
41679@@ -1816,6 +1889,218 @@ out:
41680 return ispipe;
41681 }
41682
41683+int pax_check_flags(unsigned long *flags)
41684+{
41685+ int retval = 0;
41686+
41687+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41688+ if (*flags & MF_PAX_SEGMEXEC)
41689+ {
41690+ *flags &= ~MF_PAX_SEGMEXEC;
41691+ retval = -EINVAL;
41692+ }
41693+#endif
41694+
41695+ if ((*flags & MF_PAX_PAGEEXEC)
41696+
41697+#ifdef CONFIG_PAX_PAGEEXEC
41698+ && (*flags & MF_PAX_SEGMEXEC)
41699+#endif
41700+
41701+ )
41702+ {
41703+ *flags &= ~MF_PAX_PAGEEXEC;
41704+ retval = -EINVAL;
41705+ }
41706+
41707+ if ((*flags & MF_PAX_MPROTECT)
41708+
41709+#ifdef CONFIG_PAX_MPROTECT
41710+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41711+#endif
41712+
41713+ )
41714+ {
41715+ *flags &= ~MF_PAX_MPROTECT;
41716+ retval = -EINVAL;
41717+ }
41718+
41719+ if ((*flags & MF_PAX_EMUTRAMP)
41720+
41721+#ifdef CONFIG_PAX_EMUTRAMP
41722+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41723+#endif
41724+
41725+ )
41726+ {
41727+ *flags &= ~MF_PAX_EMUTRAMP;
41728+ retval = -EINVAL;
41729+ }
41730+
41731+ return retval;
41732+}
41733+
41734+EXPORT_SYMBOL(pax_check_flags);
41735+
41736+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41737+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41738+{
41739+ struct task_struct *tsk = current;
41740+ struct mm_struct *mm = current->mm;
41741+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41742+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41743+ char *path_exec = NULL;
41744+ char *path_fault = NULL;
41745+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41746+
41747+ if (buffer_exec && buffer_fault) {
41748+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41749+
41750+ down_read(&mm->mmap_sem);
41751+ vma = mm->mmap;
41752+ while (vma && (!vma_exec || !vma_fault)) {
41753+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41754+ vma_exec = vma;
41755+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41756+ vma_fault = vma;
41757+ vma = vma->vm_next;
41758+ }
41759+ if (vma_exec) {
41760+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41761+ if (IS_ERR(path_exec))
41762+ path_exec = "<path too long>";
41763+ else {
41764+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41765+ if (path_exec) {
41766+ *path_exec = 0;
41767+ path_exec = buffer_exec;
41768+ } else
41769+ path_exec = "<path too long>";
41770+ }
41771+ }
41772+ if (vma_fault) {
41773+ start = vma_fault->vm_start;
41774+ end = vma_fault->vm_end;
41775+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41776+ if (vma_fault->vm_file) {
41777+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41778+ if (IS_ERR(path_fault))
41779+ path_fault = "<path too long>";
41780+ else {
41781+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41782+ if (path_fault) {
41783+ *path_fault = 0;
41784+ path_fault = buffer_fault;
41785+ } else
41786+ path_fault = "<path too long>";
41787+ }
41788+ } else
41789+ path_fault = "<anonymous mapping>";
41790+ }
41791+ up_read(&mm->mmap_sem);
41792+ }
41793+ if (tsk->signal->curr_ip)
41794+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41795+ else
41796+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41797+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41798+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41799+ task_uid(tsk), task_euid(tsk), pc, sp);
41800+ free_page((unsigned long)buffer_exec);
41801+ free_page((unsigned long)buffer_fault);
41802+ pax_report_insns(regs, pc, sp);
41803+ do_coredump(SIGKILL, SIGKILL, regs);
41804+}
41805+#endif
41806+
41807+#ifdef CONFIG_PAX_REFCOUNT
41808+void pax_report_refcount_overflow(struct pt_regs *regs)
41809+{
41810+ if (current->signal->curr_ip)
41811+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41812+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41813+ else
41814+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41815+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41816+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
41817+ show_regs(regs);
41818+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41819+}
41820+#endif
41821+
41822+#ifdef CONFIG_PAX_USERCOPY
41823+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41824+int object_is_on_stack(const void *obj, unsigned long len)
41825+{
41826+ const void * const stack = task_stack_page(current);
41827+ const void * const stackend = stack + THREAD_SIZE;
41828+
41829+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41830+ const void *frame = NULL;
41831+ const void *oldframe;
41832+#endif
41833+
41834+ if (obj + len < obj)
41835+ return -1;
41836+
41837+ if (obj + len <= stack || stackend <= obj)
41838+ return 0;
41839+
41840+ if (obj < stack || stackend < obj + len)
41841+ return -1;
41842+
41843+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41844+ oldframe = __builtin_frame_address(1);
41845+ if (oldframe)
41846+ frame = __builtin_frame_address(2);
41847+ /*
41848+ low ----------------------------------------------> high
41849+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41850+ ^----------------^
41851+ allow copies only within here
41852+ */
41853+ while (stack <= frame && frame < stackend) {
41854+ /* if obj + len extends past the last frame, this
41855+ check won't pass and the next frame will be 0,
41856+ causing us to bail out and correctly report
41857+ the copy as invalid
41858+ */
41859+ if (obj + len <= frame)
41860+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41861+ oldframe = frame;
41862+ frame = *(const void * const *)frame;
41863+ }
41864+ return -1;
41865+#else
41866+ return 1;
41867+#endif
41868+}
41869+
41870+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41871+{
41872+ if (current->signal->curr_ip)
41873+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41874+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41875+ else
41876+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41877+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41878+ dump_stack();
41879+ gr_handle_kernel_exploit();
41880+ do_group_exit(SIGKILL);
41881+}
41882+#endif
41883+
41884+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41885+void pax_track_stack(void)
41886+{
41887+ unsigned long sp = (unsigned long)&sp;
41888+ if (sp < current_thread_info()->lowest_stack &&
41889+ sp > (unsigned long)task_stack_page(current))
41890+ current_thread_info()->lowest_stack = sp;
41891+}
41892+EXPORT_SYMBOL(pax_track_stack);
41893+#endif
41894+
41895 static int zap_process(struct task_struct *start, int exit_code)
41896 {
41897 struct task_struct *t;
41898@@ -2027,17 +2312,17 @@ static void wait_for_dump_helpers(struct
41899 pipe = file->f_path.dentry->d_inode->i_pipe;
41900
41901 pipe_lock(pipe);
41902- pipe->readers++;
41903- pipe->writers--;
41904+ atomic_inc(&pipe->readers);
41905+ atomic_dec(&pipe->writers);
41906
41907- while ((pipe->readers > 1) && (!signal_pending(current))) {
41908+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41909 wake_up_interruptible_sync(&pipe->wait);
41910 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41911 pipe_wait(pipe);
41912 }
41913
41914- pipe->readers--;
41915- pipe->writers++;
41916+ atomic_dec(&pipe->readers);
41917+ atomic_inc(&pipe->writers);
41918 pipe_unlock(pipe);
41919
41920 }
41921@@ -2098,7 +2383,7 @@ void do_coredump(long signr, int exit_co
41922 int retval = 0;
41923 int flag = 0;
41924 int ispipe;
41925- static atomic_t core_dump_count = ATOMIC_INIT(0);
41926+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41927 struct coredump_params cprm = {
41928 .signr = signr,
41929 .regs = regs,
41930@@ -2113,6 +2398,9 @@ void do_coredump(long signr, int exit_co
41931
41932 audit_core_dumps(signr);
41933
41934+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41935+ gr_handle_brute_attach(current, cprm.mm_flags);
41936+
41937 binfmt = mm->binfmt;
41938 if (!binfmt || !binfmt->core_dump)
41939 goto fail;
41940@@ -2180,7 +2468,7 @@ void do_coredump(long signr, int exit_co
41941 }
41942 cprm.limit = RLIM_INFINITY;
41943
41944- dump_count = atomic_inc_return(&core_dump_count);
41945+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41946 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41947 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41948 task_tgid_vnr(current), current->comm);
41949@@ -2207,6 +2495,8 @@ void do_coredump(long signr, int exit_co
41950 } else {
41951 struct inode *inode;
41952
41953+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41954+
41955 if (cprm.limit < binfmt->min_coredump)
41956 goto fail_unlock;
41957
41958@@ -2250,7 +2540,7 @@ close_fail:
41959 filp_close(cprm.file, NULL);
41960 fail_dropcount:
41961 if (ispipe)
41962- atomic_dec(&core_dump_count);
41963+ atomic_dec_unchecked(&core_dump_count);
41964 fail_unlock:
41965 kfree(cn.corename);
41966 fail_corename:
41967@@ -2269,7 +2559,7 @@ fail:
41968 */
41969 int dump_write(struct file *file, const void *addr, int nr)
41970 {
41971- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
41972+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
41973 }
41974 EXPORT_SYMBOL(dump_write);
41975
41976diff -urNp linux-3.1.1/fs/ext2/balloc.c linux-3.1.1/fs/ext2/balloc.c
41977--- linux-3.1.1/fs/ext2/balloc.c 2011-11-11 15:19:27.000000000 -0500
41978+++ linux-3.1.1/fs/ext2/balloc.c 2011-11-16 18:40:29.000000000 -0500
41979@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41980
41981 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41982 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41983- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41984+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41985 sbi->s_resuid != current_fsuid() &&
41986 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41987 return 0;
41988diff -urNp linux-3.1.1/fs/ext3/balloc.c linux-3.1.1/fs/ext3/balloc.c
41989--- linux-3.1.1/fs/ext3/balloc.c 2011-11-11 15:19:27.000000000 -0500
41990+++ linux-3.1.1/fs/ext3/balloc.c 2011-11-16 18:40:29.000000000 -0500
41991@@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct e
41992
41993 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41994 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41995- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41996+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41997 sbi->s_resuid != current_fsuid() &&
41998 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41999 return 0;
42000diff -urNp linux-3.1.1/fs/ext4/balloc.c linux-3.1.1/fs/ext4/balloc.c
42001--- linux-3.1.1/fs/ext4/balloc.c 2011-11-11 15:19:27.000000000 -0500
42002+++ linux-3.1.1/fs/ext4/balloc.c 2011-11-16 18:40:29.000000000 -0500
42003@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
42004 /* Hm, nope. Are (enough) root reserved blocks available? */
42005 if (sbi->s_resuid == current_fsuid() ||
42006 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42007- capable(CAP_SYS_RESOURCE) ||
42008- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42009+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42010+ capable_nolog(CAP_SYS_RESOURCE)) {
42011
42012 if (free_blocks >= (nblocks + dirty_blocks))
42013 return 1;
42014diff -urNp linux-3.1.1/fs/ext4/ext4.h linux-3.1.1/fs/ext4/ext4.h
42015--- linux-3.1.1/fs/ext4/ext4.h 2011-11-11 15:19:27.000000000 -0500
42016+++ linux-3.1.1/fs/ext4/ext4.h 2011-11-16 18:39:08.000000000 -0500
42017@@ -1180,19 +1180,19 @@ struct ext4_sb_info {
42018 unsigned long s_mb_last_start;
42019
42020 /* stats for buddy allocator */
42021- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42022- atomic_t s_bal_success; /* we found long enough chunks */
42023- atomic_t s_bal_allocated; /* in blocks */
42024- atomic_t s_bal_ex_scanned; /* total extents scanned */
42025- atomic_t s_bal_goals; /* goal hits */
42026- atomic_t s_bal_breaks; /* too long searches */
42027- atomic_t s_bal_2orders; /* 2^order hits */
42028+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42029+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42030+ atomic_unchecked_t s_bal_allocated; /* in blocks */
42031+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42032+ atomic_unchecked_t s_bal_goals; /* goal hits */
42033+ atomic_unchecked_t s_bal_breaks; /* too long searches */
42034+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42035 spinlock_t s_bal_lock;
42036 unsigned long s_mb_buddies_generated;
42037 unsigned long long s_mb_generation_time;
42038- atomic_t s_mb_lost_chunks;
42039- atomic_t s_mb_preallocated;
42040- atomic_t s_mb_discarded;
42041+ atomic_unchecked_t s_mb_lost_chunks;
42042+ atomic_unchecked_t s_mb_preallocated;
42043+ atomic_unchecked_t s_mb_discarded;
42044 atomic_t s_lock_busy;
42045
42046 /* locality groups */
42047diff -urNp linux-3.1.1/fs/ext4/file.c linux-3.1.1/fs/ext4/file.c
42048--- linux-3.1.1/fs/ext4/file.c 2011-11-11 15:19:27.000000000 -0500
42049+++ linux-3.1.1/fs/ext4/file.c 2011-11-16 18:40:29.000000000 -0500
42050@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode *
42051 path.dentry = mnt->mnt_root;
42052 cp = d_path(&path, buf, sizeof(buf));
42053 if (!IS_ERR(cp)) {
42054- memcpy(sbi->s_es->s_last_mounted, cp,
42055- sizeof(sbi->s_es->s_last_mounted));
42056+ strlcpy(sbi->s_es->s_last_mounted, cp,
42057+ sizeof(sbi->s_es->s_last_mounted));
42058 ext4_mark_super_dirty(sb);
42059 }
42060 }
42061diff -urNp linux-3.1.1/fs/ext4/ioctl.c linux-3.1.1/fs/ext4/ioctl.c
42062--- linux-3.1.1/fs/ext4/ioctl.c 2011-11-11 15:19:27.000000000 -0500
42063+++ linux-3.1.1/fs/ext4/ioctl.c 2011-11-16 18:39:08.000000000 -0500
42064@@ -348,7 +348,7 @@ mext_out:
42065 if (!blk_queue_discard(q))
42066 return -EOPNOTSUPP;
42067
42068- if (copy_from_user(&range, (struct fstrim_range *)arg,
42069+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
42070 sizeof(range)))
42071 return -EFAULT;
42072
42073@@ -358,7 +358,7 @@ mext_out:
42074 if (ret < 0)
42075 return ret;
42076
42077- if (copy_to_user((struct fstrim_range *)arg, &range,
42078+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
42079 sizeof(range)))
42080 return -EFAULT;
42081
42082diff -urNp linux-3.1.1/fs/ext4/mballoc.c linux-3.1.1/fs/ext4/mballoc.c
42083--- linux-3.1.1/fs/ext4/mballoc.c 2011-11-11 15:19:27.000000000 -0500
42084+++ linux-3.1.1/fs/ext4/mballoc.c 2011-11-16 18:40:29.000000000 -0500
42085@@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ex
42086 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42087
42088 if (EXT4_SB(sb)->s_mb_stats)
42089- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42090+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42091
42092 break;
42093 }
42094@@ -2089,7 +2089,7 @@ repeat:
42095 ac->ac_status = AC_STATUS_CONTINUE;
42096 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42097 cr = 3;
42098- atomic_inc(&sbi->s_mb_lost_chunks);
42099+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42100 goto repeat;
42101 }
42102 }
42103@@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struc
42104 ext4_grpblk_t counters[16];
42105 } sg;
42106
42107+ pax_track_stack();
42108+
42109 group--;
42110 if (group == 0)
42111 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
42112@@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *
42113 if (sbi->s_mb_stats) {
42114 ext4_msg(sb, KERN_INFO,
42115 "mballoc: %u blocks %u reqs (%u success)",
42116- atomic_read(&sbi->s_bal_allocated),
42117- atomic_read(&sbi->s_bal_reqs),
42118- atomic_read(&sbi->s_bal_success));
42119+ atomic_read_unchecked(&sbi->s_bal_allocated),
42120+ atomic_read_unchecked(&sbi->s_bal_reqs),
42121+ atomic_read_unchecked(&sbi->s_bal_success));
42122 ext4_msg(sb, KERN_INFO,
42123 "mballoc: %u extents scanned, %u goal hits, "
42124 "%u 2^N hits, %u breaks, %u lost",
42125- atomic_read(&sbi->s_bal_ex_scanned),
42126- atomic_read(&sbi->s_bal_goals),
42127- atomic_read(&sbi->s_bal_2orders),
42128- atomic_read(&sbi->s_bal_breaks),
42129- atomic_read(&sbi->s_mb_lost_chunks));
42130+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42131+ atomic_read_unchecked(&sbi->s_bal_goals),
42132+ atomic_read_unchecked(&sbi->s_bal_2orders),
42133+ atomic_read_unchecked(&sbi->s_bal_breaks),
42134+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42135 ext4_msg(sb, KERN_INFO,
42136 "mballoc: %lu generated and it took %Lu",
42137 sbi->s_mb_buddies_generated,
42138 sbi->s_mb_generation_time);
42139 ext4_msg(sb, KERN_INFO,
42140 "mballoc: %u preallocated, %u discarded",
42141- atomic_read(&sbi->s_mb_preallocated),
42142- atomic_read(&sbi->s_mb_discarded));
42143+ atomic_read_unchecked(&sbi->s_mb_preallocated),
42144+ atomic_read_unchecked(&sbi->s_mb_discarded));
42145 }
42146
42147 free_percpu(sbi->s_locality_groups);
42148@@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct
42149 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42150
42151 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42152- atomic_inc(&sbi->s_bal_reqs);
42153- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42154+ atomic_inc_unchecked(&sbi->s_bal_reqs);
42155+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42156 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42157- atomic_inc(&sbi->s_bal_success);
42158- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42159+ atomic_inc_unchecked(&sbi->s_bal_success);
42160+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42161 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42162 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42163- atomic_inc(&sbi->s_bal_goals);
42164+ atomic_inc_unchecked(&sbi->s_bal_goals);
42165 if (ac->ac_found > sbi->s_mb_max_to_scan)
42166- atomic_inc(&sbi->s_bal_breaks);
42167+ atomic_inc_unchecked(&sbi->s_bal_breaks);
42168 }
42169
42170 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42171@@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
42172 trace_ext4_mb_new_inode_pa(ac, pa);
42173
42174 ext4_mb_use_inode_pa(ac, pa);
42175- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42176+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42177
42178 ei = EXT4_I(ac->ac_inode);
42179 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42180@@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
42181 trace_ext4_mb_new_group_pa(ac, pa);
42182
42183 ext4_mb_use_group_pa(ac, pa);
42184- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42185+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42186
42187 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42188 lg = ac->ac_lg;
42189@@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
42190 * from the bitmap and continue.
42191 */
42192 }
42193- atomic_add(free, &sbi->s_mb_discarded);
42194+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42195
42196 return err;
42197 }
42198@@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_bud
42199 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42200 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42201 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42202- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42203+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42204 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42205
42206 return 0;
42207diff -urNp linux-3.1.1/fs/fcntl.c linux-3.1.1/fs/fcntl.c
42208--- linux-3.1.1/fs/fcntl.c 2011-11-11 15:19:27.000000000 -0500
42209+++ linux-3.1.1/fs/fcntl.c 2011-11-16 23:40:25.000000000 -0500
42210@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
42211 if (err)
42212 return err;
42213
42214+ if (gr_handle_chroot_fowner(pid, type))
42215+ return -ENOENT;
42216+ if (gr_check_protected_task_fowner(pid, type))
42217+ return -EACCES;
42218+
42219 f_modown(filp, pid, type, force);
42220 return 0;
42221 }
42222@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42223
42224 static int f_setown_ex(struct file *filp, unsigned long arg)
42225 {
42226- struct f_owner_ex * __user owner_p = (void * __user)arg;
42227+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42228 struct f_owner_ex owner;
42229 struct pid *pid;
42230 int type;
42231@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
42232
42233 static int f_getown_ex(struct file *filp, unsigned long arg)
42234 {
42235- struct f_owner_ex * __user owner_p = (void * __user)arg;
42236+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42237 struct f_owner_ex owner;
42238 int ret = 0;
42239
42240@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
42241 switch (cmd) {
42242 case F_DUPFD:
42243 case F_DUPFD_CLOEXEC:
42244+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42245 if (arg >= rlimit(RLIMIT_NOFILE))
42246 break;
42247 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42248diff -urNp linux-3.1.1/fs/fifo.c linux-3.1.1/fs/fifo.c
42249--- linux-3.1.1/fs/fifo.c 2011-11-11 15:19:27.000000000 -0500
42250+++ linux-3.1.1/fs/fifo.c 2011-11-16 18:39:08.000000000 -0500
42251@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
42252 */
42253 filp->f_op = &read_pipefifo_fops;
42254 pipe->r_counter++;
42255- if (pipe->readers++ == 0)
42256+ if (atomic_inc_return(&pipe->readers) == 1)
42257 wake_up_partner(inode);
42258
42259- if (!pipe->writers) {
42260+ if (!atomic_read(&pipe->writers)) {
42261 if ((filp->f_flags & O_NONBLOCK)) {
42262 /* suppress POLLHUP until we have
42263 * seen a writer */
42264@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
42265 * errno=ENXIO when there is no process reading the FIFO.
42266 */
42267 ret = -ENXIO;
42268- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42269+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42270 goto err;
42271
42272 filp->f_op = &write_pipefifo_fops;
42273 pipe->w_counter++;
42274- if (!pipe->writers++)
42275+ if (atomic_inc_return(&pipe->writers) == 1)
42276 wake_up_partner(inode);
42277
42278- if (!pipe->readers) {
42279+ if (!atomic_read(&pipe->readers)) {
42280 wait_for_partner(inode, &pipe->r_counter);
42281 if (signal_pending(current))
42282 goto err_wr;
42283@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
42284 */
42285 filp->f_op = &rdwr_pipefifo_fops;
42286
42287- pipe->readers++;
42288- pipe->writers++;
42289+ atomic_inc(&pipe->readers);
42290+ atomic_inc(&pipe->writers);
42291 pipe->r_counter++;
42292 pipe->w_counter++;
42293- if (pipe->readers == 1 || pipe->writers == 1)
42294+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42295 wake_up_partner(inode);
42296 break;
42297
42298@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
42299 return 0;
42300
42301 err_rd:
42302- if (!--pipe->readers)
42303+ if (atomic_dec_and_test(&pipe->readers))
42304 wake_up_interruptible(&pipe->wait);
42305 ret = -ERESTARTSYS;
42306 goto err;
42307
42308 err_wr:
42309- if (!--pipe->writers)
42310+ if (atomic_dec_and_test(&pipe->writers))
42311 wake_up_interruptible(&pipe->wait);
42312 ret = -ERESTARTSYS;
42313 goto err;
42314
42315 err:
42316- if (!pipe->readers && !pipe->writers)
42317+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42318 free_pipe_info(inode);
42319
42320 err_nocleanup:
42321diff -urNp linux-3.1.1/fs/file.c linux-3.1.1/fs/file.c
42322--- linux-3.1.1/fs/file.c 2011-11-11 15:19:27.000000000 -0500
42323+++ linux-3.1.1/fs/file.c 2011-11-16 18:40:29.000000000 -0500
42324@@ -15,6 +15,7 @@
42325 #include <linux/slab.h>
42326 #include <linux/vmalloc.h>
42327 #include <linux/file.h>
42328+#include <linux/security.h>
42329 #include <linux/fdtable.h>
42330 #include <linux/bitops.h>
42331 #include <linux/interrupt.h>
42332@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
42333 * N.B. For clone tasks sharing a files structure, this test
42334 * will limit the total number of files that can be opened.
42335 */
42336+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42337 if (nr >= rlimit(RLIMIT_NOFILE))
42338 return -EMFILE;
42339
42340diff -urNp linux-3.1.1/fs/filesystems.c linux-3.1.1/fs/filesystems.c
42341--- linux-3.1.1/fs/filesystems.c 2011-11-11 15:19:27.000000000 -0500
42342+++ linux-3.1.1/fs/filesystems.c 2011-11-16 18:40:29.000000000 -0500
42343@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
42344 int len = dot ? dot - name : strlen(name);
42345
42346 fs = __get_fs_type(name, len);
42347+
42348+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42349+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42350+#else
42351 if (!fs && (request_module("%.*s", len, name) == 0))
42352+#endif
42353 fs = __get_fs_type(name, len);
42354
42355 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42356diff -urNp linux-3.1.1/fs/fscache/cookie.c linux-3.1.1/fs/fscache/cookie.c
42357--- linux-3.1.1/fs/fscache/cookie.c 2011-11-11 15:19:27.000000000 -0500
42358+++ linux-3.1.1/fs/fscache/cookie.c 2011-11-16 18:39:08.000000000 -0500
42359@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
42360 parent ? (char *) parent->def->name : "<no-parent>",
42361 def->name, netfs_data);
42362
42363- fscache_stat(&fscache_n_acquires);
42364+ fscache_stat_unchecked(&fscache_n_acquires);
42365
42366 /* if there's no parent cookie, then we don't create one here either */
42367 if (!parent) {
42368- fscache_stat(&fscache_n_acquires_null);
42369+ fscache_stat_unchecked(&fscache_n_acquires_null);
42370 _leave(" [no parent]");
42371 return NULL;
42372 }
42373@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
42374 /* allocate and initialise a cookie */
42375 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42376 if (!cookie) {
42377- fscache_stat(&fscache_n_acquires_oom);
42378+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42379 _leave(" [ENOMEM]");
42380 return NULL;
42381 }
42382@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
42383
42384 switch (cookie->def->type) {
42385 case FSCACHE_COOKIE_TYPE_INDEX:
42386- fscache_stat(&fscache_n_cookie_index);
42387+ fscache_stat_unchecked(&fscache_n_cookie_index);
42388 break;
42389 case FSCACHE_COOKIE_TYPE_DATAFILE:
42390- fscache_stat(&fscache_n_cookie_data);
42391+ fscache_stat_unchecked(&fscache_n_cookie_data);
42392 break;
42393 default:
42394- fscache_stat(&fscache_n_cookie_special);
42395+ fscache_stat_unchecked(&fscache_n_cookie_special);
42396 break;
42397 }
42398
42399@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
42400 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42401 atomic_dec(&parent->n_children);
42402 __fscache_cookie_put(cookie);
42403- fscache_stat(&fscache_n_acquires_nobufs);
42404+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42405 _leave(" = NULL");
42406 return NULL;
42407 }
42408 }
42409
42410- fscache_stat(&fscache_n_acquires_ok);
42411+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42412 _leave(" = %p", cookie);
42413 return cookie;
42414 }
42415@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
42416 cache = fscache_select_cache_for_object(cookie->parent);
42417 if (!cache) {
42418 up_read(&fscache_addremove_sem);
42419- fscache_stat(&fscache_n_acquires_no_cache);
42420+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42421 _leave(" = -ENOMEDIUM [no cache]");
42422 return -ENOMEDIUM;
42423 }
42424@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
42425 object = cache->ops->alloc_object(cache, cookie);
42426 fscache_stat_d(&fscache_n_cop_alloc_object);
42427 if (IS_ERR(object)) {
42428- fscache_stat(&fscache_n_object_no_alloc);
42429+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42430 ret = PTR_ERR(object);
42431 goto error;
42432 }
42433
42434- fscache_stat(&fscache_n_object_alloc);
42435+ fscache_stat_unchecked(&fscache_n_object_alloc);
42436
42437 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42438
42439@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
42440 struct fscache_object *object;
42441 struct hlist_node *_p;
42442
42443- fscache_stat(&fscache_n_updates);
42444+ fscache_stat_unchecked(&fscache_n_updates);
42445
42446 if (!cookie) {
42447- fscache_stat(&fscache_n_updates_null);
42448+ fscache_stat_unchecked(&fscache_n_updates_null);
42449 _leave(" [no cookie]");
42450 return;
42451 }
42452@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
42453 struct fscache_object *object;
42454 unsigned long event;
42455
42456- fscache_stat(&fscache_n_relinquishes);
42457+ fscache_stat_unchecked(&fscache_n_relinquishes);
42458 if (retire)
42459- fscache_stat(&fscache_n_relinquishes_retire);
42460+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42461
42462 if (!cookie) {
42463- fscache_stat(&fscache_n_relinquishes_null);
42464+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42465 _leave(" [no cookie]");
42466 return;
42467 }
42468@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42469
42470 /* wait for the cookie to finish being instantiated (or to fail) */
42471 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42472- fscache_stat(&fscache_n_relinquishes_waitcrt);
42473+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42474 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42475 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42476 }
42477diff -urNp linux-3.1.1/fs/fscache/internal.h linux-3.1.1/fs/fscache/internal.h
42478--- linux-3.1.1/fs/fscache/internal.h 2011-11-11 15:19:27.000000000 -0500
42479+++ linux-3.1.1/fs/fscache/internal.h 2011-11-16 18:39:08.000000000 -0500
42480@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42481 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42482 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42483
42484-extern atomic_t fscache_n_op_pend;
42485-extern atomic_t fscache_n_op_run;
42486-extern atomic_t fscache_n_op_enqueue;
42487-extern atomic_t fscache_n_op_deferred_release;
42488-extern atomic_t fscache_n_op_release;
42489-extern atomic_t fscache_n_op_gc;
42490-extern atomic_t fscache_n_op_cancelled;
42491-extern atomic_t fscache_n_op_rejected;
42492-
42493-extern atomic_t fscache_n_attr_changed;
42494-extern atomic_t fscache_n_attr_changed_ok;
42495-extern atomic_t fscache_n_attr_changed_nobufs;
42496-extern atomic_t fscache_n_attr_changed_nomem;
42497-extern atomic_t fscache_n_attr_changed_calls;
42498-
42499-extern atomic_t fscache_n_allocs;
42500-extern atomic_t fscache_n_allocs_ok;
42501-extern atomic_t fscache_n_allocs_wait;
42502-extern atomic_t fscache_n_allocs_nobufs;
42503-extern atomic_t fscache_n_allocs_intr;
42504-extern atomic_t fscache_n_allocs_object_dead;
42505-extern atomic_t fscache_n_alloc_ops;
42506-extern atomic_t fscache_n_alloc_op_waits;
42507-
42508-extern atomic_t fscache_n_retrievals;
42509-extern atomic_t fscache_n_retrievals_ok;
42510-extern atomic_t fscache_n_retrievals_wait;
42511-extern atomic_t fscache_n_retrievals_nodata;
42512-extern atomic_t fscache_n_retrievals_nobufs;
42513-extern atomic_t fscache_n_retrievals_intr;
42514-extern atomic_t fscache_n_retrievals_nomem;
42515-extern atomic_t fscache_n_retrievals_object_dead;
42516-extern atomic_t fscache_n_retrieval_ops;
42517-extern atomic_t fscache_n_retrieval_op_waits;
42518-
42519-extern atomic_t fscache_n_stores;
42520-extern atomic_t fscache_n_stores_ok;
42521-extern atomic_t fscache_n_stores_again;
42522-extern atomic_t fscache_n_stores_nobufs;
42523-extern atomic_t fscache_n_stores_oom;
42524-extern atomic_t fscache_n_store_ops;
42525-extern atomic_t fscache_n_store_calls;
42526-extern atomic_t fscache_n_store_pages;
42527-extern atomic_t fscache_n_store_radix_deletes;
42528-extern atomic_t fscache_n_store_pages_over_limit;
42529-
42530-extern atomic_t fscache_n_store_vmscan_not_storing;
42531-extern atomic_t fscache_n_store_vmscan_gone;
42532-extern atomic_t fscache_n_store_vmscan_busy;
42533-extern atomic_t fscache_n_store_vmscan_cancelled;
42534-
42535-extern atomic_t fscache_n_marks;
42536-extern atomic_t fscache_n_uncaches;
42537-
42538-extern atomic_t fscache_n_acquires;
42539-extern atomic_t fscache_n_acquires_null;
42540-extern atomic_t fscache_n_acquires_no_cache;
42541-extern atomic_t fscache_n_acquires_ok;
42542-extern atomic_t fscache_n_acquires_nobufs;
42543-extern atomic_t fscache_n_acquires_oom;
42544-
42545-extern atomic_t fscache_n_updates;
42546-extern atomic_t fscache_n_updates_null;
42547-extern atomic_t fscache_n_updates_run;
42548-
42549-extern atomic_t fscache_n_relinquishes;
42550-extern atomic_t fscache_n_relinquishes_null;
42551-extern atomic_t fscache_n_relinquishes_waitcrt;
42552-extern atomic_t fscache_n_relinquishes_retire;
42553-
42554-extern atomic_t fscache_n_cookie_index;
42555-extern atomic_t fscache_n_cookie_data;
42556-extern atomic_t fscache_n_cookie_special;
42557-
42558-extern atomic_t fscache_n_object_alloc;
42559-extern atomic_t fscache_n_object_no_alloc;
42560-extern atomic_t fscache_n_object_lookups;
42561-extern atomic_t fscache_n_object_lookups_negative;
42562-extern atomic_t fscache_n_object_lookups_positive;
42563-extern atomic_t fscache_n_object_lookups_timed_out;
42564-extern atomic_t fscache_n_object_created;
42565-extern atomic_t fscache_n_object_avail;
42566-extern atomic_t fscache_n_object_dead;
42567-
42568-extern atomic_t fscache_n_checkaux_none;
42569-extern atomic_t fscache_n_checkaux_okay;
42570-extern atomic_t fscache_n_checkaux_update;
42571-extern atomic_t fscache_n_checkaux_obsolete;
42572+extern atomic_unchecked_t fscache_n_op_pend;
42573+extern atomic_unchecked_t fscache_n_op_run;
42574+extern atomic_unchecked_t fscache_n_op_enqueue;
42575+extern atomic_unchecked_t fscache_n_op_deferred_release;
42576+extern atomic_unchecked_t fscache_n_op_release;
42577+extern atomic_unchecked_t fscache_n_op_gc;
42578+extern atomic_unchecked_t fscache_n_op_cancelled;
42579+extern atomic_unchecked_t fscache_n_op_rejected;
42580+
42581+extern atomic_unchecked_t fscache_n_attr_changed;
42582+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42583+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42584+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42585+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42586+
42587+extern atomic_unchecked_t fscache_n_allocs;
42588+extern atomic_unchecked_t fscache_n_allocs_ok;
42589+extern atomic_unchecked_t fscache_n_allocs_wait;
42590+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42591+extern atomic_unchecked_t fscache_n_allocs_intr;
42592+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42593+extern atomic_unchecked_t fscache_n_alloc_ops;
42594+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42595+
42596+extern atomic_unchecked_t fscache_n_retrievals;
42597+extern atomic_unchecked_t fscache_n_retrievals_ok;
42598+extern atomic_unchecked_t fscache_n_retrievals_wait;
42599+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42600+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42601+extern atomic_unchecked_t fscache_n_retrievals_intr;
42602+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42603+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42604+extern atomic_unchecked_t fscache_n_retrieval_ops;
42605+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42606+
42607+extern atomic_unchecked_t fscache_n_stores;
42608+extern atomic_unchecked_t fscache_n_stores_ok;
42609+extern atomic_unchecked_t fscache_n_stores_again;
42610+extern atomic_unchecked_t fscache_n_stores_nobufs;
42611+extern atomic_unchecked_t fscache_n_stores_oom;
42612+extern atomic_unchecked_t fscache_n_store_ops;
42613+extern atomic_unchecked_t fscache_n_store_calls;
42614+extern atomic_unchecked_t fscache_n_store_pages;
42615+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42616+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42617+
42618+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42619+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42620+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42621+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42622+
42623+extern atomic_unchecked_t fscache_n_marks;
42624+extern atomic_unchecked_t fscache_n_uncaches;
42625+
42626+extern atomic_unchecked_t fscache_n_acquires;
42627+extern atomic_unchecked_t fscache_n_acquires_null;
42628+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42629+extern atomic_unchecked_t fscache_n_acquires_ok;
42630+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42631+extern atomic_unchecked_t fscache_n_acquires_oom;
42632+
42633+extern atomic_unchecked_t fscache_n_updates;
42634+extern atomic_unchecked_t fscache_n_updates_null;
42635+extern atomic_unchecked_t fscache_n_updates_run;
42636+
42637+extern atomic_unchecked_t fscache_n_relinquishes;
42638+extern atomic_unchecked_t fscache_n_relinquishes_null;
42639+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42640+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42641+
42642+extern atomic_unchecked_t fscache_n_cookie_index;
42643+extern atomic_unchecked_t fscache_n_cookie_data;
42644+extern atomic_unchecked_t fscache_n_cookie_special;
42645+
42646+extern atomic_unchecked_t fscache_n_object_alloc;
42647+extern atomic_unchecked_t fscache_n_object_no_alloc;
42648+extern atomic_unchecked_t fscache_n_object_lookups;
42649+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42650+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42651+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42652+extern atomic_unchecked_t fscache_n_object_created;
42653+extern atomic_unchecked_t fscache_n_object_avail;
42654+extern atomic_unchecked_t fscache_n_object_dead;
42655+
42656+extern atomic_unchecked_t fscache_n_checkaux_none;
42657+extern atomic_unchecked_t fscache_n_checkaux_okay;
42658+extern atomic_unchecked_t fscache_n_checkaux_update;
42659+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42660
42661 extern atomic_t fscache_n_cop_alloc_object;
42662 extern atomic_t fscache_n_cop_lookup_object;
42663@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
42664 atomic_inc(stat);
42665 }
42666
42667+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42668+{
42669+ atomic_inc_unchecked(stat);
42670+}
42671+
42672 static inline void fscache_stat_d(atomic_t *stat)
42673 {
42674 atomic_dec(stat);
42675@@ -267,6 +272,7 @@ extern const struct file_operations fsca
42676
42677 #define __fscache_stat(stat) (NULL)
42678 #define fscache_stat(stat) do {} while (0)
42679+#define fscache_stat_unchecked(stat) do {} while (0)
42680 #define fscache_stat_d(stat) do {} while (0)
42681 #endif
42682
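The fscache hunks in this run all follow one pattern: statistics counters that only feed the /proc/fs/fscache/stats report are switched from atomic_t to atomic_unchecked_t, and their increments go through the fscache_stat_unchecked() helper added to internal.h above. The point is to keep PaX's reference-counter overflow protection focused on real refcounts; counters that merely count events are allowed to wrap. A minimal sketch of the helper and the unchecked type follows — the _sketch names are illustrative stand-ins, since the real atomic_unchecked_t and atomic_inc_unchecked() are defined per architecture elsewhere in this patch:

    /* Stand-in for the unchecked atomic type: same storage as atomic_t,
     * but its operations are not instrumented for overflow detection. */
    typedef struct { int counter; } atomic_unchecked_t_sketch;

    static inline void atomic_inc_unchecked_sketch(atomic_unchecked_t_sketch *v)
    {
            __sync_fetch_and_add(&v->counter, 1);   /* plain increment, no trap on wrap */
    }

    static inline int atomic_read_unchecked_sketch(const atomic_unchecked_t_sketch *v)
    {
            return v->counter;
    }

    /* Mirrors the fscache_stat_unchecked() helper added above. */
    static inline void fscache_stat_unchecked_sketch(atomic_unchecked_t_sketch *stat)
    {
            atomic_inc_unchecked_sketch(stat);
    }

The read side (fs/fscache/stats.c further down) pairs this with atomic_read_unchecked() when printing the counters.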
42683diff -urNp linux-3.1.1/fs/fscache/object.c linux-3.1.1/fs/fscache/object.c
42684--- linux-3.1.1/fs/fscache/object.c 2011-11-11 15:19:27.000000000 -0500
42685+++ linux-3.1.1/fs/fscache/object.c 2011-11-16 18:39:08.000000000 -0500
42686@@ -128,7 +128,7 @@ static void fscache_object_state_machine
42687 /* update the object metadata on disk */
42688 case FSCACHE_OBJECT_UPDATING:
42689 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42690- fscache_stat(&fscache_n_updates_run);
42691+ fscache_stat_unchecked(&fscache_n_updates_run);
42692 fscache_stat(&fscache_n_cop_update_object);
42693 object->cache->ops->update_object(object);
42694 fscache_stat_d(&fscache_n_cop_update_object);
42695@@ -217,7 +217,7 @@ static void fscache_object_state_machine
42696 spin_lock(&object->lock);
42697 object->state = FSCACHE_OBJECT_DEAD;
42698 spin_unlock(&object->lock);
42699- fscache_stat(&fscache_n_object_dead);
42700+ fscache_stat_unchecked(&fscache_n_object_dead);
42701 goto terminal_transit;
42702
42703 /* handle the parent cache of this object being withdrawn from
42704@@ -232,7 +232,7 @@ static void fscache_object_state_machine
42705 spin_lock(&object->lock);
42706 object->state = FSCACHE_OBJECT_DEAD;
42707 spin_unlock(&object->lock);
42708- fscache_stat(&fscache_n_object_dead);
42709+ fscache_stat_unchecked(&fscache_n_object_dead);
42710 goto terminal_transit;
42711
42712 /* complain about the object being woken up once it is
42713@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
42714 parent->cookie->def->name, cookie->def->name,
42715 object->cache->tag->name);
42716
42717- fscache_stat(&fscache_n_object_lookups);
42718+ fscache_stat_unchecked(&fscache_n_object_lookups);
42719 fscache_stat(&fscache_n_cop_lookup_object);
42720 ret = object->cache->ops->lookup_object(object);
42721 fscache_stat_d(&fscache_n_cop_lookup_object);
42722@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
42723 if (ret == -ETIMEDOUT) {
42724 /* probably stuck behind another object, so move this one to
42725 * the back of the queue */
42726- fscache_stat(&fscache_n_object_lookups_timed_out);
42727+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42728 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42729 }
42730
42731@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
42732
42733 spin_lock(&object->lock);
42734 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42735- fscache_stat(&fscache_n_object_lookups_negative);
42736+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42737
42738 /* transit here to allow write requests to begin stacking up
42739 * and read requests to begin returning ENODATA */
42740@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
42741 * result, in which case there may be data available */
42742 spin_lock(&object->lock);
42743 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42744- fscache_stat(&fscache_n_object_lookups_positive);
42745+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42746
42747 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42748
42749@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
42750 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42751 } else {
42752 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42753- fscache_stat(&fscache_n_object_created);
42754+ fscache_stat_unchecked(&fscache_n_object_created);
42755
42756 object->state = FSCACHE_OBJECT_AVAILABLE;
42757 spin_unlock(&object->lock);
42758@@ -602,7 +602,7 @@ static void fscache_object_available(str
42759 fscache_enqueue_dependents(object);
42760
42761 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42762- fscache_stat(&fscache_n_object_avail);
42763+ fscache_stat_unchecked(&fscache_n_object_avail);
42764
42765 _leave("");
42766 }
42767@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42768 enum fscache_checkaux result;
42769
42770 if (!object->cookie->def->check_aux) {
42771- fscache_stat(&fscache_n_checkaux_none);
42772+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42773 return FSCACHE_CHECKAUX_OKAY;
42774 }
42775
42776@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42777 switch (result) {
42778 /* entry okay as is */
42779 case FSCACHE_CHECKAUX_OKAY:
42780- fscache_stat(&fscache_n_checkaux_okay);
42781+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42782 break;
42783
42784 /* entry requires update */
42785 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42786- fscache_stat(&fscache_n_checkaux_update);
42787+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42788 break;
42789
42790 /* entry requires deletion */
42791 case FSCACHE_CHECKAUX_OBSOLETE:
42792- fscache_stat(&fscache_n_checkaux_obsolete);
42793+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42794 break;
42795
42796 default:
42797diff -urNp linux-3.1.1/fs/fscache/operation.c linux-3.1.1/fs/fscache/operation.c
42798--- linux-3.1.1/fs/fscache/operation.c 2011-11-11 15:19:27.000000000 -0500
42799+++ linux-3.1.1/fs/fscache/operation.c 2011-11-16 18:39:08.000000000 -0500
42800@@ -17,7 +17,7 @@
42801 #include <linux/slab.h>
42802 #include "internal.h"
42803
42804-atomic_t fscache_op_debug_id;
42805+atomic_unchecked_t fscache_op_debug_id;
42806 EXPORT_SYMBOL(fscache_op_debug_id);
42807
42808 /**
42809@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
42810 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42811 ASSERTCMP(atomic_read(&op->usage), >, 0);
42812
42813- fscache_stat(&fscache_n_op_enqueue);
42814+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42815 switch (op->flags & FSCACHE_OP_TYPE) {
42816 case FSCACHE_OP_ASYNC:
42817 _debug("queue async");
42818@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
42819 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42820 if (op->processor)
42821 fscache_enqueue_operation(op);
42822- fscache_stat(&fscache_n_op_run);
42823+ fscache_stat_unchecked(&fscache_n_op_run);
42824 }
42825
42826 /*
42827@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
42828 if (object->n_ops > 1) {
42829 atomic_inc(&op->usage);
42830 list_add_tail(&op->pend_link, &object->pending_ops);
42831- fscache_stat(&fscache_n_op_pend);
42832+ fscache_stat_unchecked(&fscache_n_op_pend);
42833 } else if (!list_empty(&object->pending_ops)) {
42834 atomic_inc(&op->usage);
42835 list_add_tail(&op->pend_link, &object->pending_ops);
42836- fscache_stat(&fscache_n_op_pend);
42837+ fscache_stat_unchecked(&fscache_n_op_pend);
42838 fscache_start_operations(object);
42839 } else {
42840 ASSERTCMP(object->n_in_progress, ==, 0);
42841@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
42842 object->n_exclusive++; /* reads and writes must wait */
42843 atomic_inc(&op->usage);
42844 list_add_tail(&op->pend_link, &object->pending_ops);
42845- fscache_stat(&fscache_n_op_pend);
42846+ fscache_stat_unchecked(&fscache_n_op_pend);
42847 ret = 0;
42848 } else {
42849 /* not allowed to submit ops in any other state */
42850@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
42851 if (object->n_exclusive > 0) {
42852 atomic_inc(&op->usage);
42853 list_add_tail(&op->pend_link, &object->pending_ops);
42854- fscache_stat(&fscache_n_op_pend);
42855+ fscache_stat_unchecked(&fscache_n_op_pend);
42856 } else if (!list_empty(&object->pending_ops)) {
42857 atomic_inc(&op->usage);
42858 list_add_tail(&op->pend_link, &object->pending_ops);
42859- fscache_stat(&fscache_n_op_pend);
42860+ fscache_stat_unchecked(&fscache_n_op_pend);
42861 fscache_start_operations(object);
42862 } else {
42863 ASSERTCMP(object->n_exclusive, ==, 0);
42864@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
42865 object->n_ops++;
42866 atomic_inc(&op->usage);
42867 list_add_tail(&op->pend_link, &object->pending_ops);
42868- fscache_stat(&fscache_n_op_pend);
42869+ fscache_stat_unchecked(&fscache_n_op_pend);
42870 ret = 0;
42871 } else if (object->state == FSCACHE_OBJECT_DYING ||
42872 object->state == FSCACHE_OBJECT_LC_DYING ||
42873 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42874- fscache_stat(&fscache_n_op_rejected);
42875+ fscache_stat_unchecked(&fscache_n_op_rejected);
42876 ret = -ENOBUFS;
42877 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42878 fscache_report_unexpected_submission(object, op, ostate);
42879@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
42880
42881 ret = -EBUSY;
42882 if (!list_empty(&op->pend_link)) {
42883- fscache_stat(&fscache_n_op_cancelled);
42884+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42885 list_del_init(&op->pend_link);
42886 object->n_ops--;
42887 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42888@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
42889 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42890 BUG();
42891
42892- fscache_stat(&fscache_n_op_release);
42893+ fscache_stat_unchecked(&fscache_n_op_release);
42894
42895 if (op->release) {
42896 op->release(op);
42897@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
42898 * lock, and defer it otherwise */
42899 if (!spin_trylock(&object->lock)) {
42900 _debug("defer put");
42901- fscache_stat(&fscache_n_op_deferred_release);
42902+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42903
42904 cache = object->cache;
42905 spin_lock(&cache->op_gc_list_lock);
42906@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
42907
42908 _debug("GC DEFERRED REL OBJ%x OP%x",
42909 object->debug_id, op->debug_id);
42910- fscache_stat(&fscache_n_op_gc);
42911+ fscache_stat_unchecked(&fscache_n_op_gc);
42912
42913 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42914
42915diff -urNp linux-3.1.1/fs/fscache/page.c linux-3.1.1/fs/fscache/page.c
42916--- linux-3.1.1/fs/fscache/page.c 2011-11-11 15:19:27.000000000 -0500
42917+++ linux-3.1.1/fs/fscache/page.c 2011-11-16 18:39:08.000000000 -0500
42918@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
42919 val = radix_tree_lookup(&cookie->stores, page->index);
42920 if (!val) {
42921 rcu_read_unlock();
42922- fscache_stat(&fscache_n_store_vmscan_not_storing);
42923+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42924 __fscache_uncache_page(cookie, page);
42925 return true;
42926 }
42927@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
42928 spin_unlock(&cookie->stores_lock);
42929
42930 if (xpage) {
42931- fscache_stat(&fscache_n_store_vmscan_cancelled);
42932- fscache_stat(&fscache_n_store_radix_deletes);
42933+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42934+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42935 ASSERTCMP(xpage, ==, page);
42936 } else {
42937- fscache_stat(&fscache_n_store_vmscan_gone);
42938+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42939 }
42940
42941 wake_up_bit(&cookie->flags, 0);
42942@@ -107,7 +107,7 @@ page_busy:
42943 /* we might want to wait here, but that could deadlock the allocator as
42944 * the work threads writing to the cache may all end up sleeping
42945 * on memory allocation */
42946- fscache_stat(&fscache_n_store_vmscan_busy);
42947+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42948 return false;
42949 }
42950 EXPORT_SYMBOL(__fscache_maybe_release_page);
42951@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
42952 FSCACHE_COOKIE_STORING_TAG);
42953 if (!radix_tree_tag_get(&cookie->stores, page->index,
42954 FSCACHE_COOKIE_PENDING_TAG)) {
42955- fscache_stat(&fscache_n_store_radix_deletes);
42956+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42957 xpage = radix_tree_delete(&cookie->stores, page->index);
42958 }
42959 spin_unlock(&cookie->stores_lock);
42960@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
42961
42962 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42963
42964- fscache_stat(&fscache_n_attr_changed_calls);
42965+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42966
42967 if (fscache_object_is_active(object)) {
42968 fscache_stat(&fscache_n_cop_attr_changed);
42969@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
42970
42971 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42972
42973- fscache_stat(&fscache_n_attr_changed);
42974+ fscache_stat_unchecked(&fscache_n_attr_changed);
42975
42976 op = kzalloc(sizeof(*op), GFP_KERNEL);
42977 if (!op) {
42978- fscache_stat(&fscache_n_attr_changed_nomem);
42979+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42980 _leave(" = -ENOMEM");
42981 return -ENOMEM;
42982 }
42983@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
42984 if (fscache_submit_exclusive_op(object, op) < 0)
42985 goto nobufs;
42986 spin_unlock(&cookie->lock);
42987- fscache_stat(&fscache_n_attr_changed_ok);
42988+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42989 fscache_put_operation(op);
42990 _leave(" = 0");
42991 return 0;
42992@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
42993 nobufs:
42994 spin_unlock(&cookie->lock);
42995 kfree(op);
42996- fscache_stat(&fscache_n_attr_changed_nobufs);
42997+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42998 _leave(" = %d", -ENOBUFS);
42999 return -ENOBUFS;
43000 }
43001@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
43002 /* allocate a retrieval operation and attempt to submit it */
43003 op = kzalloc(sizeof(*op), GFP_NOIO);
43004 if (!op) {
43005- fscache_stat(&fscache_n_retrievals_nomem);
43006+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43007 return NULL;
43008 }
43009
43010@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
43011 return 0;
43012 }
43013
43014- fscache_stat(&fscache_n_retrievals_wait);
43015+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43016
43017 jif = jiffies;
43018 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43019 fscache_wait_bit_interruptible,
43020 TASK_INTERRUPTIBLE) != 0) {
43021- fscache_stat(&fscache_n_retrievals_intr);
43022+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43023 _leave(" = -ERESTARTSYS");
43024 return -ERESTARTSYS;
43025 }
43026@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
43027 */
43028 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43029 struct fscache_retrieval *op,
43030- atomic_t *stat_op_waits,
43031- atomic_t *stat_object_dead)
43032+ atomic_unchecked_t *stat_op_waits,
43033+ atomic_unchecked_t *stat_object_dead)
43034 {
43035 int ret;
43036
43037@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
43038 goto check_if_dead;
43039
43040 _debug(">>> WT");
43041- fscache_stat(stat_op_waits);
43042+ fscache_stat_unchecked(stat_op_waits);
43043 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43044 fscache_wait_bit_interruptible,
43045 TASK_INTERRUPTIBLE) < 0) {
43046@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
43047
43048 check_if_dead:
43049 if (unlikely(fscache_object_is_dead(object))) {
43050- fscache_stat(stat_object_dead);
43051+ fscache_stat_unchecked(stat_object_dead);
43052 return -ENOBUFS;
43053 }
43054 return 0;
43055@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
43056
43057 _enter("%p,%p,,,", cookie, page);
43058
43059- fscache_stat(&fscache_n_retrievals);
43060+ fscache_stat_unchecked(&fscache_n_retrievals);
43061
43062 if (hlist_empty(&cookie->backing_objects))
43063 goto nobufs;
43064@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
43065 goto nobufs_unlock;
43066 spin_unlock(&cookie->lock);
43067
43068- fscache_stat(&fscache_n_retrieval_ops);
43069+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43070
43071 /* pin the netfs read context in case we need to do the actual netfs
43072 * read because we've encountered a cache read failure */
43073@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
43074
43075 error:
43076 if (ret == -ENOMEM)
43077- fscache_stat(&fscache_n_retrievals_nomem);
43078+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43079 else if (ret == -ERESTARTSYS)
43080- fscache_stat(&fscache_n_retrievals_intr);
43081+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43082 else if (ret == -ENODATA)
43083- fscache_stat(&fscache_n_retrievals_nodata);
43084+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43085 else if (ret < 0)
43086- fscache_stat(&fscache_n_retrievals_nobufs);
43087+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43088 else
43089- fscache_stat(&fscache_n_retrievals_ok);
43090+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43091
43092 fscache_put_retrieval(op);
43093 _leave(" = %d", ret);
43094@@ -429,7 +429,7 @@ nobufs_unlock:
43095 spin_unlock(&cookie->lock);
43096 kfree(op);
43097 nobufs:
43098- fscache_stat(&fscache_n_retrievals_nobufs);
43099+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43100 _leave(" = -ENOBUFS");
43101 return -ENOBUFS;
43102 }
43103@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
43104
43105 _enter("%p,,%d,,,", cookie, *nr_pages);
43106
43107- fscache_stat(&fscache_n_retrievals);
43108+ fscache_stat_unchecked(&fscache_n_retrievals);
43109
43110 if (hlist_empty(&cookie->backing_objects))
43111 goto nobufs;
43112@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
43113 goto nobufs_unlock;
43114 spin_unlock(&cookie->lock);
43115
43116- fscache_stat(&fscache_n_retrieval_ops);
43117+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43118
43119 /* pin the netfs read context in case we need to do the actual netfs
43120 * read because we've encountered a cache read failure */
43121@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
43122
43123 error:
43124 if (ret == -ENOMEM)
43125- fscache_stat(&fscache_n_retrievals_nomem);
43126+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43127 else if (ret == -ERESTARTSYS)
43128- fscache_stat(&fscache_n_retrievals_intr);
43129+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43130 else if (ret == -ENODATA)
43131- fscache_stat(&fscache_n_retrievals_nodata);
43132+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43133 else if (ret < 0)
43134- fscache_stat(&fscache_n_retrievals_nobufs);
43135+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43136 else
43137- fscache_stat(&fscache_n_retrievals_ok);
43138+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43139
43140 fscache_put_retrieval(op);
43141 _leave(" = %d", ret);
43142@@ -545,7 +545,7 @@ nobufs_unlock:
43143 spin_unlock(&cookie->lock);
43144 kfree(op);
43145 nobufs:
43146- fscache_stat(&fscache_n_retrievals_nobufs);
43147+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43148 _leave(" = -ENOBUFS");
43149 return -ENOBUFS;
43150 }
43151@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
43152
43153 _enter("%p,%p,,,", cookie, page);
43154
43155- fscache_stat(&fscache_n_allocs);
43156+ fscache_stat_unchecked(&fscache_n_allocs);
43157
43158 if (hlist_empty(&cookie->backing_objects))
43159 goto nobufs;
43160@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
43161 goto nobufs_unlock;
43162 spin_unlock(&cookie->lock);
43163
43164- fscache_stat(&fscache_n_alloc_ops);
43165+ fscache_stat_unchecked(&fscache_n_alloc_ops);
43166
43167 ret = fscache_wait_for_retrieval_activation(
43168 object, op,
43169@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
43170
43171 error:
43172 if (ret == -ERESTARTSYS)
43173- fscache_stat(&fscache_n_allocs_intr);
43174+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43175 else if (ret < 0)
43176- fscache_stat(&fscache_n_allocs_nobufs);
43177+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43178 else
43179- fscache_stat(&fscache_n_allocs_ok);
43180+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43181
43182 fscache_put_retrieval(op);
43183 _leave(" = %d", ret);
43184@@ -625,7 +625,7 @@ nobufs_unlock:
43185 spin_unlock(&cookie->lock);
43186 kfree(op);
43187 nobufs:
43188- fscache_stat(&fscache_n_allocs_nobufs);
43189+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43190 _leave(" = -ENOBUFS");
43191 return -ENOBUFS;
43192 }
43193@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
43194
43195 spin_lock(&cookie->stores_lock);
43196
43197- fscache_stat(&fscache_n_store_calls);
43198+ fscache_stat_unchecked(&fscache_n_store_calls);
43199
43200 /* find a page to store */
43201 page = NULL;
43202@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
43203 page = results[0];
43204 _debug("gang %d [%lx]", n, page->index);
43205 if (page->index > op->store_limit) {
43206- fscache_stat(&fscache_n_store_pages_over_limit);
43207+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43208 goto superseded;
43209 }
43210
43211@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
43212 spin_unlock(&cookie->stores_lock);
43213 spin_unlock(&object->lock);
43214
43215- fscache_stat(&fscache_n_store_pages);
43216+ fscache_stat_unchecked(&fscache_n_store_pages);
43217 fscache_stat(&fscache_n_cop_write_page);
43218 ret = object->cache->ops->write_page(op, page);
43219 fscache_stat_d(&fscache_n_cop_write_page);
43220@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
43221 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43222 ASSERT(PageFsCache(page));
43223
43224- fscache_stat(&fscache_n_stores);
43225+ fscache_stat_unchecked(&fscache_n_stores);
43226
43227 op = kzalloc(sizeof(*op), GFP_NOIO);
43228 if (!op)
43229@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
43230 spin_unlock(&cookie->stores_lock);
43231 spin_unlock(&object->lock);
43232
43233- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43234+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43235 op->store_limit = object->store_limit;
43236
43237 if (fscache_submit_op(object, &op->op) < 0)
43238@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
43239
43240 spin_unlock(&cookie->lock);
43241 radix_tree_preload_end();
43242- fscache_stat(&fscache_n_store_ops);
43243- fscache_stat(&fscache_n_stores_ok);
43244+ fscache_stat_unchecked(&fscache_n_store_ops);
43245+ fscache_stat_unchecked(&fscache_n_stores_ok);
43246
43247 /* the work queue now carries its own ref on the object */
43248 fscache_put_operation(&op->op);
43249@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
43250 return 0;
43251
43252 already_queued:
43253- fscache_stat(&fscache_n_stores_again);
43254+ fscache_stat_unchecked(&fscache_n_stores_again);
43255 already_pending:
43256 spin_unlock(&cookie->stores_lock);
43257 spin_unlock(&object->lock);
43258 spin_unlock(&cookie->lock);
43259 radix_tree_preload_end();
43260 kfree(op);
43261- fscache_stat(&fscache_n_stores_ok);
43262+ fscache_stat_unchecked(&fscache_n_stores_ok);
43263 _leave(" = 0");
43264 return 0;
43265
43266@@ -851,14 +851,14 @@ nobufs:
43267 spin_unlock(&cookie->lock);
43268 radix_tree_preload_end();
43269 kfree(op);
43270- fscache_stat(&fscache_n_stores_nobufs);
43271+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43272 _leave(" = -ENOBUFS");
43273 return -ENOBUFS;
43274
43275 nomem_free:
43276 kfree(op);
43277 nomem:
43278- fscache_stat(&fscache_n_stores_oom);
43279+ fscache_stat_unchecked(&fscache_n_stores_oom);
43280 _leave(" = -ENOMEM");
43281 return -ENOMEM;
43282 }
43283@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
43284 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43285 ASSERTCMP(page, !=, NULL);
43286
43287- fscache_stat(&fscache_n_uncaches);
43288+ fscache_stat_unchecked(&fscache_n_uncaches);
43289
43290 /* cache withdrawal may beat us to it */
43291 if (!PageFsCache(page))
43292@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
43293 unsigned long loop;
43294
43295 #ifdef CONFIG_FSCACHE_STATS
43296- atomic_add(pagevec->nr, &fscache_n_marks);
43297+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43298 #endif
43299
43300 for (loop = 0; loop < pagevec->nr; loop++) {
43301diff -urNp linux-3.1.1/fs/fscache/stats.c linux-3.1.1/fs/fscache/stats.c
43302--- linux-3.1.1/fs/fscache/stats.c 2011-11-11 15:19:27.000000000 -0500
43303+++ linux-3.1.1/fs/fscache/stats.c 2011-11-16 18:39:08.000000000 -0500
43304@@ -18,95 +18,95 @@
43305 /*
43306 * operation counters
43307 */
43308-atomic_t fscache_n_op_pend;
43309-atomic_t fscache_n_op_run;
43310-atomic_t fscache_n_op_enqueue;
43311-atomic_t fscache_n_op_requeue;
43312-atomic_t fscache_n_op_deferred_release;
43313-atomic_t fscache_n_op_release;
43314-atomic_t fscache_n_op_gc;
43315-atomic_t fscache_n_op_cancelled;
43316-atomic_t fscache_n_op_rejected;
43317-
43318-atomic_t fscache_n_attr_changed;
43319-atomic_t fscache_n_attr_changed_ok;
43320-atomic_t fscache_n_attr_changed_nobufs;
43321-atomic_t fscache_n_attr_changed_nomem;
43322-atomic_t fscache_n_attr_changed_calls;
43323-
43324-atomic_t fscache_n_allocs;
43325-atomic_t fscache_n_allocs_ok;
43326-atomic_t fscache_n_allocs_wait;
43327-atomic_t fscache_n_allocs_nobufs;
43328-atomic_t fscache_n_allocs_intr;
43329-atomic_t fscache_n_allocs_object_dead;
43330-atomic_t fscache_n_alloc_ops;
43331-atomic_t fscache_n_alloc_op_waits;
43332-
43333-atomic_t fscache_n_retrievals;
43334-atomic_t fscache_n_retrievals_ok;
43335-atomic_t fscache_n_retrievals_wait;
43336-atomic_t fscache_n_retrievals_nodata;
43337-atomic_t fscache_n_retrievals_nobufs;
43338-atomic_t fscache_n_retrievals_intr;
43339-atomic_t fscache_n_retrievals_nomem;
43340-atomic_t fscache_n_retrievals_object_dead;
43341-atomic_t fscache_n_retrieval_ops;
43342-atomic_t fscache_n_retrieval_op_waits;
43343-
43344-atomic_t fscache_n_stores;
43345-atomic_t fscache_n_stores_ok;
43346-atomic_t fscache_n_stores_again;
43347-atomic_t fscache_n_stores_nobufs;
43348-atomic_t fscache_n_stores_oom;
43349-atomic_t fscache_n_store_ops;
43350-atomic_t fscache_n_store_calls;
43351-atomic_t fscache_n_store_pages;
43352-atomic_t fscache_n_store_radix_deletes;
43353-atomic_t fscache_n_store_pages_over_limit;
43354-
43355-atomic_t fscache_n_store_vmscan_not_storing;
43356-atomic_t fscache_n_store_vmscan_gone;
43357-atomic_t fscache_n_store_vmscan_busy;
43358-atomic_t fscache_n_store_vmscan_cancelled;
43359-
43360-atomic_t fscache_n_marks;
43361-atomic_t fscache_n_uncaches;
43362-
43363-atomic_t fscache_n_acquires;
43364-atomic_t fscache_n_acquires_null;
43365-atomic_t fscache_n_acquires_no_cache;
43366-atomic_t fscache_n_acquires_ok;
43367-atomic_t fscache_n_acquires_nobufs;
43368-atomic_t fscache_n_acquires_oom;
43369-
43370-atomic_t fscache_n_updates;
43371-atomic_t fscache_n_updates_null;
43372-atomic_t fscache_n_updates_run;
43373-
43374-atomic_t fscache_n_relinquishes;
43375-atomic_t fscache_n_relinquishes_null;
43376-atomic_t fscache_n_relinquishes_waitcrt;
43377-atomic_t fscache_n_relinquishes_retire;
43378-
43379-atomic_t fscache_n_cookie_index;
43380-atomic_t fscache_n_cookie_data;
43381-atomic_t fscache_n_cookie_special;
43382-
43383-atomic_t fscache_n_object_alloc;
43384-atomic_t fscache_n_object_no_alloc;
43385-atomic_t fscache_n_object_lookups;
43386-atomic_t fscache_n_object_lookups_negative;
43387-atomic_t fscache_n_object_lookups_positive;
43388-atomic_t fscache_n_object_lookups_timed_out;
43389-atomic_t fscache_n_object_created;
43390-atomic_t fscache_n_object_avail;
43391-atomic_t fscache_n_object_dead;
43392-
43393-atomic_t fscache_n_checkaux_none;
43394-atomic_t fscache_n_checkaux_okay;
43395-atomic_t fscache_n_checkaux_update;
43396-atomic_t fscache_n_checkaux_obsolete;
43397+atomic_unchecked_t fscache_n_op_pend;
43398+atomic_unchecked_t fscache_n_op_run;
43399+atomic_unchecked_t fscache_n_op_enqueue;
43400+atomic_unchecked_t fscache_n_op_requeue;
43401+atomic_unchecked_t fscache_n_op_deferred_release;
43402+atomic_unchecked_t fscache_n_op_release;
43403+atomic_unchecked_t fscache_n_op_gc;
43404+atomic_unchecked_t fscache_n_op_cancelled;
43405+atomic_unchecked_t fscache_n_op_rejected;
43406+
43407+atomic_unchecked_t fscache_n_attr_changed;
43408+atomic_unchecked_t fscache_n_attr_changed_ok;
43409+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43410+atomic_unchecked_t fscache_n_attr_changed_nomem;
43411+atomic_unchecked_t fscache_n_attr_changed_calls;
43412+
43413+atomic_unchecked_t fscache_n_allocs;
43414+atomic_unchecked_t fscache_n_allocs_ok;
43415+atomic_unchecked_t fscache_n_allocs_wait;
43416+atomic_unchecked_t fscache_n_allocs_nobufs;
43417+atomic_unchecked_t fscache_n_allocs_intr;
43418+atomic_unchecked_t fscache_n_allocs_object_dead;
43419+atomic_unchecked_t fscache_n_alloc_ops;
43420+atomic_unchecked_t fscache_n_alloc_op_waits;
43421+
43422+atomic_unchecked_t fscache_n_retrievals;
43423+atomic_unchecked_t fscache_n_retrievals_ok;
43424+atomic_unchecked_t fscache_n_retrievals_wait;
43425+atomic_unchecked_t fscache_n_retrievals_nodata;
43426+atomic_unchecked_t fscache_n_retrievals_nobufs;
43427+atomic_unchecked_t fscache_n_retrievals_intr;
43428+atomic_unchecked_t fscache_n_retrievals_nomem;
43429+atomic_unchecked_t fscache_n_retrievals_object_dead;
43430+atomic_unchecked_t fscache_n_retrieval_ops;
43431+atomic_unchecked_t fscache_n_retrieval_op_waits;
43432+
43433+atomic_unchecked_t fscache_n_stores;
43434+atomic_unchecked_t fscache_n_stores_ok;
43435+atomic_unchecked_t fscache_n_stores_again;
43436+atomic_unchecked_t fscache_n_stores_nobufs;
43437+atomic_unchecked_t fscache_n_stores_oom;
43438+atomic_unchecked_t fscache_n_store_ops;
43439+atomic_unchecked_t fscache_n_store_calls;
43440+atomic_unchecked_t fscache_n_store_pages;
43441+atomic_unchecked_t fscache_n_store_radix_deletes;
43442+atomic_unchecked_t fscache_n_store_pages_over_limit;
43443+
43444+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43445+atomic_unchecked_t fscache_n_store_vmscan_gone;
43446+atomic_unchecked_t fscache_n_store_vmscan_busy;
43447+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43448+
43449+atomic_unchecked_t fscache_n_marks;
43450+atomic_unchecked_t fscache_n_uncaches;
43451+
43452+atomic_unchecked_t fscache_n_acquires;
43453+atomic_unchecked_t fscache_n_acquires_null;
43454+atomic_unchecked_t fscache_n_acquires_no_cache;
43455+atomic_unchecked_t fscache_n_acquires_ok;
43456+atomic_unchecked_t fscache_n_acquires_nobufs;
43457+atomic_unchecked_t fscache_n_acquires_oom;
43458+
43459+atomic_unchecked_t fscache_n_updates;
43460+atomic_unchecked_t fscache_n_updates_null;
43461+atomic_unchecked_t fscache_n_updates_run;
43462+
43463+atomic_unchecked_t fscache_n_relinquishes;
43464+atomic_unchecked_t fscache_n_relinquishes_null;
43465+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43466+atomic_unchecked_t fscache_n_relinquishes_retire;
43467+
43468+atomic_unchecked_t fscache_n_cookie_index;
43469+atomic_unchecked_t fscache_n_cookie_data;
43470+atomic_unchecked_t fscache_n_cookie_special;
43471+
43472+atomic_unchecked_t fscache_n_object_alloc;
43473+atomic_unchecked_t fscache_n_object_no_alloc;
43474+atomic_unchecked_t fscache_n_object_lookups;
43475+atomic_unchecked_t fscache_n_object_lookups_negative;
43476+atomic_unchecked_t fscache_n_object_lookups_positive;
43477+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43478+atomic_unchecked_t fscache_n_object_created;
43479+atomic_unchecked_t fscache_n_object_avail;
43480+atomic_unchecked_t fscache_n_object_dead;
43481+
43482+atomic_unchecked_t fscache_n_checkaux_none;
43483+atomic_unchecked_t fscache_n_checkaux_okay;
43484+atomic_unchecked_t fscache_n_checkaux_update;
43485+atomic_unchecked_t fscache_n_checkaux_obsolete;
43486
43487 atomic_t fscache_n_cop_alloc_object;
43488 atomic_t fscache_n_cop_lookup_object;
43489@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43490 seq_puts(m, "FS-Cache statistics\n");
43491
43492 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43493- atomic_read(&fscache_n_cookie_index),
43494- atomic_read(&fscache_n_cookie_data),
43495- atomic_read(&fscache_n_cookie_special));
43496+ atomic_read_unchecked(&fscache_n_cookie_index),
43497+ atomic_read_unchecked(&fscache_n_cookie_data),
43498+ atomic_read_unchecked(&fscache_n_cookie_special));
43499
43500 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43501- atomic_read(&fscache_n_object_alloc),
43502- atomic_read(&fscache_n_object_no_alloc),
43503- atomic_read(&fscache_n_object_avail),
43504- atomic_read(&fscache_n_object_dead));
43505+ atomic_read_unchecked(&fscache_n_object_alloc),
43506+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43507+ atomic_read_unchecked(&fscache_n_object_avail),
43508+ atomic_read_unchecked(&fscache_n_object_dead));
43509 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43510- atomic_read(&fscache_n_checkaux_none),
43511- atomic_read(&fscache_n_checkaux_okay),
43512- atomic_read(&fscache_n_checkaux_update),
43513- atomic_read(&fscache_n_checkaux_obsolete));
43514+ atomic_read_unchecked(&fscache_n_checkaux_none),
43515+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43516+ atomic_read_unchecked(&fscache_n_checkaux_update),
43517+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43518
43519 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43520- atomic_read(&fscache_n_marks),
43521- atomic_read(&fscache_n_uncaches));
43522+ atomic_read_unchecked(&fscache_n_marks),
43523+ atomic_read_unchecked(&fscache_n_uncaches));
43524
43525 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43526 " oom=%u\n",
43527- atomic_read(&fscache_n_acquires),
43528- atomic_read(&fscache_n_acquires_null),
43529- atomic_read(&fscache_n_acquires_no_cache),
43530- atomic_read(&fscache_n_acquires_ok),
43531- atomic_read(&fscache_n_acquires_nobufs),
43532- atomic_read(&fscache_n_acquires_oom));
43533+ atomic_read_unchecked(&fscache_n_acquires),
43534+ atomic_read_unchecked(&fscache_n_acquires_null),
43535+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43536+ atomic_read_unchecked(&fscache_n_acquires_ok),
43537+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43538+ atomic_read_unchecked(&fscache_n_acquires_oom));
43539
43540 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43541- atomic_read(&fscache_n_object_lookups),
43542- atomic_read(&fscache_n_object_lookups_negative),
43543- atomic_read(&fscache_n_object_lookups_positive),
43544- atomic_read(&fscache_n_object_created),
43545- atomic_read(&fscache_n_object_lookups_timed_out));
43546+ atomic_read_unchecked(&fscache_n_object_lookups),
43547+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43548+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43549+ atomic_read_unchecked(&fscache_n_object_created),
43550+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43551
43552 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43553- atomic_read(&fscache_n_updates),
43554- atomic_read(&fscache_n_updates_null),
43555- atomic_read(&fscache_n_updates_run));
43556+ atomic_read_unchecked(&fscache_n_updates),
43557+ atomic_read_unchecked(&fscache_n_updates_null),
43558+ atomic_read_unchecked(&fscache_n_updates_run));
43559
43560 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43561- atomic_read(&fscache_n_relinquishes),
43562- atomic_read(&fscache_n_relinquishes_null),
43563- atomic_read(&fscache_n_relinquishes_waitcrt),
43564- atomic_read(&fscache_n_relinquishes_retire));
43565+ atomic_read_unchecked(&fscache_n_relinquishes),
43566+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43567+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43568+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43569
43570 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43571- atomic_read(&fscache_n_attr_changed),
43572- atomic_read(&fscache_n_attr_changed_ok),
43573- atomic_read(&fscache_n_attr_changed_nobufs),
43574- atomic_read(&fscache_n_attr_changed_nomem),
43575- atomic_read(&fscache_n_attr_changed_calls));
43576+ atomic_read_unchecked(&fscache_n_attr_changed),
43577+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43578+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43579+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43580+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43581
43582 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43583- atomic_read(&fscache_n_allocs),
43584- atomic_read(&fscache_n_allocs_ok),
43585- atomic_read(&fscache_n_allocs_wait),
43586- atomic_read(&fscache_n_allocs_nobufs),
43587- atomic_read(&fscache_n_allocs_intr));
43588+ atomic_read_unchecked(&fscache_n_allocs),
43589+ atomic_read_unchecked(&fscache_n_allocs_ok),
43590+ atomic_read_unchecked(&fscache_n_allocs_wait),
43591+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43592+ atomic_read_unchecked(&fscache_n_allocs_intr));
43593 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43594- atomic_read(&fscache_n_alloc_ops),
43595- atomic_read(&fscache_n_alloc_op_waits),
43596- atomic_read(&fscache_n_allocs_object_dead));
43597+ atomic_read_unchecked(&fscache_n_alloc_ops),
43598+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43599+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43600
43601 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43602 " int=%u oom=%u\n",
43603- atomic_read(&fscache_n_retrievals),
43604- atomic_read(&fscache_n_retrievals_ok),
43605- atomic_read(&fscache_n_retrievals_wait),
43606- atomic_read(&fscache_n_retrievals_nodata),
43607- atomic_read(&fscache_n_retrievals_nobufs),
43608- atomic_read(&fscache_n_retrievals_intr),
43609- atomic_read(&fscache_n_retrievals_nomem));
43610+ atomic_read_unchecked(&fscache_n_retrievals),
43611+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43612+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43613+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43614+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43615+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43616+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43617 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43618- atomic_read(&fscache_n_retrieval_ops),
43619- atomic_read(&fscache_n_retrieval_op_waits),
43620- atomic_read(&fscache_n_retrievals_object_dead));
43621+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43622+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43623+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43624
43625 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43626- atomic_read(&fscache_n_stores),
43627- atomic_read(&fscache_n_stores_ok),
43628- atomic_read(&fscache_n_stores_again),
43629- atomic_read(&fscache_n_stores_nobufs),
43630- atomic_read(&fscache_n_stores_oom));
43631+ atomic_read_unchecked(&fscache_n_stores),
43632+ atomic_read_unchecked(&fscache_n_stores_ok),
43633+ atomic_read_unchecked(&fscache_n_stores_again),
43634+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43635+ atomic_read_unchecked(&fscache_n_stores_oom));
43636 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43637- atomic_read(&fscache_n_store_ops),
43638- atomic_read(&fscache_n_store_calls),
43639- atomic_read(&fscache_n_store_pages),
43640- atomic_read(&fscache_n_store_radix_deletes),
43641- atomic_read(&fscache_n_store_pages_over_limit));
43642+ atomic_read_unchecked(&fscache_n_store_ops),
43643+ atomic_read_unchecked(&fscache_n_store_calls),
43644+ atomic_read_unchecked(&fscache_n_store_pages),
43645+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43646+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43647
43648 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43649- atomic_read(&fscache_n_store_vmscan_not_storing),
43650- atomic_read(&fscache_n_store_vmscan_gone),
43651- atomic_read(&fscache_n_store_vmscan_busy),
43652- atomic_read(&fscache_n_store_vmscan_cancelled));
43653+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43654+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43655+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43656+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43657
43658 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43659- atomic_read(&fscache_n_op_pend),
43660- atomic_read(&fscache_n_op_run),
43661- atomic_read(&fscache_n_op_enqueue),
43662- atomic_read(&fscache_n_op_cancelled),
43663- atomic_read(&fscache_n_op_rejected));
43664+ atomic_read_unchecked(&fscache_n_op_pend),
43665+ atomic_read_unchecked(&fscache_n_op_run),
43666+ atomic_read_unchecked(&fscache_n_op_enqueue),
43667+ atomic_read_unchecked(&fscache_n_op_cancelled),
43668+ atomic_read_unchecked(&fscache_n_op_rejected));
43669 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43670- atomic_read(&fscache_n_op_deferred_release),
43671- atomic_read(&fscache_n_op_release),
43672- atomic_read(&fscache_n_op_gc));
43673+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43674+ atomic_read_unchecked(&fscache_n_op_release),
43675+ atomic_read_unchecked(&fscache_n_op_gc));
43676
43677 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43678 atomic_read(&fscache_n_cop_alloc_object),
43679diff -urNp linux-3.1.1/fs/fs_struct.c linux-3.1.1/fs/fs_struct.c
43680--- linux-3.1.1/fs/fs_struct.c 2011-11-11 15:19:27.000000000 -0500
43681+++ linux-3.1.1/fs/fs_struct.c 2011-11-16 18:40:29.000000000 -0500
43682@@ -4,6 +4,7 @@
43683 #include <linux/path.h>
43684 #include <linux/slab.h>
43685 #include <linux/fs_struct.h>
43686+#include <linux/grsecurity.h>
43687 #include "internal.h"
43688
43689 static inline void path_get_longterm(struct path *path)
43690@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
43691 old_root = fs->root;
43692 fs->root = *path;
43693 path_get_longterm(path);
43694+ gr_set_chroot_entries(current, path);
43695 write_seqcount_end(&fs->seq);
43696 spin_unlock(&fs->lock);
43697 if (old_root.dentry)
43698@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
43699 && fs->root.mnt == old_root->mnt) {
43700 path_get_longterm(new_root);
43701 fs->root = *new_root;
43702+ gr_set_chroot_entries(p, new_root);
43703 count++;
43704 }
43705 if (fs->pwd.dentry == old_root->dentry
43706@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
43707 spin_lock(&fs->lock);
43708 write_seqcount_begin(&fs->seq);
43709 tsk->fs = NULL;
43710- kill = !--fs->users;
43711+ gr_clear_chroot_entries(tsk);
43712+ kill = !atomic_dec_return(&fs->users);
43713 write_seqcount_end(&fs->seq);
43714 spin_unlock(&fs->lock);
43715 task_unlock(tsk);
43716@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
43717 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43718 /* We don't need to lock fs - think why ;-) */
43719 if (fs) {
43720- fs->users = 1;
43721+ atomic_set(&fs->users, 1);
43722 fs->in_exec = 0;
43723 spin_lock_init(&fs->lock);
43724 seqcount_init(&fs->seq);
43725@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
43726 spin_lock(&old->lock);
43727 fs->root = old->root;
43728 path_get_longterm(&fs->root);
43729+ /* instead of calling gr_set_chroot_entries here,
43730+ we call it from every caller of this function
43731+ */
43732 fs->pwd = old->pwd;
43733 path_get_longterm(&fs->pwd);
43734 spin_unlock(&old->lock);
43735@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
43736
43737 task_lock(current);
43738 spin_lock(&fs->lock);
43739- kill = !--fs->users;
43740+ kill = !atomic_dec_return(&fs->users);
43741 current->fs = new_fs;
43742+ gr_set_chroot_entries(current, &new_fs->root);
43743 spin_unlock(&fs->lock);
43744 task_unlock(current);
43745
43746@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
43747
43748 /* to be mentioned only in INIT_TASK */
43749 struct fs_struct init_fs = {
43750- .users = 1,
43751+ .users = ATOMIC_INIT(1),
43752 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
43753 .seq = SEQCNT_ZERO,
43754 .umask = 0022,
43755@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
43756 task_lock(current);
43757
43758 spin_lock(&init_fs.lock);
43759- init_fs.users++;
43760+ atomic_inc(&init_fs.users);
43761 spin_unlock(&init_fs.lock);
43762
43763 spin_lock(&fs->lock);
43764 current->fs = &init_fs;
43765- kill = !--fs->users;
43766+ gr_set_chroot_entries(current, &current->fs->root);
43767+ kill = !atomic_dec_return(&fs->users);
43768 spin_unlock(&fs->lock);
43769
43770 task_unlock(current);
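The fs_struct.c hunks do two related things: they call gr_set_chroot_entries()/gr_clear_chroot_entries() wherever a task's root is set, copied, or torn down, so grsecurity's chroot tracking follows the task, and they turn fs->users from a plain integer into an atomic counter (the matching change to struct fs_struct lives in include/linux/fs_struct.h elsewhere in this patch). A minimal sketch of the new drop path, with a simplified stand-in type — the _sketch names are not from the patch:

    #include <stdbool.h>

    struct fs_struct_sketch {
            int users;              /* atomic_t after the conversion */
    };

    /* Mirrors "kill = !atomic_dec_return(&fs->users);" above: only the
     * caller that drops the final reference goes on to free the fs_struct. */
    static bool put_fs_struct_user_sketch(struct fs_struct_sketch *fs)
    {
            return __sync_sub_and_fetch(&fs->users, 1) == 0;
    }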
43771diff -urNp linux-3.1.1/fs/fuse/cuse.c linux-3.1.1/fs/fuse/cuse.c
43772--- linux-3.1.1/fs/fuse/cuse.c 2011-11-11 15:19:27.000000000 -0500
43773+++ linux-3.1.1/fs/fuse/cuse.c 2011-11-16 18:39:08.000000000 -0500
43774@@ -586,10 +586,12 @@ static int __init cuse_init(void)
43775 INIT_LIST_HEAD(&cuse_conntbl[i]);
43776
43777 /* inherit and extend fuse_dev_operations */
43778- cuse_channel_fops = fuse_dev_operations;
43779- cuse_channel_fops.owner = THIS_MODULE;
43780- cuse_channel_fops.open = cuse_channel_open;
43781- cuse_channel_fops.release = cuse_channel_release;
43782+ pax_open_kernel();
43783+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43784+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43785+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43786+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43787+ pax_close_kernel();
43788
43789 cuse_class = class_create(THIS_MODULE, "cuse");
43790 if (IS_ERR(cuse_class))
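The cuse_init() hunk shows the pattern this patch uses whenever code needs to fill in a file_operations structure that constification makes read-only at runtime: the direct field assignments are replaced by a memcpy plus explicit pointer writes, all bracketed by pax_open_kernel()/pax_close_kernel(). A rough, self-contained sketch of the call shape — the _sketch names are illustrative, and the open/close macros are stubs standing in for the real arch-specific implementations that temporarily permit writes to read-only kernel data:

    #include <string.h>

    #define pax_open_kernel_sketch()  do { } while (0)  /* allow rodata writes */
    #define pax_close_kernel_sketch() do { } while (0)  /* re-protect rodata   */

    struct fops_sketch {                 /* stand-in for struct file_operations */
            void *owner;
            int (*open)(void);
            int (*release)(void);
    };

    /* Mirrors the cuse hunk: inherit the base ops, then override a few
     * entries, inside an open/close window so the const target can be written. */
    static void inherit_and_extend_fops_sketch(struct fops_sketch *dst,
                                               const struct fops_sketch *src,
                                               int (*open)(void),
                                               int (*release)(void))
    {
            pax_open_kernel_sketch();
            memcpy(dst, src, sizeof(*dst));
            dst->open = open;
            dst->release = release;
            pax_close_kernel_sketch();
    }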
43791diff -urNp linux-3.1.1/fs/fuse/dev.c linux-3.1.1/fs/fuse/dev.c
43792--- linux-3.1.1/fs/fuse/dev.c 2011-11-11 15:19:27.000000000 -0500
43793+++ linux-3.1.1/fs/fuse/dev.c 2011-11-16 18:39:08.000000000 -0500
43794@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(stru
43795 ret = 0;
43796 pipe_lock(pipe);
43797
43798- if (!pipe->readers) {
43799+ if (!atomic_read(&pipe->readers)) {
43800 send_sig(SIGPIPE, current, 0);
43801 if (!ret)
43802 ret = -EPIPE;
43803diff -urNp linux-3.1.1/fs/fuse/dir.c linux-3.1.1/fs/fuse/dir.c
43804--- linux-3.1.1/fs/fuse/dir.c 2011-11-11 15:19:27.000000000 -0500
43805+++ linux-3.1.1/fs/fuse/dir.c 2011-11-16 18:39:08.000000000 -0500
43806@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
43807 return link;
43808 }
43809
43810-static void free_link(char *link)
43811+static void free_link(const char *link)
43812 {
43813 if (!IS_ERR(link))
43814 free_page((unsigned long) link);
43815diff -urNp linux-3.1.1/fs/gfs2/inode.c linux-3.1.1/fs/gfs2/inode.c
43816--- linux-3.1.1/fs/gfs2/inode.c 2011-11-11 15:19:27.000000000 -0500
43817+++ linux-3.1.1/fs/gfs2/inode.c 2011-11-16 18:39:08.000000000 -0500
43818@@ -1517,7 +1517,7 @@ out:
43819
43820 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43821 {
43822- char *s = nd_get_link(nd);
43823+ const char *s = nd_get_link(nd);
43824 if (!IS_ERR(s))
43825 kfree(s);
43826 }
43827diff -urNp linux-3.1.1/fs/hfsplus/catalog.c linux-3.1.1/fs/hfsplus/catalog.c
43828--- linux-3.1.1/fs/hfsplus/catalog.c 2011-11-11 15:19:27.000000000 -0500
43829+++ linux-3.1.1/fs/hfsplus/catalog.c 2011-11-16 19:23:09.000000000 -0500
43830@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
43831 int err;
43832 u16 type;
43833
43834+ pax_track_stack();
43835+
43836 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43837 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43838 if (err)
43839@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
43840 int entry_size;
43841 int err;
43842
43843+ pax_track_stack();
43844+
43845 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
43846 str->name, cnid, inode->i_nlink);
43847 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
43848@@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid,
43849 int entry_size, type;
43850 int err;
43851
43852+ pax_track_stack();
43853+
43854 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
43855 cnid, src_dir->i_ino, src_name->name,
43856 dst_dir->i_ino, dst_name->name);
43857diff -urNp linux-3.1.1/fs/hfsplus/dir.c linux-3.1.1/fs/hfsplus/dir.c
43858--- linux-3.1.1/fs/hfsplus/dir.c 2011-11-11 15:19:27.000000000 -0500
43859+++ linux-3.1.1/fs/hfsplus/dir.c 2011-11-16 18:40:29.000000000 -0500
43860@@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *
43861 struct hfsplus_readdir_data *rd;
43862 u16 type;
43863
43864+ pax_track_stack();
43865+
43866 if (filp->f_pos >= inode->i_size)
43867 return 0;
43868
43869diff -urNp linux-3.1.1/fs/hfsplus/inode.c linux-3.1.1/fs/hfsplus/inode.c
43870--- linux-3.1.1/fs/hfsplus/inode.c 2011-11-11 15:19:27.000000000 -0500
43871+++ linux-3.1.1/fs/hfsplus/inode.c 2011-11-16 18:40:29.000000000 -0500
43872@@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode
43873 int res = 0;
43874 u16 type;
43875
43876+ pax_track_stack();
43877+
43878 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43879
43880 HFSPLUS_I(inode)->linkid = 0;
43881@@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode
43882 struct hfs_find_data fd;
43883 hfsplus_cat_entry entry;
43884
43885+ pax_track_stack();
43886+
43887 if (HFSPLUS_IS_RSRC(inode))
43888 main_inode = HFSPLUS_I(inode)->rsrc_inode;
43889
43890diff -urNp linux-3.1.1/fs/hfsplus/ioctl.c linux-3.1.1/fs/hfsplus/ioctl.c
43891--- linux-3.1.1/fs/hfsplus/ioctl.c 2011-11-11 15:19:27.000000000 -0500
43892+++ linux-3.1.1/fs/hfsplus/ioctl.c 2011-11-16 18:40:29.000000000 -0500
43893@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
43894 struct hfsplus_cat_file *file;
43895 int res;
43896
43897+ pax_track_stack();
43898+
43899 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43900 return -EOPNOTSUPP;
43901
43902@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43903 struct hfsplus_cat_file *file;
43904 ssize_t res = 0;
43905
43906+ pax_track_stack();
43907+
43908 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43909 return -EOPNOTSUPP;
43910
43911diff -urNp linux-3.1.1/fs/hfsplus/super.c linux-3.1.1/fs/hfsplus/super.c
43912--- linux-3.1.1/fs/hfsplus/super.c 2011-11-11 15:19:27.000000000 -0500
43913+++ linux-3.1.1/fs/hfsplus/super.c 2011-11-16 19:23:30.000000000 -0500
43914@@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct sup
43915 u64 last_fs_block, last_fs_page;
43916 int err;
43917
43918+ pax_track_stack();
43919+
43920 err = -EINVAL;
43921 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43922 if (!sbi)
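/*
 * The hfsplus hunks above, like many pax_track_stack() insertions elsewhere in
 * this patch, only add a call at the top of functions with large on-stack
 * objects. A rough user-space analogue of what such a hook can record -- the
 * deepest stack address reached, so the used range can later be scrubbed -- is
 * sketched below under that assumption; the real PaX implementation is
 * architecture-specific and is not reproduced here.
 */
#include <stdint.h>
#include <string.h>

static _Thread_local uintptr_t lowest_stack = UINTPTR_MAX;

static void track_stack_sketch(void)
{
	char marker;	/* its address approximates the current stack depth */

	if ((uintptr_t)&marker < lowest_stack)
		lowest_stack = (uintptr_t)&marker;
}

void large_frame_function_sketch(void)
{
	char search_key[512];	/* stand-in for hfsplus' large on-stack keys */

	track_stack_sketch();
	memset(search_key, 0, sizeof(search_key));
	/* ... use search_key ... */
}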
43923diff -urNp linux-3.1.1/fs/hugetlbfs/inode.c linux-3.1.1/fs/hugetlbfs/inode.c
43924--- linux-3.1.1/fs/hugetlbfs/inode.c 2011-11-11 15:19:27.000000000 -0500
43925+++ linux-3.1.1/fs/hugetlbfs/inode.c 2011-11-16 18:40:29.000000000 -0500
43926@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs
43927 .kill_sb = kill_litter_super,
43928 };
43929
43930-static struct vfsmount *hugetlbfs_vfsmount;
43931+struct vfsmount *hugetlbfs_vfsmount;
43932
43933 static int can_do_hugetlb_shm(void)
43934 {
43935diff -urNp linux-3.1.1/fs/inode.c linux-3.1.1/fs/inode.c
43936--- linux-3.1.1/fs/inode.c 2011-11-11 15:19:27.000000000 -0500
43937+++ linux-3.1.1/fs/inode.c 2011-11-16 18:39:08.000000000 -0500
43938@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
43939
43940 #ifdef CONFIG_SMP
43941 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
43942- static atomic_t shared_last_ino;
43943- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
43944+ static atomic_unchecked_t shared_last_ino;
43945+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
43946
43947 res = next - LAST_INO_BATCH;
43948 }
43949diff -urNp linux-3.1.1/fs/jbd/checkpoint.c linux-3.1.1/fs/jbd/checkpoint.c
43950--- linux-3.1.1/fs/jbd/checkpoint.c 2011-11-11 15:19:27.000000000 -0500
43951+++ linux-3.1.1/fs/jbd/checkpoint.c 2011-11-16 18:40:29.000000000 -0500
43952@@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal
43953 tid_t this_tid;
43954 int result;
43955
43956+ pax_track_stack();
43957+
43958 jbd_debug(1, "Start checkpoint\n");
43959
43960 /*
43961diff -urNp linux-3.1.1/fs/jffs2/compr_rtime.c linux-3.1.1/fs/jffs2/compr_rtime.c
43962--- linux-3.1.1/fs/jffs2/compr_rtime.c 2011-11-11 15:19:27.000000000 -0500
43963+++ linux-3.1.1/fs/jffs2/compr_rtime.c 2011-11-16 18:40:29.000000000 -0500
43964@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43965 int outpos = 0;
43966 int pos=0;
43967
43968+ pax_track_stack();
43969+
43970 memset(positions,0,sizeof(positions));
43971
43972 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43973@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
43974 int outpos = 0;
43975 int pos=0;
43976
43977+ pax_track_stack();
43978+
43979 memset(positions,0,sizeof(positions));
43980
43981 while (outpos<destlen) {
43982diff -urNp linux-3.1.1/fs/jffs2/compr_rubin.c linux-3.1.1/fs/jffs2/compr_rubin.c
43983--- linux-3.1.1/fs/jffs2/compr_rubin.c 2011-11-11 15:19:27.000000000 -0500
43984+++ linux-3.1.1/fs/jffs2/compr_rubin.c 2011-11-16 18:40:29.000000000 -0500
43985@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43986 int ret;
43987 uint32_t mysrclen, mydstlen;
43988
43989+ pax_track_stack();
43990+
43991 mysrclen = *sourcelen;
43992 mydstlen = *dstlen - 8;
43993
43994diff -urNp linux-3.1.1/fs/jffs2/erase.c linux-3.1.1/fs/jffs2/erase.c
43995--- linux-3.1.1/fs/jffs2/erase.c 2011-11-11 15:19:27.000000000 -0500
43996+++ linux-3.1.1/fs/jffs2/erase.c 2011-11-16 18:39:08.000000000 -0500
43997@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
43998 struct jffs2_unknown_node marker = {
43999 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44000 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44001- .totlen = cpu_to_je32(c->cleanmarker_size)
44002+ .totlen = cpu_to_je32(c->cleanmarker_size),
44003+ .hdr_crc = cpu_to_je32(0)
44004 };
44005
44006 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44007diff -urNp linux-3.1.1/fs/jffs2/wbuf.c linux-3.1.1/fs/jffs2/wbuf.c
44008--- linux-3.1.1/fs/jffs2/wbuf.c 2011-11-11 15:19:27.000000000 -0500
44009+++ linux-3.1.1/fs/jffs2/wbuf.c 2011-11-16 18:39:08.000000000 -0500
44010@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
44011 {
44012 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44013 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44014- .totlen = constant_cpu_to_je32(8)
44015+ .totlen = constant_cpu_to_je32(8),
44016+ .hdr_crc = constant_cpu_to_je32(0)
44017 };
44018
44019 /*
44020diff -urNp linux-3.1.1/fs/jffs2/xattr.c linux-3.1.1/fs/jffs2/xattr.c
44021--- linux-3.1.1/fs/jffs2/xattr.c 2011-11-11 15:19:27.000000000 -0500
44022+++ linux-3.1.1/fs/jffs2/xattr.c 2011-11-16 18:40:29.000000000 -0500
44023@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
44024
44025 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
44026
44027+ pax_track_stack();
44028+
44029 /* Phase.1 : Merge same xref */
44030 for (i=0; i < XREF_TMPHASH_SIZE; i++)
44031 xref_tmphash[i] = NULL;
44032diff -urNp linux-3.1.1/fs/jfs/super.c linux-3.1.1/fs/jfs/super.c
44033--- linux-3.1.1/fs/jfs/super.c 2011-11-11 15:19:27.000000000 -0500
44034+++ linux-3.1.1/fs/jfs/super.c 2011-11-16 18:39:08.000000000 -0500
44035@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
44036
44037 jfs_inode_cachep =
44038 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44039- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44040+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44041 init_once);
44042 if (jfs_inode_cachep == NULL)
44043 return -ENOMEM;
44044diff -urNp linux-3.1.1/fs/Kconfig.binfmt linux-3.1.1/fs/Kconfig.binfmt
44045--- linux-3.1.1/fs/Kconfig.binfmt 2011-11-11 15:19:27.000000000 -0500
44046+++ linux-3.1.1/fs/Kconfig.binfmt 2011-11-16 18:39:08.000000000 -0500
44047@@ -86,7 +86,7 @@ config HAVE_AOUT
44048
44049 config BINFMT_AOUT
44050 tristate "Kernel support for a.out and ECOFF binaries"
44051- depends on HAVE_AOUT
44052+ depends on HAVE_AOUT && BROKEN
44053 ---help---
44054 A.out (Assembler.OUTput) is a set of formats for libraries and
44055 executables used in the earliest versions of UNIX. Linux used
44056diff -urNp linux-3.1.1/fs/libfs.c linux-3.1.1/fs/libfs.c
44057--- linux-3.1.1/fs/libfs.c 2011-11-11 15:19:27.000000000 -0500
44058+++ linux-3.1.1/fs/libfs.c 2011-11-16 18:39:08.000000000 -0500
44059@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, v
44060
44061 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44062 struct dentry *next;
44063+ char d_name[sizeof(next->d_iname)];
44064+ const unsigned char *name;
44065+
44066 next = list_entry(p, struct dentry, d_u.d_child);
44067 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44068 if (!simple_positive(next)) {
44069@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, v
44070
44071 spin_unlock(&next->d_lock);
44072 spin_unlock(&dentry->d_lock);
44073- if (filldir(dirent, next->d_name.name,
44074+ name = next->d_name.name;
44075+ if (name == next->d_iname) {
44076+ memcpy(d_name, name, next->d_name.len);
44077+ name = d_name;
44078+ }
44079+ if (filldir(dirent, name,
44080 next->d_name.len, filp->f_pos,
44081 next->d_inode->i_ino,
44082 dt_type(next->d_inode)) < 0)
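/*
 * The dcache_readdir() hunk above snapshots a name that lives in the dentry's
 * inline d_iname storage before handing it to filldir(), which may block while
 * another task renames the entry; the locks protecting the name are dropped
 * just before the call. A minimal sketch of that "copy the inline name before
 * a blocking callback" pattern, with the buffer size and callback type as
 * assumptions:
 */
#include <string.h>

#define INLINE_NAME_LEN 40	/* stand-in for sizeof(dentry->d_iname) */

typedef int (*fill_fn)(void *ctx, const char *name, int len);

int emit_entry_sketch(void *ctx, const char *name, int len,
		      const char *inline_buf, fill_fn fill)
{
	char snapshot[INLINE_NAME_LEN];

	if (name == inline_buf && len <= (int)sizeof(snapshot)) {
		/* the name may be rewritten concurrently: copy it out first */
		memcpy(snapshot, name, len);
		name = snapshot;
	}
	return fill(ctx, name, len);
}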
44083diff -urNp linux-3.1.1/fs/lockd/clntproc.c linux-3.1.1/fs/lockd/clntproc.c
44084--- linux-3.1.1/fs/lockd/clntproc.c 2011-11-11 15:19:27.000000000 -0500
44085+++ linux-3.1.1/fs/lockd/clntproc.c 2011-11-16 18:40:29.000000000 -0500
44086@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
44087 /*
44088 * Cookie counter for NLM requests
44089 */
44090-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44091+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44092
44093 void nlmclnt_next_cookie(struct nlm_cookie *c)
44094 {
44095- u32 cookie = atomic_inc_return(&nlm_cookie);
44096+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44097
44098 memcpy(c->data, &cookie, 4);
44099 c->len=4;
44100@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
44101 struct nlm_rqst reqst, *req;
44102 int status;
44103
44104+ pax_track_stack();
44105+
44106 req = &reqst;
44107 memset(req, 0, sizeof(*req));
44108 locks_init_lock(&req->a_args.lock.fl);
44109diff -urNp linux-3.1.1/fs/locks.c linux-3.1.1/fs/locks.c
44110--- linux-3.1.1/fs/locks.c 2011-11-11 15:19:27.000000000 -0500
44111+++ linux-3.1.1/fs/locks.c 2011-11-16 18:39:08.000000000 -0500
44112@@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *fil
44113 return;
44114
44115 if (filp->f_op && filp->f_op->flock) {
44116- struct file_lock fl = {
44117+ struct file_lock flock = {
44118 .fl_pid = current->tgid,
44119 .fl_file = filp,
44120 .fl_flags = FL_FLOCK,
44121 .fl_type = F_UNLCK,
44122 .fl_end = OFFSET_MAX,
44123 };
44124- filp->f_op->flock(filp, F_SETLKW, &fl);
44125- if (fl.fl_ops && fl.fl_ops->fl_release_private)
44126- fl.fl_ops->fl_release_private(&fl);
44127+ filp->f_op->flock(filp, F_SETLKW, &flock);
44128+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
44129+ flock.fl_ops->fl_release_private(&flock);
44130 }
44131
44132 lock_flocks();
44133diff -urNp linux-3.1.1/fs/logfs/super.c linux-3.1.1/fs/logfs/super.c
44134--- linux-3.1.1/fs/logfs/super.c 2011-11-11 15:19:27.000000000 -0500
44135+++ linux-3.1.1/fs/logfs/super.c 2011-11-16 18:40:29.000000000 -0500
44136@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
44137 struct logfs_disk_super _ds1, *ds1 = &_ds1;
44138 int err, valid0, valid1;
44139
44140+ pax_track_stack();
44141+
44142 /* read first superblock */
44143 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
44144 if (err)
44145diff -urNp linux-3.1.1/fs/namei.c linux-3.1.1/fs/namei.c
44146--- linux-3.1.1/fs/namei.c 2011-11-11 15:19:27.000000000 -0500
44147+++ linux-3.1.1/fs/namei.c 2011-11-17 00:36:54.000000000 -0500
44148@@ -283,14 +283,22 @@ int generic_permission(struct inode *ino
44149
44150 if (S_ISDIR(inode->i_mode)) {
44151 /* DACs are overridable for directories */
44152- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44153- return 0;
44154 if (!(mask & MAY_WRITE))
44155 if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44156 return 0;
44157+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44158+ return 0;
44159 return -EACCES;
44160 }
44161 /*
44162+ * Searching includes executable on directories, else just read.
44163+ */
44164+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44165+ if (mask == MAY_READ)
44166+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44167+ return 0;
44168+
44169+ /*
44170 * Read/write DACs are always overridable.
44171 * Executable DACs are overridable when there is
44172 * at least one exec bit set.
44173@@ -299,14 +307,6 @@ int generic_permission(struct inode *ino
44174 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44175 return 0;
44176
44177- /*
44178- * Searching includes executable on directories, else just read.
44179- */
44180- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44181- if (mask == MAY_READ)
44182- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44183- return 0;
44184-
44185 return -EACCES;
44186 }
44187
44188@@ -653,11 +653,19 @@ follow_link(struct path *link, struct na
44189 return error;
44190 }
44191
44192+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
44193+ dentry->d_inode, dentry, nd->path.mnt)) {
44194+ error = -EACCES;
44195+ *p = ERR_PTR(error); /* no ->put_link(), please */
44196+ path_put(&nd->path);
44197+ return error;
44198+ }
44199+
44200 nd->last_type = LAST_BIND;
44201 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44202 error = PTR_ERR(*p);
44203 if (!IS_ERR(*p)) {
44204- char *s = nd_get_link(nd);
44205+ const char *s = nd_get_link(nd);
44206 error = 0;
44207 if (s)
44208 error = __vfs_follow_link(nd, s);
44209@@ -1622,6 +1630,12 @@ static int path_lookupat(int dfd, const
44210 if (!err)
44211 err = complete_walk(nd);
44212
44213+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44214+ if (!err)
44215+ path_put(&nd->path);
44216+ err = -ENOENT;
44217+ }
44218+
44219 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44220 if (!nd->inode->i_op->lookup) {
44221 path_put(&nd->path);
44222@@ -1649,6 +1663,9 @@ static int do_path_lookup(int dfd, const
44223 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44224
44225 if (likely(!retval)) {
44226+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44227+ return -ENOENT;
44228+
44229 if (unlikely(!audit_dummy_context())) {
44230 if (nd->path.dentry && nd->inode)
44231 audit_inode(name, nd->path.dentry);
44232@@ -2049,7 +2066,27 @@ static int may_open(struct path *path, i
44233 /*
44234 * Ensure there are no outstanding leases on the file.
44235 */
44236- return break_lease(inode, flag);
44237+ error = break_lease(inode, flag);
44238+
44239+ if (error)
44240+ return error;
44241+
44242+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
44243+ error = -EPERM;
44244+ goto exit;
44245+ }
44246+
44247+ if (gr_handle_rawio(inode)) {
44248+ error = -EPERM;
44249+ goto exit;
44250+ }
44251+
44252+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
44253+ error = -EACCES;
44254+ goto exit;
44255+ }
44256+exit:
44257+ return error;
44258 }
44259
44260 static int handle_truncate(struct file *filp)
44261@@ -2110,6 +2147,10 @@ static struct file *do_last(struct namei
44262 error = complete_walk(nd);
44263 if (error)
44264 return ERR_PTR(error);
44265+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44266+ error = -ENOENT;
44267+ goto exit;
44268+ }
44269 audit_inode(pathname, nd->path.dentry);
44270 if (open_flag & O_CREAT) {
44271 error = -EISDIR;
44272@@ -2120,6 +2161,10 @@ static struct file *do_last(struct namei
44273 error = complete_walk(nd);
44274 if (error)
44275 return ERR_PTR(error);
44276+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44277+ error = -ENOENT;
44278+ goto exit;
44279+ }
44280 audit_inode(pathname, dir);
44281 goto ok;
44282 }
44283@@ -2142,6 +2187,11 @@ static struct file *do_last(struct namei
44284 if (error)
44285 return ERR_PTR(-ECHILD);
44286
44287+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44288+ error = -ENOENT;
44289+ goto exit;
44290+ }
44291+
44292 error = -ENOTDIR;
44293 if (nd->flags & LOOKUP_DIRECTORY) {
44294 if (!nd->inode->i_op->lookup)
44295@@ -2181,6 +2231,12 @@ static struct file *do_last(struct namei
44296 /* Negative dentry, just create the file */
44297 if (!dentry->d_inode) {
44298 int mode = op->mode;
44299+
44300+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44301+ error = -EACCES;
44302+ goto exit_mutex_unlock;
44303+ }
44304+
44305 if (!IS_POSIXACL(dir->d_inode))
44306 mode &= ~current_umask();
44307 /*
44308@@ -2204,6 +2260,8 @@ static struct file *do_last(struct namei
44309 error = vfs_create(dir->d_inode, dentry, mode, nd);
44310 if (error)
44311 goto exit_mutex_unlock;
44312+ else
44313+ gr_handle_create(path->dentry, path->mnt);
44314 mutex_unlock(&dir->d_inode->i_mutex);
44315 dput(nd->path.dentry);
44316 nd->path.dentry = dentry;
44317@@ -2213,6 +2271,19 @@ static struct file *do_last(struct namei
44318 /*
44319 * It already exists.
44320 */
44321+
44322+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44323+ error = -ENOENT;
44324+ goto exit_mutex_unlock;
44325+ }
44326+
44327+ /* only check if O_CREAT is specified, all other checks need to go
44328+ into may_open */
44329+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44330+ error = -EACCES;
44331+ goto exit_mutex_unlock;
44332+ }
44333+
44334 mutex_unlock(&dir->d_inode->i_mutex);
44335 audit_inode(pathname, path->dentry);
44336
44337@@ -2425,6 +2496,11 @@ struct dentry *kern_path_create(int dfd,
44338 *path = nd.path;
44339 return dentry;
44340 eexist:
44341+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44342+ dput(dentry);
44343+ dentry = ERR_PTR(-ENOENT);
44344+ goto fail;
44345+ }
44346 dput(dentry);
44347 dentry = ERR_PTR(-EEXIST);
44348 fail:
44349@@ -2447,6 +2523,20 @@ struct dentry *user_path_create(int dfd,
44350 }
44351 EXPORT_SYMBOL(user_path_create);
44352
44353+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44354+{
44355+ char *tmp = getname(pathname);
44356+ struct dentry *res;
44357+ if (IS_ERR(tmp))
44358+ return ERR_CAST(tmp);
44359+ res = kern_path_create(dfd, tmp, path, is_dir);
44360+ if (IS_ERR(res))
44361+ putname(tmp);
44362+ else
44363+ *to = tmp;
44364+ return res;
44365+}
44366+
44367 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44368 {
44369 int error = may_create(dir, dentry);
44370@@ -2514,6 +2604,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44371 error = mnt_want_write(path.mnt);
44372 if (error)
44373 goto out_dput;
44374+
44375+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44376+ error = -EPERM;
44377+ goto out_drop_write;
44378+ }
44379+
44380+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44381+ error = -EACCES;
44382+ goto out_drop_write;
44383+ }
44384+
44385 error = security_path_mknod(&path, dentry, mode, dev);
44386 if (error)
44387 goto out_drop_write;
44388@@ -2531,6 +2632,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44389 }
44390 out_drop_write:
44391 mnt_drop_write(path.mnt);
44392+
44393+ if (!error)
44394+ gr_handle_create(dentry, path.mnt);
44395 out_dput:
44396 dput(dentry);
44397 mutex_unlock(&path.dentry->d_inode->i_mutex);
44398@@ -2580,12 +2684,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44399 error = mnt_want_write(path.mnt);
44400 if (error)
44401 goto out_dput;
44402+
44403+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44404+ error = -EACCES;
44405+ goto out_drop_write;
44406+ }
44407+
44408 error = security_path_mkdir(&path, dentry, mode);
44409 if (error)
44410 goto out_drop_write;
44411 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44412 out_drop_write:
44413 mnt_drop_write(path.mnt);
44414+
44415+ if (!error)
44416+ gr_handle_create(dentry, path.mnt);
44417 out_dput:
44418 dput(dentry);
44419 mutex_unlock(&path.dentry->d_inode->i_mutex);
44420@@ -2665,6 +2778,8 @@ static long do_rmdir(int dfd, const char
44421 char * name;
44422 struct dentry *dentry;
44423 struct nameidata nd;
44424+ ino_t saved_ino = 0;
44425+ dev_t saved_dev = 0;
44426
44427 error = user_path_parent(dfd, pathname, &nd, &name);
44428 if (error)
44429@@ -2693,6 +2808,15 @@ static long do_rmdir(int dfd, const char
44430 error = -ENOENT;
44431 goto exit3;
44432 }
44433+
44434+ saved_ino = dentry->d_inode->i_ino;
44435+ saved_dev = gr_get_dev_from_dentry(dentry);
44436+
44437+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44438+ error = -EACCES;
44439+ goto exit3;
44440+ }
44441+
44442 error = mnt_want_write(nd.path.mnt);
44443 if (error)
44444 goto exit3;
44445@@ -2700,6 +2824,8 @@ static long do_rmdir(int dfd, const char
44446 if (error)
44447 goto exit4;
44448 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44449+ if (!error && (saved_dev || saved_ino))
44450+ gr_handle_delete(saved_ino, saved_dev);
44451 exit4:
44452 mnt_drop_write(nd.path.mnt);
44453 exit3:
44454@@ -2762,6 +2888,8 @@ static long do_unlinkat(int dfd, const c
44455 struct dentry *dentry;
44456 struct nameidata nd;
44457 struct inode *inode = NULL;
44458+ ino_t saved_ino = 0;
44459+ dev_t saved_dev = 0;
44460
44461 error = user_path_parent(dfd, pathname, &nd, &name);
44462 if (error)
44463@@ -2784,6 +2912,16 @@ static long do_unlinkat(int dfd, const c
44464 if (!inode)
44465 goto slashes;
44466 ihold(inode);
44467+
44468+ if (inode->i_nlink <= 1) {
44469+ saved_ino = inode->i_ino;
44470+ saved_dev = gr_get_dev_from_dentry(dentry);
44471+ }
44472+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44473+ error = -EACCES;
44474+ goto exit2;
44475+ }
44476+
44477 error = mnt_want_write(nd.path.mnt);
44478 if (error)
44479 goto exit2;
44480@@ -2791,6 +2929,8 @@ static long do_unlinkat(int dfd, const c
44481 if (error)
44482 goto exit3;
44483 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44484+ if (!error && (saved_ino || saved_dev))
44485+ gr_handle_delete(saved_ino, saved_dev);
44486 exit3:
44487 mnt_drop_write(nd.path.mnt);
44488 exit2:
44489@@ -2866,10 +3006,18 @@ SYSCALL_DEFINE3(symlinkat, const char __
44490 error = mnt_want_write(path.mnt);
44491 if (error)
44492 goto out_dput;
44493+
44494+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44495+ error = -EACCES;
44496+ goto out_drop_write;
44497+ }
44498+
44499 error = security_path_symlink(&path, dentry, from);
44500 if (error)
44501 goto out_drop_write;
44502 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44503+ if (!error)
44504+ gr_handle_create(dentry, path.mnt);
44505 out_drop_write:
44506 mnt_drop_write(path.mnt);
44507 out_dput:
44508@@ -2941,6 +3089,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44509 {
44510 struct dentry *new_dentry;
44511 struct path old_path, new_path;
44512+ char *to;
44513 int how = 0;
44514 int error;
44515
44516@@ -2964,7 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44517 if (error)
44518 return error;
44519
44520- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44521+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44522 error = PTR_ERR(new_dentry);
44523 if (IS_ERR(new_dentry))
44524 goto out;
44525@@ -2975,13 +3124,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44526 error = mnt_want_write(new_path.mnt);
44527 if (error)
44528 goto out_dput;
44529+
44530+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44531+ old_path.dentry->d_inode,
44532+ old_path.dentry->d_inode->i_mode, to)) {
44533+ error = -EACCES;
44534+ goto out_drop_write;
44535+ }
44536+
44537+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44538+ old_path.dentry, old_path.mnt, to)) {
44539+ error = -EACCES;
44540+ goto out_drop_write;
44541+ }
44542+
44543 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44544 if (error)
44545 goto out_drop_write;
44546 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44547+ if (!error)
44548+ gr_handle_create(new_dentry, new_path.mnt);
44549 out_drop_write:
44550 mnt_drop_write(new_path.mnt);
44551 out_dput:
44552+ putname(to);
44553 dput(new_dentry);
44554 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44555 path_put(&new_path);
44556@@ -3153,6 +3319,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44557 char *to;
44558 int error;
44559
44560+ pax_track_stack();
44561+
44562 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44563 if (error)
44564 goto exit;
44565@@ -3209,6 +3377,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44566 if (new_dentry == trap)
44567 goto exit5;
44568
44569+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44570+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44571+ to);
44572+ if (error)
44573+ goto exit5;
44574+
44575 error = mnt_want_write(oldnd.path.mnt);
44576 if (error)
44577 goto exit5;
44578@@ -3218,6 +3392,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44579 goto exit6;
44580 error = vfs_rename(old_dir->d_inode, old_dentry,
44581 new_dir->d_inode, new_dentry);
44582+ if (!error)
44583+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44584+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44585 exit6:
44586 mnt_drop_write(oldnd.path.mnt);
44587 exit5:
44588@@ -3243,6 +3420,8 @@ SYSCALL_DEFINE2(rename, const char __use
44589
44590 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44591 {
44592+ char tmpbuf[64];
44593+ const char *newlink;
44594 int len;
44595
44596 len = PTR_ERR(link);
44597@@ -3252,7 +3431,14 @@ int vfs_readlink(struct dentry *dentry,
44598 len = strlen(link);
44599 if (len > (unsigned) buflen)
44600 len = buflen;
44601- if (copy_to_user(buffer, link, len))
44602+
44603+ if (len < sizeof(tmpbuf)) {
44604+ memcpy(tmpbuf, link, len);
44605+ newlink = tmpbuf;
44606+ } else
44607+ newlink = link;
44608+
44609+ if (copy_to_user(buffer, newlink, len))
44610 len = -EFAULT;
44611 out:
44612 return len;
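/*
 * The vfs_readlink() hunk above stages links shorter than 64 bytes in an
 * on-stack buffer and copies that private copy to user space instead of the
 * filesystem-owned string. A user-space sketch of the same staging, where
 * copy_out() is an assumption standing in for copy_to_user():
 */
#include <string.h>

static int copy_out(void *dst, const void *src, size_t len)	/* copy_to_user() stand-in */
{
	memcpy(dst, src, len);
	return 0;
}

int readlink_copy_sketch(void *dst, const char *link, size_t len)
{
	char tmpbuf[64];
	const char *src = link;

	if (len < sizeof(tmpbuf)) {
		memcpy(tmpbuf, link, len);	/* stage short links on the stack */
		src = tmpbuf;
	}
	return copy_out(dst, src, len) ? -1 : (int)len;
}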
44613diff -urNp linux-3.1.1/fs/namespace.c linux-3.1.1/fs/namespace.c
44614--- linux-3.1.1/fs/namespace.c 2011-11-11 15:19:27.000000000 -0500
44615+++ linux-3.1.1/fs/namespace.c 2011-11-16 18:40:29.000000000 -0500
44616@@ -1329,6 +1329,9 @@ static int do_umount(struct vfsmount *mn
44617 if (!(sb->s_flags & MS_RDONLY))
44618 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44619 up_write(&sb->s_umount);
44620+
44621+ gr_log_remount(mnt->mnt_devname, retval);
44622+
44623 return retval;
44624 }
44625
44626@@ -1348,6 +1351,9 @@ static int do_umount(struct vfsmount *mn
44627 br_write_unlock(vfsmount_lock);
44628 up_write(&namespace_sem);
44629 release_mounts(&umount_list);
44630+
44631+ gr_log_unmount(mnt->mnt_devname, retval);
44632+
44633 return retval;
44634 }
44635
44636@@ -2339,6 +2345,16 @@ long do_mount(char *dev_name, char *dir_
44637 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44638 MS_STRICTATIME);
44639
44640+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44641+ retval = -EPERM;
44642+ goto dput_out;
44643+ }
44644+
44645+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44646+ retval = -EPERM;
44647+ goto dput_out;
44648+ }
44649+
44650 if (flags & MS_REMOUNT)
44651 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44652 data_page);
44653@@ -2353,6 +2369,9 @@ long do_mount(char *dev_name, char *dir_
44654 dev_name, data_page);
44655 dput_out:
44656 path_put(&path);
44657+
44658+ gr_log_mount(dev_name, dir_name, retval);
44659+
44660 return retval;
44661 }
44662
44663@@ -2576,6 +2595,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
44664 if (error)
44665 goto out2;
44666
44667+ if (gr_handle_chroot_pivot()) {
44668+ error = -EPERM;
44669+ goto out2;
44670+ }
44671+
44672 get_fs_root(current->fs, &root);
44673 error = lock_mount(&old);
44674 if (error)
44675diff -urNp linux-3.1.1/fs/ncpfs/dir.c linux-3.1.1/fs/ncpfs/dir.c
44676--- linux-3.1.1/fs/ncpfs/dir.c 2011-11-11 15:19:27.000000000 -0500
44677+++ linux-3.1.1/fs/ncpfs/dir.c 2011-11-16 18:40:29.000000000 -0500
44678@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
44679 int res, val = 0, len;
44680 __u8 __name[NCP_MAXPATHLEN + 1];
44681
44682+ pax_track_stack();
44683+
44684 if (dentry == dentry->d_sb->s_root)
44685 return 1;
44686
44687@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
44688 int error, res, len;
44689 __u8 __name[NCP_MAXPATHLEN + 1];
44690
44691+ pax_track_stack();
44692+
44693 error = -EIO;
44694 if (!ncp_conn_valid(server))
44695 goto finished;
44696@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
44697 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44698 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44699
44700+ pax_track_stack();
44701+
44702 ncp_age_dentry(server, dentry);
44703 len = sizeof(__name);
44704 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44705@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
44706 int error, len;
44707 __u8 __name[NCP_MAXPATHLEN + 1];
44708
44709+ pax_track_stack();
44710+
44711 DPRINTK("ncp_mkdir: making %s/%s\n",
44712 dentry->d_parent->d_name.name, dentry->d_name.name);
44713
44714@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
44715 int old_len, new_len;
44716 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44717
44718+ pax_track_stack();
44719+
44720 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44721 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44722 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44723diff -urNp linux-3.1.1/fs/ncpfs/inode.c linux-3.1.1/fs/ncpfs/inode.c
44724--- linux-3.1.1/fs/ncpfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44725+++ linux-3.1.1/fs/ncpfs/inode.c 2011-11-16 18:40:29.000000000 -0500
44726@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
44727 #endif
44728 struct ncp_entry_info finfo;
44729
44730+ pax_track_stack();
44731+
44732 memset(&data, 0, sizeof(data));
44733 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44734 if (!server)
44735diff -urNp linux-3.1.1/fs/nfs/blocklayout/blocklayout.c linux-3.1.1/fs/nfs/blocklayout/blocklayout.c
44736--- linux-3.1.1/fs/nfs/blocklayout/blocklayout.c 2011-11-11 15:19:27.000000000 -0500
44737+++ linux-3.1.1/fs/nfs/blocklayout/blocklayout.c 2011-11-16 18:39:08.000000000 -0500
44738@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block
44739 */
44740 struct parallel_io {
44741 struct kref refcnt;
44742- struct rpc_call_ops call_ops;
44743+ rpc_call_ops_no_const call_ops;
44744 void (*pnfs_callback) (void *data);
44745 void *data;
44746 };
44747diff -urNp linux-3.1.1/fs/nfs/inode.c linux-3.1.1/fs/nfs/inode.c
44748--- linux-3.1.1/fs/nfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44749+++ linux-3.1.1/fs/nfs/inode.c 2011-11-16 18:39:08.000000000 -0500
44750@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
44751 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44752 nfsi->attrtimeo_timestamp = jiffies;
44753
44754- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44755+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44756 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44757 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44758 else
44759@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const st
44760 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44761 }
44762
44763-static atomic_long_t nfs_attr_generation_counter;
44764+static atomic_long_unchecked_t nfs_attr_generation_counter;
44765
44766 static unsigned long nfs_read_attr_generation_counter(void)
44767 {
44768- return atomic_long_read(&nfs_attr_generation_counter);
44769+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44770 }
44771
44772 unsigned long nfs_inc_attr_generation_counter(void)
44773 {
44774- return atomic_long_inc_return(&nfs_attr_generation_counter);
44775+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44776 }
44777
44778 void nfs_fattr_init(struct nfs_fattr *fattr)
44779diff -urNp linux-3.1.1/fs/nfsd/nfs4state.c linux-3.1.1/fs/nfsd/nfs4state.c
44780--- linux-3.1.1/fs/nfsd/nfs4state.c 2011-11-11 15:19:27.000000000 -0500
44781+++ linux-3.1.1/fs/nfsd/nfs4state.c 2011-11-16 18:40:29.000000000 -0500
44782@@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44783 unsigned int strhashval;
44784 int err;
44785
44786+ pax_track_stack();
44787+
44788 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44789 (long long) lock->lk_offset,
44790 (long long) lock->lk_length);
44791diff -urNp linux-3.1.1/fs/nfsd/nfs4xdr.c linux-3.1.1/fs/nfsd/nfs4xdr.c
44792--- linux-3.1.1/fs/nfsd/nfs4xdr.c 2011-11-11 15:19:27.000000000 -0500
44793+++ linux-3.1.1/fs/nfsd/nfs4xdr.c 2011-11-16 18:40:29.000000000 -0500
44794@@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44795 .dentry = dentry,
44796 };
44797
44798+ pax_track_stack();
44799+
44800 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44801 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44802 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44803diff -urNp linux-3.1.1/fs/nfsd/vfs.c linux-3.1.1/fs/nfsd/vfs.c
44804--- linux-3.1.1/fs/nfsd/vfs.c 2011-11-11 15:19:27.000000000 -0500
44805+++ linux-3.1.1/fs/nfsd/vfs.c 2011-11-16 18:39:08.000000000 -0500
44806@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44807 } else {
44808 oldfs = get_fs();
44809 set_fs(KERNEL_DS);
44810- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44811+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44812 set_fs(oldfs);
44813 }
44814
44815@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44816
44817 /* Write the data. */
44818 oldfs = get_fs(); set_fs(KERNEL_DS);
44819- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44820+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44821 set_fs(oldfs);
44822 if (host_err < 0)
44823 goto out_nfserr;
44824@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44825 */
44826
44827 oldfs = get_fs(); set_fs(KERNEL_DS);
44828- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44829+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44830 set_fs(oldfs);
44831
44832 if (host_err < 0)
44833diff -urNp linux-3.1.1/fs/notify/fanotify/fanotify_user.c linux-3.1.1/fs/notify/fanotify/fanotify_user.c
44834--- linux-3.1.1/fs/notify/fanotify/fanotify_user.c 2011-11-11 15:19:27.000000000 -0500
44835+++ linux-3.1.1/fs/notify/fanotify/fanotify_user.c 2011-11-16 18:39:08.000000000 -0500
44836@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
44837 goto out_close_fd;
44838
44839 ret = -EFAULT;
44840- if (copy_to_user(buf, &fanotify_event_metadata,
44841+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44842+ copy_to_user(buf, &fanotify_event_metadata,
44843 fanotify_event_metadata.event_len))
44844 goto out_kill_access_response;
44845
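/*
 * The fanotify hunk above refuses to copy an event whose self-described
 * event_len exceeds the metadata structure actually held by the kernel, so the
 * user copy can never read past that buffer. A sketch of the same check, with
 * the structure layout and copy_out() as assumptions:
 */
#include <stdint.h>
#include <string.h>

struct event_metadata_sketch {
	uint32_t event_len;	/* length the record claims to occupy */
	int32_t fd;
	uint64_t mask;
};

static int copy_out(void *dst, const void *src, size_t len)	/* copy_to_user() stand-in */
{
	memcpy(dst, src, len);
	return 0;
}

int copy_event_sketch(void *ubuf, const struct event_metadata_sketch *ev)
{
	/* a forged or corrupted event_len larger than *ev would make the copy
	 * below read past the end of the structure -- reject it up front */
	if (ev->event_len > sizeof(*ev))
		return -1;
	return copy_out(ubuf, ev, ev->event_len);
}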
44846diff -urNp linux-3.1.1/fs/notify/notification.c linux-3.1.1/fs/notify/notification.c
44847--- linux-3.1.1/fs/notify/notification.c 2011-11-11 15:19:27.000000000 -0500
44848+++ linux-3.1.1/fs/notify/notification.c 2011-11-16 18:39:08.000000000 -0500
44849@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44850 * get set to 0 so it will never get 'freed'
44851 */
44852 static struct fsnotify_event *q_overflow_event;
44853-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44854+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44855
44856 /**
44857 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44858@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44859 */
44860 u32 fsnotify_get_cookie(void)
44861 {
44862- return atomic_inc_return(&fsnotify_sync_cookie);
44863+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44864 }
44865 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44866
44867diff -urNp linux-3.1.1/fs/ntfs/dir.c linux-3.1.1/fs/ntfs/dir.c
44868--- linux-3.1.1/fs/ntfs/dir.c 2011-11-11 15:19:27.000000000 -0500
44869+++ linux-3.1.1/fs/ntfs/dir.c 2011-11-16 18:39:08.000000000 -0500
44870@@ -1329,7 +1329,7 @@ find_next_index_buffer:
44871 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44872 ~(s64)(ndir->itype.index.block_size - 1)));
44873 /* Bounds checks. */
44874- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44875+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44876 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44877 "inode 0x%lx or driver bug.", vdir->i_ino);
44878 goto err_out;
44879diff -urNp linux-3.1.1/fs/ntfs/file.c linux-3.1.1/fs/ntfs/file.c
44880--- linux-3.1.1/fs/ntfs/file.c 2011-11-11 15:19:27.000000000 -0500
44881+++ linux-3.1.1/fs/ntfs/file.c 2011-11-16 18:39:08.000000000 -0500
44882@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_
44883 #endif /* NTFS_RW */
44884 };
44885
44886-const struct file_operations ntfs_empty_file_ops = {};
44887+const struct file_operations ntfs_empty_file_ops __read_only;
44888
44889-const struct inode_operations ntfs_empty_inode_ops = {};
44890+const struct inode_operations ntfs_empty_inode_ops __read_only;
44891diff -urNp linux-3.1.1/fs/ocfs2/localalloc.c linux-3.1.1/fs/ocfs2/localalloc.c
44892--- linux-3.1.1/fs/ocfs2/localalloc.c 2011-11-11 15:19:27.000000000 -0500
44893+++ linux-3.1.1/fs/ocfs2/localalloc.c 2011-11-16 18:39:08.000000000 -0500
44894@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
44895 goto bail;
44896 }
44897
44898- atomic_inc(&osb->alloc_stats.moves);
44899+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44900
44901 bail:
44902 if (handle)
44903diff -urNp linux-3.1.1/fs/ocfs2/namei.c linux-3.1.1/fs/ocfs2/namei.c
44904--- linux-3.1.1/fs/ocfs2/namei.c 2011-11-11 15:19:27.000000000 -0500
44905+++ linux-3.1.1/fs/ocfs2/namei.c 2011-11-16 18:40:29.000000000 -0500
44906@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
44907 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44908 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44909
44910+ pax_track_stack();
44911+
44912 /* At some point it might be nice to break this function up a
44913 * bit. */
44914
44915diff -urNp linux-3.1.1/fs/ocfs2/ocfs2.h linux-3.1.1/fs/ocfs2/ocfs2.h
44916--- linux-3.1.1/fs/ocfs2/ocfs2.h 2011-11-11 15:19:27.000000000 -0500
44917+++ linux-3.1.1/fs/ocfs2/ocfs2.h 2011-11-16 18:39:08.000000000 -0500
44918@@ -235,11 +235,11 @@ enum ocfs2_vol_state
44919
44920 struct ocfs2_alloc_stats
44921 {
44922- atomic_t moves;
44923- atomic_t local_data;
44924- atomic_t bitmap_data;
44925- atomic_t bg_allocs;
44926- atomic_t bg_extends;
44927+ atomic_unchecked_t moves;
44928+ atomic_unchecked_t local_data;
44929+ atomic_unchecked_t bitmap_data;
44930+ atomic_unchecked_t bg_allocs;
44931+ atomic_unchecked_t bg_extends;
44932 };
44933
44934 enum ocfs2_local_alloc_state
44935diff -urNp linux-3.1.1/fs/ocfs2/suballoc.c linux-3.1.1/fs/ocfs2/suballoc.c
44936--- linux-3.1.1/fs/ocfs2/suballoc.c 2011-11-11 15:19:27.000000000 -0500
44937+++ linux-3.1.1/fs/ocfs2/suballoc.c 2011-11-16 18:39:08.000000000 -0500
44938@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
44939 mlog_errno(status);
44940 goto bail;
44941 }
44942- atomic_inc(&osb->alloc_stats.bg_extends);
44943+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44944
44945 /* You should never ask for this much metadata */
44946 BUG_ON(bits_wanted >
44947@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
44948 mlog_errno(status);
44949 goto bail;
44950 }
44951- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44952+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44953
44954 *suballoc_loc = res.sr_bg_blkno;
44955 *suballoc_bit_start = res.sr_bit_offset;
44956@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
44957 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44958 res->sr_bits);
44959
44960- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44961+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44962
44963 BUG_ON(res->sr_bits != 1);
44964
44965@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
44966 mlog_errno(status);
44967 goto bail;
44968 }
44969- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44970+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44971
44972 BUG_ON(res.sr_bits != 1);
44973
44974@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
44975 cluster_start,
44976 num_clusters);
44977 if (!status)
44978- atomic_inc(&osb->alloc_stats.local_data);
44979+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44980 } else {
44981 if (min_clusters > (osb->bitmap_cpg - 1)) {
44982 /* The only paths asking for contiguousness
44983@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
44984 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44985 res.sr_bg_blkno,
44986 res.sr_bit_offset);
44987- atomic_inc(&osb->alloc_stats.bitmap_data);
44988+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44989 *num_clusters = res.sr_bits;
44990 }
44991 }
44992diff -urNp linux-3.1.1/fs/ocfs2/super.c linux-3.1.1/fs/ocfs2/super.c
44993--- linux-3.1.1/fs/ocfs2/super.c 2011-11-11 15:19:27.000000000 -0500
44994+++ linux-3.1.1/fs/ocfs2/super.c 2011-11-16 18:39:08.000000000 -0500
44995@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44996 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44997 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44998 "Stats",
44999- atomic_read(&osb->alloc_stats.bitmap_data),
45000- atomic_read(&osb->alloc_stats.local_data),
45001- atomic_read(&osb->alloc_stats.bg_allocs),
45002- atomic_read(&osb->alloc_stats.moves),
45003- atomic_read(&osb->alloc_stats.bg_extends));
45004+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45005+ atomic_read_unchecked(&osb->alloc_stats.local_data),
45006+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45007+ atomic_read_unchecked(&osb->alloc_stats.moves),
45008+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45009
45010 out += snprintf(buf + out, len - out,
45011 "%10s => State: %u Descriptor: %llu Size: %u bits "
45012@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
45013 spin_lock_init(&osb->osb_xattr_lock);
45014 ocfs2_init_steal_slots(osb);
45015
45016- atomic_set(&osb->alloc_stats.moves, 0);
45017- atomic_set(&osb->alloc_stats.local_data, 0);
45018- atomic_set(&osb->alloc_stats.bitmap_data, 0);
45019- atomic_set(&osb->alloc_stats.bg_allocs, 0);
45020- atomic_set(&osb->alloc_stats.bg_extends, 0);
45021+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45022+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45023+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45024+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45025+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45026
45027 /* Copy the blockcheck stats from the superblock probe */
45028 osb->osb_ecc_stats = *stats;
45029diff -urNp linux-3.1.1/fs/ocfs2/symlink.c linux-3.1.1/fs/ocfs2/symlink.c
45030--- linux-3.1.1/fs/ocfs2/symlink.c 2011-11-11 15:19:27.000000000 -0500
45031+++ linux-3.1.1/fs/ocfs2/symlink.c 2011-11-16 18:39:08.000000000 -0500
45032@@ -142,7 +142,7 @@ bail:
45033
45034 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45035 {
45036- char *link = nd_get_link(nd);
45037+ const char *link = nd_get_link(nd);
45038 if (!IS_ERR(link))
45039 kfree(link);
45040 }
45041diff -urNp linux-3.1.1/fs/open.c linux-3.1.1/fs/open.c
45042--- linux-3.1.1/fs/open.c 2011-11-11 15:19:27.000000000 -0500
45043+++ linux-3.1.1/fs/open.c 2011-11-17 19:07:55.000000000 -0500
45044@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
45045 error = locks_verify_truncate(inode, NULL, length);
45046 if (!error)
45047 error = security_path_truncate(&path);
45048+
45049+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45050+ error = -EACCES;
45051+
45052 if (!error)
45053 error = do_truncate(path.dentry, length, 0, NULL);
45054
45055@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
45056 if (__mnt_is_readonly(path.mnt))
45057 res = -EROFS;
45058
45059+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45060+ res = -EACCES;
45061+
45062 out_path_release:
45063 path_put(&path);
45064 out:
45065@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
45066 if (error)
45067 goto dput_and_out;
45068
45069+ gr_log_chdir(path.dentry, path.mnt);
45070+
45071 set_fs_pwd(current->fs, &path);
45072
45073 dput_and_out:
45074@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
45075 goto out_putf;
45076
45077 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45078+
45079+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45080+ error = -EPERM;
45081+
45082+ if (!error)
45083+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45084+
45085 if (!error)
45086 set_fs_pwd(current->fs, &file->f_path);
45087 out_putf:
45088@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
45089 if (error)
45090 goto dput_and_out;
45091
45092+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45093+ goto dput_and_out;
45094+
45095 set_fs_root(current->fs, &path);
45096+
45097+ gr_handle_chroot_chdir(&path);
45098+
45099 error = 0;
45100 dput_and_out:
45101 path_put(&path);
45102@@ -456,6 +478,16 @@ static int chmod_common(struct path *pat
45103 if (error)
45104 return error;
45105 mutex_lock(&inode->i_mutex);
45106+
45107+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
45108+ error = -EACCES;
45109+ goto out_unlock;
45110+ }
45111+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45112+ error = -EACCES;
45113+ goto out_unlock;
45114+ }
45115+
45116 error = security_path_chmod(path->dentry, path->mnt, mode);
45117 if (error)
45118 goto out_unlock;
45119@@ -506,6 +538,9 @@ static int chown_common(struct path *pat
45120 int error;
45121 struct iattr newattrs;
45122
45123+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
45124+ return -EACCES;
45125+
45126 newattrs.ia_valid = ATTR_CTIME;
45127 if (user != (uid_t) -1) {
45128 newattrs.ia_valid |= ATTR_UID;
45129diff -urNp linux-3.1.1/fs/partitions/ldm.c linux-3.1.1/fs/partitions/ldm.c
45130--- linux-3.1.1/fs/partitions/ldm.c 2011-11-11 15:19:27.000000000 -0500
45131+++ linux-3.1.1/fs/partitions/ldm.c 2011-11-17 19:08:15.000000000 -0500
45132@@ -1322,7 +1322,7 @@ static bool ldm_frag_add (const u8 *data
45133 goto found;
45134 }
45135
45136- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45137+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45138 if (!f) {
45139 ldm_crit ("Out of memory.");
45140 return false;
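/*
 * The ldm.c hunk above only reorders the allocation size expression (payload
 * first, header last), presumably to suit grsecurity's overflow
 * instrumentation; the underlying concern is that header-plus-payload math
 * like sizeof(*f) + size * num can wrap. A sketch of the same computation
 * with explicit overflow checks, independent of that instrumentation:
 */
#include <stdint.h>
#include <stdlib.h>

void *alloc_frag_sketch(size_t hdr, size_t size, size_t num)
{
	size_t payload, total;

	if (size != 0 && num > SIZE_MAX / size)
		return NULL;			/* size * num would wrap */
	payload = size * num;

	if (payload > SIZE_MAX - hdr)
		return NULL;			/* adding the header would wrap */
	total = payload + hdr;

	return malloc(total);
}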
45141diff -urNp linux-3.1.1/fs/pipe.c linux-3.1.1/fs/pipe.c
45142--- linux-3.1.1/fs/pipe.c 2011-11-11 15:19:27.000000000 -0500
45143+++ linux-3.1.1/fs/pipe.c 2011-11-16 18:40:29.000000000 -0500
45144@@ -420,9 +420,9 @@ redo:
45145 }
45146 if (bufs) /* More to do? */
45147 continue;
45148- if (!pipe->writers)
45149+ if (!atomic_read(&pipe->writers))
45150 break;
45151- if (!pipe->waiting_writers) {
45152+ if (!atomic_read(&pipe->waiting_writers)) {
45153 /* syscall merging: Usually we must not sleep
45154 * if O_NONBLOCK is set, or if we got some data.
45155 * But if a writer sleeps in kernel space, then
45156@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
45157 mutex_lock(&inode->i_mutex);
45158 pipe = inode->i_pipe;
45159
45160- if (!pipe->readers) {
45161+ if (!atomic_read(&pipe->readers)) {
45162 send_sig(SIGPIPE, current, 0);
45163 ret = -EPIPE;
45164 goto out;
45165@@ -530,7 +530,7 @@ redo1:
45166 for (;;) {
45167 int bufs;
45168
45169- if (!pipe->readers) {
45170+ if (!atomic_read(&pipe->readers)) {
45171 send_sig(SIGPIPE, current, 0);
45172 if (!ret)
45173 ret = -EPIPE;
45174@@ -616,9 +616,9 @@ redo2:
45175 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45176 do_wakeup = 0;
45177 }
45178- pipe->waiting_writers++;
45179+ atomic_inc(&pipe->waiting_writers);
45180 pipe_wait(pipe);
45181- pipe->waiting_writers--;
45182+ atomic_dec(&pipe->waiting_writers);
45183 }
45184 out:
45185 mutex_unlock(&inode->i_mutex);
45186@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
45187 mask = 0;
45188 if (filp->f_mode & FMODE_READ) {
45189 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45190- if (!pipe->writers && filp->f_version != pipe->w_counter)
45191+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45192 mask |= POLLHUP;
45193 }
45194
45195@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
45196 * Most Unices do not set POLLERR for FIFOs but on Linux they
45197 * behave exactly like pipes for poll().
45198 */
45199- if (!pipe->readers)
45200+ if (!atomic_read(&pipe->readers))
45201 mask |= POLLERR;
45202 }
45203
45204@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
45205
45206 mutex_lock(&inode->i_mutex);
45207 pipe = inode->i_pipe;
45208- pipe->readers -= decr;
45209- pipe->writers -= decw;
45210+ atomic_sub(decr, &pipe->readers);
45211+ atomic_sub(decw, &pipe->writers);
45212
45213- if (!pipe->readers && !pipe->writers) {
45214+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45215 free_pipe_info(inode);
45216 } else {
45217 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45218@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
45219
45220 if (inode->i_pipe) {
45221 ret = 0;
45222- inode->i_pipe->readers++;
45223+ atomic_inc(&inode->i_pipe->readers);
45224 }
45225
45226 mutex_unlock(&inode->i_mutex);
45227@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
45228
45229 if (inode->i_pipe) {
45230 ret = 0;
45231- inode->i_pipe->writers++;
45232+ atomic_inc(&inode->i_pipe->writers);
45233 }
45234
45235 mutex_unlock(&inode->i_mutex);
45236@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
45237 if (inode->i_pipe) {
45238 ret = 0;
45239 if (filp->f_mode & FMODE_READ)
45240- inode->i_pipe->readers++;
45241+ atomic_inc(&inode->i_pipe->readers);
45242 if (filp->f_mode & FMODE_WRITE)
45243- inode->i_pipe->writers++;
45244+ atomic_inc(&inode->i_pipe->writers);
45245 }
45246
45247 mutex_unlock(&inode->i_mutex);
45248@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45249 inode->i_pipe = NULL;
45250 }
45251
45252-static struct vfsmount *pipe_mnt __read_mostly;
45253+struct vfsmount *pipe_mnt __read_mostly;
45254
45255 /*
45256 * pipefs_dname() is called from d_path().
45257@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
45258 goto fail_iput;
45259 inode->i_pipe = pipe;
45260
45261- pipe->readers = pipe->writers = 1;
45262+ atomic_set(&pipe->readers, 1);
45263+ atomic_set(&pipe->writers, 1);
45264 inode->i_fop = &rdwr_pipefifo_fops;
45265
45266 /*
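/*
 * The fs/pipe.c hunks above convert the pipe's readers, writers and
 * waiting_writers counters from plain ints to atomic_t. A user-space sketch of
 * that accounting with C11 atomics; struct pipe_sketch is an assumption, and
 * the kernel performs these updates under the inode mutex, which the final
 * "both zero" check relies on:
 */
#include <stdatomic.h>
#include <stdlib.h>

struct pipe_sketch {
	_Atomic int readers;
	_Atomic int writers;
};

void pipe_open_sketch(struct pipe_sketch *p, int for_read, int for_write)
{
	if (for_read)
		atomic_fetch_add(&p->readers, 1);
	if (for_write)
		atomic_fetch_add(&p->writers, 1);
}

void pipe_release_sketch(struct pipe_sketch *p, int decr, int decw)
{
	atomic_fetch_sub(&p->readers, decr);
	atomic_fetch_sub(&p->writers, decw);

	/* in the kernel this runs under inode->i_mutex, so checking both
	 * counters after the decrements is race-free there */
	if (atomic_load(&p->readers) == 0 && atomic_load(&p->writers) == 0)
		free(p);			/* free_pipe_info() analogue */
}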
45267diff -urNp linux-3.1.1/fs/proc/array.c linux-3.1.1/fs/proc/array.c
45268--- linux-3.1.1/fs/proc/array.c 2011-11-11 15:19:27.000000000 -0500
45269+++ linux-3.1.1/fs/proc/array.c 2011-11-17 18:42:02.000000000 -0500
45270@@ -60,6 +60,7 @@
45271 #include <linux/tty.h>
45272 #include <linux/string.h>
45273 #include <linux/mman.h>
45274+#include <linux/grsecurity.h>
45275 #include <linux/proc_fs.h>
45276 #include <linux/ioport.h>
45277 #include <linux/uaccess.h>
45278@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
45279 seq_putc(m, '\n');
45280 }
45281
45282+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45283+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45284+{
45285+ if (p->mm)
45286+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45287+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45288+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45289+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45290+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45291+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45292+ else
45293+ seq_printf(m, "PaX:\t-----\n");
45294+}
45295+#endif
45296+
45297 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45298 struct pid *pid, struct task_struct *task)
45299 {
45300@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
45301 task_cpus_allowed(m, task);
45302 cpuset_task_status_allowed(m, task);
45303 task_context_switch_counts(m, task);
45304+
45305+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45306+ task_pax(m, task);
45307+#endif
45308+
45309+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45310+ task_grsec_rbac(m, task);
45311+#endif
45312+
45313 return 0;
45314 }
45315
45316+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45317+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45318+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45319+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45320+#endif
45321+
45322 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45323 struct pid *pid, struct task_struct *task, int whole)
45324 {
45325@@ -378,6 +409,8 @@ static int do_task_stat(struct seq_file
45326 char tcomm[sizeof(task->comm)];
45327 unsigned long flags;
45328
45329+ pax_track_stack();
45330+
45331 state = *get_task_state(task);
45332 vsize = eip = esp = 0;
45333 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45334@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
45335 gtime = task->gtime;
45336 }
45337
45338+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45339+ if (PAX_RAND_FLAGS(mm)) {
45340+ eip = 0;
45341+ esp = 0;
45342+ wchan = 0;
45343+ }
45344+#endif
45345+#ifdef CONFIG_GRKERNSEC_HIDESYM
45346+ wchan = 0;
45347+ eip =0;
45348+ esp =0;
45349+#endif
45350+
45351 /* scale priority and nice values from timeslices to -20..20 */
45352 /* to make it look like a "normal" Unix priority/nice value */
45353 priority = task_prio(task);
45354@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
45355 vsize,
45356 mm ? get_mm_rss(mm) : 0,
45357 rsslim,
45358+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45359+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45360+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45361+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45362+#else
45363 mm ? (permitted ? mm->start_code : 1) : 0,
45364 mm ? (permitted ? mm->end_code : 1) : 0,
45365 (permitted && mm) ? mm->start_stack : 0,
45366+#endif
45367 esp,
45368 eip,
45369 /* The signal information here is obsolete.
45370@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
45371
45372 return 0;
45373 }
45374+
45375+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45376+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45377+{
45378+ u32 curr_ip = 0;
45379+ unsigned long flags;
45380+
45381+ if (lock_task_sighand(task, &flags)) {
45382+ curr_ip = task->signal->curr_ip;
45383+ unlock_task_sighand(task, &flags);
45384+ }
45385+
45386+ return sprintf(buffer, "%pI4\n", &curr_ip);
45387+}
45388+#endif
45389diff -urNp linux-3.1.1/fs/proc/base.c linux-3.1.1/fs/proc/base.c
45390--- linux-3.1.1/fs/proc/base.c 2011-11-11 15:19:27.000000000 -0500
45391+++ linux-3.1.1/fs/proc/base.c 2011-11-17 18:43:19.000000000 -0500
45392@@ -107,6 +107,22 @@ struct pid_entry {
45393 union proc_op op;
45394 };
45395
45396+struct getdents_callback {
45397+ struct linux_dirent __user * current_dir;
45398+ struct linux_dirent __user * previous;
45399+ struct file * file;
45400+ int count;
45401+ int error;
45402+};
45403+
45404+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45405+ loff_t offset, u64 ino, unsigned int d_type)
45406+{
45407+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45408+ buf->error = -EINVAL;
45409+ return 0;
45410+}
45411+
45412 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45413 .name = (NAME), \
45414 .len = sizeof(NAME) - 1, \
45415@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
45416 if (task == current)
45417 return mm;
45418
45419+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45420+ return ERR_PTR(-EPERM);
45421+
45422 /*
45423 * If current is actively ptrace'ing, and would also be
45424 * permitted to freshly attach with ptrace now, permit it.
45425@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
45426 if (!mm->arg_end)
45427 goto out_mm; /* Shh! No looking before we're done */
45428
45429+ if (gr_acl_handle_procpidmem(task))
45430+ goto out_mm;
45431+
45432 len = mm->arg_end - mm->arg_start;
45433
45434 if (len > PAGE_SIZE)
45435@@ -309,12 +331,28 @@ out:
45436 return res;
45437 }
45438
45439+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45440+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45441+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45442+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45443+#endif
45444+
45445 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45446 {
45447 struct mm_struct *mm = mm_for_maps(task);
45448 int res = PTR_ERR(mm);
45449 if (mm && !IS_ERR(mm)) {
45450 unsigned int nwords = 0;
45451+
45452+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45453+ /* allow if we're currently ptracing this task */
45454+ if (PAX_RAND_FLAGS(mm) &&
45455+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45456+ mmput(mm);
45457+ return 0;
45458+ }
45459+#endif
45460+
45461 do {
45462 nwords += 2;
45463 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45464@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
45465 }
45466
45467
45468-#ifdef CONFIG_KALLSYMS
45469+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45470 /*
45471 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45472 * Returns the resolved symbol. If that fails, simply return the address.
45473@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
45474 mutex_unlock(&task->signal->cred_guard_mutex);
45475 }
45476
45477-#ifdef CONFIG_STACKTRACE
45478+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45479
45480 #define MAX_STACK_TRACE_DEPTH 64
45481
45482@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
45483 return count;
45484 }
45485
45486-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45487+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45488 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45489 {
45490 long nr;
45491@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
45492 /************************************************************************/
45493
45494 /* permission checks */
45495-static int proc_fd_access_allowed(struct inode *inode)
45496+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45497 {
45498 struct task_struct *task;
45499 int allowed = 0;
45500@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
45501 */
45502 task = get_proc_task(inode);
45503 if (task) {
45504- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45505+ if (log)
45506+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45507+ else
45508+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45509 put_task_struct(task);
45510 }
45511 return allowed;
45512@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
45513 if (!task)
45514 goto out_no_task;
45515
45516+ if (gr_acl_handle_procpidmem(task))
45517+ goto out;
45518+
45519 ret = -ENOMEM;
45520 page = (char *)__get_free_page(GFP_TEMPORARY);
45521 if (!page)
45522@@ -1613,7 +1657,7 @@ static void *proc_pid_follow_link(struct
45523 path_put(&nd->path);
45524
45525 /* Are we allowed to snoop on the tasks file descriptors? */
45526- if (!proc_fd_access_allowed(inode))
45527+ if (!proc_fd_access_allowed(inode,0))
45528 goto out;
45529
45530 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45531@@ -1652,8 +1696,18 @@ static int proc_pid_readlink(struct dent
45532 struct path path;
45533
45534 /* Are we allowed to snoop on the tasks file descriptors? */
45535- if (!proc_fd_access_allowed(inode))
45536- goto out;
45537+ /* logging this is needed for learning on chromium to work properly,
45538+ but we don't want to flood the logs from 'ps' which does a readlink
45539+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45540+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45541+ */
45542+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45543+ if (!proc_fd_access_allowed(inode,0))
45544+ goto out;
45545+ } else {
45546+ if (!proc_fd_access_allowed(inode,1))
45547+ goto out;
45548+ }
45549
45550 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45551 if (error)
45552@@ -1718,7 +1772,11 @@ struct inode *proc_pid_make_inode(struct
45553 rcu_read_lock();
45554 cred = __task_cred(task);
45555 inode->i_uid = cred->euid;
45556+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45557+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45558+#else
45559 inode->i_gid = cred->egid;
45560+#endif
45561 rcu_read_unlock();
45562 }
45563 security_task_to_inode(task, inode);
45564@@ -1736,6 +1794,9 @@ int pid_getattr(struct vfsmount *mnt, st
45565 struct inode *inode = dentry->d_inode;
45566 struct task_struct *task;
45567 const struct cred *cred;
45568+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45569+ const struct cred *tmpcred = current_cred();
45570+#endif
45571
45572 generic_fillattr(inode, stat);
45573
45574@@ -1743,13 +1804,41 @@ int pid_getattr(struct vfsmount *mnt, st
45575 stat->uid = 0;
45576 stat->gid = 0;
45577 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45578+
45579+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45580+ rcu_read_unlock();
45581+ return -ENOENT;
45582+ }
45583+
45584 if (task) {
45585+ cred = __task_cred(task);
45586+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45587+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45588+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45589+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45590+#endif
45591+ ) {
45592+#endif
45593 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45594+#ifdef CONFIG_GRKERNSEC_PROC_USER
45595+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45596+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45597+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45598+#endif
45599 task_dumpable(task)) {
45600- cred = __task_cred(task);
45601 stat->uid = cred->euid;
45602+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45603+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45604+#else
45605 stat->gid = cred->egid;
45606+#endif
45607 }
45608+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45609+ } else {
45610+ rcu_read_unlock();
45611+ return -ENOENT;
45612+ }
45613+#endif
45614 }
45615 rcu_read_unlock();
45616 return 0;
45617@@ -1786,11 +1875,20 @@ int pid_revalidate(struct dentry *dentry
45618
45619 if (task) {
45620 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45621+#ifdef CONFIG_GRKERNSEC_PROC_USER
45622+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45623+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45624+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45625+#endif
45626 task_dumpable(task)) {
45627 rcu_read_lock();
45628 cred = __task_cred(task);
45629 inode->i_uid = cred->euid;
45630+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45631+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45632+#else
45633 inode->i_gid = cred->egid;
45634+#endif
45635 rcu_read_unlock();
45636 } else {
45637 inode->i_uid = 0;
45638@@ -1908,7 +2006,8 @@ static int proc_fd_info(struct inode *in
45639 int fd = proc_fd(inode);
45640
45641 if (task) {
45642- files = get_files_struct(task);
45643+ if (!gr_acl_handle_procpidmem(task))
45644+ files = get_files_struct(task);
45645 put_task_struct(task);
45646 }
45647 if (files) {
45648@@ -2176,11 +2275,21 @@ static const struct file_operations proc
45649 */
45650 static int proc_fd_permission(struct inode *inode, int mask)
45651 {
45652+ struct task_struct *task;
45653 int rv = generic_permission(inode, mask);
45654- if (rv == 0)
45655- return 0;
45656+
45657 if (task_pid(current) == proc_pid(inode))
45658 rv = 0;
45659+
45660+ task = get_proc_task(inode);
45661+ if (task == NULL)
45662+ return rv;
45663+
45664+ if (gr_acl_handle_procpidmem(task))
45665+ rv = -EACCES;
45666+
45667+ put_task_struct(task);
45668+
45669 return rv;
45670 }
45671
45672@@ -2290,6 +2399,9 @@ static struct dentry *proc_pident_lookup
45673 if (!task)
45674 goto out_no_task;
45675
45676+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45677+ goto out;
45678+
45679 /*
45680 * Yes, it does not scale. And it should not. Don't add
45681 * new entries into /proc/<tgid>/ without very good reasons.
45682@@ -2334,6 +2446,9 @@ static int proc_pident_readdir(struct fi
45683 if (!task)
45684 goto out_no_task;
45685
45686+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45687+ goto out;
45688+
45689 ret = 0;
45690 i = filp->f_pos;
45691 switch (i) {
45692@@ -2604,7 +2719,7 @@ static void *proc_self_follow_link(struc
45693 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45694 void *cookie)
45695 {
45696- char *s = nd_get_link(nd);
45697+ const char *s = nd_get_link(nd);
45698 if (!IS_ERR(s))
45699 __putname(s);
45700 }
45701@@ -2802,7 +2917,7 @@ static const struct pid_entry tgid_base_
45702 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45703 #endif
45704 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45705-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45706+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45707 INF("syscall", S_IRUGO, proc_pid_syscall),
45708 #endif
45709 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45710@@ -2827,10 +2942,10 @@ static const struct pid_entry tgid_base_
45711 #ifdef CONFIG_SECURITY
45712 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45713 #endif
45714-#ifdef CONFIG_KALLSYMS
45715+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45716 INF("wchan", S_IRUGO, proc_pid_wchan),
45717 #endif
45718-#ifdef CONFIG_STACKTRACE
45719+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45720 ONE("stack", S_IRUGO, proc_pid_stack),
45721 #endif
45722 #ifdef CONFIG_SCHEDSTATS
45723@@ -2864,6 +2979,9 @@ static const struct pid_entry tgid_base_
45724 #ifdef CONFIG_HARDWALL
45725 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45726 #endif
45727+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45728+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45729+#endif
45730 };
45731
45732 static int proc_tgid_base_readdir(struct file * filp,
45733@@ -2989,7 +3107,14 @@ static struct dentry *proc_pid_instantia
45734 if (!inode)
45735 goto out;
45736
45737+#ifdef CONFIG_GRKERNSEC_PROC_USER
45738+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45739+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45740+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45741+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45742+#else
45743 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45744+#endif
45745 inode->i_op = &proc_tgid_base_inode_operations;
45746 inode->i_fop = &proc_tgid_base_operations;
45747 inode->i_flags|=S_IMMUTABLE;
45748@@ -3031,7 +3156,14 @@ struct dentry *proc_pid_lookup(struct in
45749 if (!task)
45750 goto out;
45751
45752+ if (!has_group_leader_pid(task))
45753+ goto out_put_task;
45754+
45755+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45756+ goto out_put_task;
45757+
45758 result = proc_pid_instantiate(dir, dentry, task, NULL);
45759+out_put_task:
45760 put_task_struct(task);
45761 out:
45762 return result;
45763@@ -3096,6 +3228,11 @@ int proc_pid_readdir(struct file * filp,
45764 {
45765 unsigned int nr;
45766 struct task_struct *reaper;
45767+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45768+ const struct cred *tmpcred = current_cred();
45769+ const struct cred *itercred;
45770+#endif
45771+ filldir_t __filldir = filldir;
45772 struct tgid_iter iter;
45773 struct pid_namespace *ns;
45774
45775@@ -3119,8 +3256,27 @@ int proc_pid_readdir(struct file * filp,
45776 for (iter = next_tgid(ns, iter);
45777 iter.task;
45778 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45779+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45780+ rcu_read_lock();
45781+ itercred = __task_cred(iter.task);
45782+#endif
45783+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45784+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45785+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45786+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45787+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45788+#endif
45789+ )
45790+#endif
45791+ )
45792+ __filldir = &gr_fake_filldir;
45793+ else
45794+ __filldir = filldir;
45795+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45796+ rcu_read_unlock();
45797+#endif
45798 filp->f_pos = iter.tgid + TGID_OFFSET;
45799- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45800+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45801 put_task_struct(iter.task);
45802 goto out;
45803 }
45804@@ -3148,7 +3304,7 @@ static const struct pid_entry tid_base_s
45805 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45806 #endif
45807 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45808-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45809+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45810 INF("syscall", S_IRUGO, proc_pid_syscall),
45811 #endif
45812 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45813@@ -3172,10 +3328,10 @@ static const struct pid_entry tid_base_s
45814 #ifdef CONFIG_SECURITY
45815 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45816 #endif
45817-#ifdef CONFIG_KALLSYMS
45818+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45819 INF("wchan", S_IRUGO, proc_pid_wchan),
45820 #endif
45821-#ifdef CONFIG_STACKTRACE
45822+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45823 ONE("stack", S_IRUGO, proc_pid_stack),
45824 #endif
45825 #ifdef CONFIG_SCHEDSTATS
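
The proc_pid_readdir hunk in the fs/proc/base.c diff above hides tasks from the /proc listing by swapping the filldir callback for gr_fake_filldir on entries the caller must not see. The following is a minimal user-space sketch of that per-entry callback-swap pattern using opendir/readdir; the hidden() policy and the emit callbacks are invented for illustration and are not the grsecurity functions.

#include <stdio.h>
#include <dirent.h>

typedef int (*fill_t)(void *ctx, const char *name);

/* Normal callback: actually report the entry. */
static int real_fill(void *ctx, const char *name)
{
    (void)ctx;
    return puts(name) < 0 ? -1 : 0;
}

/* "Fake" callback: accept the entry but emit nothing, so the caller
 * cannot tell a hidden entry existed (the gr_fake_filldir idea). */
static int fake_fill(void *ctx, const char *name)
{
    (void)ctx; (void)name;
    return 0;
}

/* Toy policy: hide dot-files. In the patch the policy is chroot/ACL/uid based. */
static int hidden(const char *name)
{
    return name[0] == '.';
}

static int list_dir(const char *path, fill_t fill)
{
    DIR *d = opendir(path);
    struct dirent *de;
    fill_t cb;

    if (!d)
        return -1;
    while ((de = readdir(d)) != NULL) {
        cb = hidden(de->d_name) ? fake_fill : fill;   /* per-entry swap */
        if (cb(NULL, de->d_name))
            break;
    }
    closedir(d);
    return 0;
}

int main(void)
{
    return list_dir(".", real_fill);
}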
45826diff -urNp linux-3.1.1/fs/proc/cmdline.c linux-3.1.1/fs/proc/cmdline.c
45827--- linux-3.1.1/fs/proc/cmdline.c 2011-11-11 15:19:27.000000000 -0500
45828+++ linux-3.1.1/fs/proc/cmdline.c 2011-11-16 18:40:29.000000000 -0500
45829@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45830
45831 static int __init proc_cmdline_init(void)
45832 {
45833+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45834+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45835+#else
45836 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45837+#endif
45838 return 0;
45839 }
45840 module_init(proc_cmdline_init);
45841diff -urNp linux-3.1.1/fs/proc/devices.c linux-3.1.1/fs/proc/devices.c
45842--- linux-3.1.1/fs/proc/devices.c 2011-11-11 15:19:27.000000000 -0500
45843+++ linux-3.1.1/fs/proc/devices.c 2011-11-16 18:40:29.000000000 -0500
45844@@ -64,7 +64,11 @@ static const struct file_operations proc
45845
45846 static int __init proc_devices_init(void)
45847 {
45848+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45849+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45850+#else
45851 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45852+#endif
45853 return 0;
45854 }
45855 module_init(proc_devices_init);
45856diff -urNp linux-3.1.1/fs/proc/inode.c linux-3.1.1/fs/proc/inode.c
45857--- linux-3.1.1/fs/proc/inode.c 2011-11-11 15:19:27.000000000 -0500
45858+++ linux-3.1.1/fs/proc/inode.c 2011-11-16 18:40:29.000000000 -0500
45859@@ -18,12 +18,18 @@
45860 #include <linux/module.h>
45861 #include <linux/sysctl.h>
45862 #include <linux/slab.h>
45863+#include <linux/grsecurity.h>
45864
45865 #include <asm/system.h>
45866 #include <asm/uaccess.h>
45867
45868 #include "internal.h"
45869
45870+#ifdef CONFIG_PROC_SYSCTL
45871+extern const struct inode_operations proc_sys_inode_operations;
45872+extern const struct inode_operations proc_sys_dir_operations;
45873+#endif
45874+
45875 static void proc_evict_inode(struct inode *inode)
45876 {
45877 struct proc_dir_entry *de;
45878@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inod
45879 ns_ops = PROC_I(inode)->ns_ops;
45880 if (ns_ops && ns_ops->put)
45881 ns_ops->put(PROC_I(inode)->ns);
45882+
45883+#ifdef CONFIG_PROC_SYSCTL
45884+ if (inode->i_op == &proc_sys_inode_operations ||
45885+ inode->i_op == &proc_sys_dir_operations)
45886+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45887+#endif
45888+
45889 }
45890
45891 static struct kmem_cache * proc_inode_cachep;
45892@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct supe
45893 if (de->mode) {
45894 inode->i_mode = de->mode;
45895 inode->i_uid = de->uid;
45896+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45897+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45898+#else
45899 inode->i_gid = de->gid;
45900+#endif
45901 }
45902 if (de->size)
45903 inode->i_size = de->size;
45904diff -urNp linux-3.1.1/fs/proc/internal.h linux-3.1.1/fs/proc/internal.h
45905--- linux-3.1.1/fs/proc/internal.h 2011-11-11 15:19:27.000000000 -0500
45906+++ linux-3.1.1/fs/proc/internal.h 2011-11-16 18:40:29.000000000 -0500
45907@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45908 struct pid *pid, struct task_struct *task);
45909 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45910 struct pid *pid, struct task_struct *task);
45911+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45912+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45913+#endif
45914 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45915
45916 extern const struct file_operations proc_maps_operations;
45917diff -urNp linux-3.1.1/fs/proc/Kconfig linux-3.1.1/fs/proc/Kconfig
45918--- linux-3.1.1/fs/proc/Kconfig 2011-11-11 15:19:27.000000000 -0500
45919+++ linux-3.1.1/fs/proc/Kconfig 2011-11-16 18:40:29.000000000 -0500
45920@@ -30,12 +30,12 @@ config PROC_FS
45921
45922 config PROC_KCORE
45923 bool "/proc/kcore support" if !ARM
45924- depends on PROC_FS && MMU
45925+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45926
45927 config PROC_VMCORE
45928 bool "/proc/vmcore support"
45929- depends on PROC_FS && CRASH_DUMP
45930- default y
45931+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45932+ default n
45933 help
45934 Exports the dump image of crashed kernel in ELF format.
45935
45936@@ -59,8 +59,8 @@ config PROC_SYSCTL
45937 limited in memory.
45938
45939 config PROC_PAGE_MONITOR
45940- default y
45941- depends on PROC_FS && MMU
45942+ default n
45943+ depends on PROC_FS && MMU && !GRKERNSEC
45944 bool "Enable /proc page monitoring" if EXPERT
45945 help
45946 Various /proc files exist to monitor process memory utilization:
45947diff -urNp linux-3.1.1/fs/proc/kcore.c linux-3.1.1/fs/proc/kcore.c
45948--- linux-3.1.1/fs/proc/kcore.c 2011-11-11 15:19:27.000000000 -0500
45949+++ linux-3.1.1/fs/proc/kcore.c 2011-11-16 18:40:29.000000000 -0500
45950@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
45951 off_t offset = 0;
45952 struct kcore_list *m;
45953
45954+ pax_track_stack();
45955+
45956 /* setup ELF header */
45957 elf = (struct elfhdr *) bufp;
45958 bufp += sizeof(struct elfhdr);
45959@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
45960 * the addresses in the elf_phdr on our list.
45961 */
45962 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45963- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45964+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45965+ if (tsz > buflen)
45966 tsz = buflen;
45967-
45968+
45969 while (buflen) {
45970 struct kcore_list *m;
45971
45972@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
45973 kfree(elf_buf);
45974 } else {
45975 if (kern_addr_valid(start)) {
45976- unsigned long n;
45977+ char *elf_buf;
45978+ mm_segment_t oldfs;
45979
45980- n = copy_to_user(buffer, (char *)start, tsz);
45981- /*
45982- * We cannot distingush between fault on source
45983- * and fault on destination. When this happens
45984- * we clear too and hope it will trigger the
45985- * EFAULT again.
45986- */
45987- if (n) {
45988- if (clear_user(buffer + tsz - n,
45989- n))
45990+ elf_buf = kmalloc(tsz, GFP_KERNEL);
45991+ if (!elf_buf)
45992+ return -ENOMEM;
45993+ oldfs = get_fs();
45994+ set_fs(KERNEL_DS);
45995+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45996+ set_fs(oldfs);
45997+ if (copy_to_user(buffer, elf_buf, tsz)) {
45998+ kfree(elf_buf);
45999 return -EFAULT;
46000+ }
46001 }
46002+ set_fs(oldfs);
46003+ kfree(elf_buf);
46004 } else {
46005 if (clear_user(buffer, tsz))
46006 return -EFAULT;
46007@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
46008
46009 static int open_kcore(struct inode *inode, struct file *filp)
46010 {
46011+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46012+ return -EPERM;
46013+#endif
46014 if (!capable(CAP_SYS_RAWIO))
46015 return -EPERM;
46016 if (kcore_need_update)
46017diff -urNp linux-3.1.1/fs/proc/meminfo.c linux-3.1.1/fs/proc/meminfo.c
46018--- linux-3.1.1/fs/proc/meminfo.c 2011-11-11 15:19:27.000000000 -0500
46019+++ linux-3.1.1/fs/proc/meminfo.c 2011-11-16 18:40:29.000000000 -0500
46020@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
46021 unsigned long pages[NR_LRU_LISTS];
46022 int lru;
46023
46024+ pax_track_stack();
46025+
46026 /*
46027 * display in kilobytes.
46028 */
46029@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
46030 vmi.used >> 10,
46031 vmi.largest_chunk >> 10
46032 #ifdef CONFIG_MEMORY_FAILURE
46033- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46034+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46035 #endif
46036 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46037 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46038diff -urNp linux-3.1.1/fs/proc/nommu.c linux-3.1.1/fs/proc/nommu.c
46039--- linux-3.1.1/fs/proc/nommu.c 2011-11-11 15:19:27.000000000 -0500
46040+++ linux-3.1.1/fs/proc/nommu.c 2011-11-16 18:39:08.000000000 -0500
46041@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
46042 if (len < 1)
46043 len = 1;
46044 seq_printf(m, "%*c", len, ' ');
46045- seq_path(m, &file->f_path, "");
46046+ seq_path(m, &file->f_path, "\n\\");
46047 }
46048
46049 seq_putc(m, '\n');
46050diff -urNp linux-3.1.1/fs/proc/proc_net.c linux-3.1.1/fs/proc/proc_net.c
46051--- linux-3.1.1/fs/proc/proc_net.c 2011-11-11 15:19:27.000000000 -0500
46052+++ linux-3.1.1/fs/proc/proc_net.c 2011-11-16 18:40:29.000000000 -0500
46053@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
46054 struct task_struct *task;
46055 struct nsproxy *ns;
46056 struct net *net = NULL;
46057+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46058+ const struct cred *cred = current_cred();
46059+#endif
46060+
46061+#ifdef CONFIG_GRKERNSEC_PROC_USER
46062+ if (cred->fsuid)
46063+ return net;
46064+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46065+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46066+ return net;
46067+#endif
46068
46069 rcu_read_lock();
46070 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46071diff -urNp linux-3.1.1/fs/proc/proc_sysctl.c linux-3.1.1/fs/proc/proc_sysctl.c
46072--- linux-3.1.1/fs/proc/proc_sysctl.c 2011-11-11 15:19:27.000000000 -0500
46073+++ linux-3.1.1/fs/proc/proc_sysctl.c 2011-11-16 18:40:29.000000000 -0500
46074@@ -8,11 +8,13 @@
46075 #include <linux/namei.h>
46076 #include "internal.h"
46077
46078+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46079+
46080 static const struct dentry_operations proc_sys_dentry_operations;
46081 static const struct file_operations proc_sys_file_operations;
46082-static const struct inode_operations proc_sys_inode_operations;
46083+const struct inode_operations proc_sys_inode_operations;
46084 static const struct file_operations proc_sys_dir_file_operations;
46085-static const struct inode_operations proc_sys_dir_operations;
46086+const struct inode_operations proc_sys_dir_operations;
46087
46088 static struct inode *proc_sys_make_inode(struct super_block *sb,
46089 struct ctl_table_header *head, struct ctl_table *table)
46090@@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(st
46091
46092 err = NULL;
46093 d_set_d_op(dentry, &proc_sys_dentry_operations);
46094+
46095+ gr_handle_proc_create(dentry, inode);
46096+
46097 d_add(dentry, inode);
46098
46099+ if (gr_handle_sysctl(p, MAY_EXEC))
46100+ err = ERR_PTR(-ENOENT);
46101+
46102 out:
46103 sysctl_head_finish(head);
46104 return err;
46105@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct fi
46106 return -ENOMEM;
46107 } else {
46108 d_set_d_op(child, &proc_sys_dentry_operations);
46109+
46110+ gr_handle_proc_create(child, inode);
46111+
46112 d_add(child, inode);
46113 }
46114 } else {
46115@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header
46116 if (*pos < file->f_pos)
46117 continue;
46118
46119+ if (gr_handle_sysctl(table, 0))
46120+ continue;
46121+
46122 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46123 if (res)
46124 return res;
46125@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmo
46126 if (IS_ERR(head))
46127 return PTR_ERR(head);
46128
46129+ if (table && gr_handle_sysctl(table, MAY_EXEC))
46130+ return -ENOENT;
46131+
46132 generic_fillattr(inode, stat);
46133 if (table)
46134 stat->mode = (stat->mode & S_IFMT) | table->mode;
46135@@ -374,13 +391,13 @@ static const struct file_operations proc
46136 .llseek = generic_file_llseek,
46137 };
46138
46139-static const struct inode_operations proc_sys_inode_operations = {
46140+const struct inode_operations proc_sys_inode_operations = {
46141 .permission = proc_sys_permission,
46142 .setattr = proc_sys_setattr,
46143 .getattr = proc_sys_getattr,
46144 };
46145
46146-static const struct inode_operations proc_sys_dir_operations = {
46147+const struct inode_operations proc_sys_dir_operations = {
46148 .lookup = proc_sys_lookup,
46149 .permission = proc_sys_permission,
46150 .setattr = proc_sys_setattr,
46151diff -urNp linux-3.1.1/fs/proc/root.c linux-3.1.1/fs/proc/root.c
46152--- linux-3.1.1/fs/proc/root.c 2011-11-11 15:19:27.000000000 -0500
46153+++ linux-3.1.1/fs/proc/root.c 2011-11-16 18:40:29.000000000 -0500
46154@@ -123,7 +123,15 @@ void __init proc_root_init(void)
46155 #ifdef CONFIG_PROC_DEVICETREE
46156 proc_device_tree_init();
46157 #endif
46158+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46159+#ifdef CONFIG_GRKERNSEC_PROC_USER
46160+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46161+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46162+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46163+#endif
46164+#else
46165 proc_mkdir("bus", NULL);
46166+#endif
46167 proc_sys_init();
46168 }
46169
46170diff -urNp linux-3.1.1/fs/proc/task_mmu.c linux-3.1.1/fs/proc/task_mmu.c
46171--- linux-3.1.1/fs/proc/task_mmu.c 2011-11-11 15:19:27.000000000 -0500
46172+++ linux-3.1.1/fs/proc/task_mmu.c 2011-11-16 18:40:29.000000000 -0500
46173@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
46174 "VmExe:\t%8lu kB\n"
46175 "VmLib:\t%8lu kB\n"
46176 "VmPTE:\t%8lu kB\n"
46177- "VmSwap:\t%8lu kB\n",
46178- hiwater_vm << (PAGE_SHIFT-10),
46179+ "VmSwap:\t%8lu kB\n"
46180+
46181+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46182+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46183+#endif
46184+
46185+ ,hiwater_vm << (PAGE_SHIFT-10),
46186 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46187 mm->locked_vm << (PAGE_SHIFT-10),
46188 hiwater_rss << (PAGE_SHIFT-10),
46189@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
46190 data << (PAGE_SHIFT-10),
46191 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46192 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46193- swap << (PAGE_SHIFT-10));
46194+ swap << (PAGE_SHIFT-10)
46195+
46196+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46197+ , mm->context.user_cs_base, mm->context.user_cs_limit
46198+#endif
46199+
46200+ );
46201 }
46202
46203 unsigned long task_vsize(struct mm_struct *mm)
46204@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
46205 return ret;
46206 }
46207
46208+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46209+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46210+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46211+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46212+#endif
46213+
46214 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46215 {
46216 struct mm_struct *mm = vma->vm_mm;
46217@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
46218 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46219 }
46220
46221- /* We don't show the stack guard page in /proc/maps */
46222+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46223+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46224+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46225+#else
46226 start = vma->vm_start;
46227- if (stack_guard_page_start(vma, start))
46228- start += PAGE_SIZE;
46229 end = vma->vm_end;
46230- if (stack_guard_page_end(vma, end))
46231- end -= PAGE_SIZE;
46232+#endif
46233
46234 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46235 start,
46236@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
46237 flags & VM_WRITE ? 'w' : '-',
46238 flags & VM_EXEC ? 'x' : '-',
46239 flags & VM_MAYSHARE ? 's' : 'p',
46240+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46241+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46242+#else
46243 pgoff,
46244+#endif
46245 MAJOR(dev), MINOR(dev), ino, &len);
46246
46247 /*
46248@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
46249 */
46250 if (file) {
46251 pad_len_spaces(m, len);
46252- seq_path(m, &file->f_path, "\n");
46253+ seq_path(m, &file->f_path, "\n\\");
46254 } else {
46255 const char *name = arch_vma_name(vma);
46256 if (!name) {
46257@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
46258 if (vma->vm_start <= mm->brk &&
46259 vma->vm_end >= mm->start_brk) {
46260 name = "[heap]";
46261- } else if (vma->vm_start <= mm->start_stack &&
46262- vma->vm_end >= mm->start_stack) {
46263+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46264+ (vma->vm_start <= mm->start_stack &&
46265+ vma->vm_end >= mm->start_stack)) {
46266 name = "[stack]";
46267 }
46268 } else {
46269@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
46270 };
46271
46272 memset(&mss, 0, sizeof mss);
46273- mss.vma = vma;
46274- /* mmap_sem is held in m_start */
46275- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46276- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46277-
46278+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46279+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46280+#endif
46281+ mss.vma = vma;
46282+ /* mmap_sem is held in m_start */
46283+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46284+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46285+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46286+ }
46287+#endif
46288 show_map_vma(m, vma);
46289
46290 seq_printf(m,
46291@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
46292 "KernelPageSize: %8lu kB\n"
46293 "MMUPageSize: %8lu kB\n"
46294 "Locked: %8lu kB\n",
46295+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46296+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46297+#else
46298 (vma->vm_end - vma->vm_start) >> 10,
46299+#endif
46300 mss.resident >> 10,
46301 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46302 mss.shared_clean >> 10,
46303@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file
46304
46305 if (file) {
46306 seq_printf(m, " file=");
46307- seq_path(m, &file->f_path, "\n\t= ");
46308+ seq_path(m, &file->f_path, "\n\t\\= ");
46309 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46310 seq_printf(m, " heap");
46311 } else if (vma->vm_start <= mm->start_stack &&
46312diff -urNp linux-3.1.1/fs/proc/task_nommu.c linux-3.1.1/fs/proc/task_nommu.c
46313--- linux-3.1.1/fs/proc/task_nommu.c 2011-11-11 15:19:27.000000000 -0500
46314+++ linux-3.1.1/fs/proc/task_nommu.c 2011-11-16 18:39:08.000000000 -0500
46315@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
46316 else
46317 bytes += kobjsize(mm);
46318
46319- if (current->fs && current->fs->users > 1)
46320+ if (current->fs && atomic_read(&current->fs->users) > 1)
46321 sbytes += kobjsize(current->fs);
46322 else
46323 bytes += kobjsize(current->fs);
46324@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
46325
46326 if (file) {
46327 pad_len_spaces(m, len);
46328- seq_path(m, &file->f_path, "");
46329+ seq_path(m, &file->f_path, "\n\\");
46330 } else if (mm) {
46331 if (vma->vm_start <= mm->start_stack &&
46332 vma->vm_end >= mm->start_stack) {
46333diff -urNp linux-3.1.1/fs/quota/netlink.c linux-3.1.1/fs/quota/netlink.c
46334--- linux-3.1.1/fs/quota/netlink.c 2011-11-11 15:19:27.000000000 -0500
46335+++ linux-3.1.1/fs/quota/netlink.c 2011-11-16 18:39:08.000000000 -0500
46336@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
46337 void quota_send_warning(short type, unsigned int id, dev_t dev,
46338 const char warntype)
46339 {
46340- static atomic_t seq;
46341+ static atomic_unchecked_t seq;
46342 struct sk_buff *skb;
46343 void *msg_head;
46344 int ret;
46345@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
46346 "VFS: Not enough memory to send quota warning.\n");
46347 return;
46348 }
46349- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46350+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46351 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46352 if (!msg_head) {
46353 printk(KERN_ERR
46354diff -urNp linux-3.1.1/fs/readdir.c linux-3.1.1/fs/readdir.c
46355--- linux-3.1.1/fs/readdir.c 2011-11-11 15:19:27.000000000 -0500
46356+++ linux-3.1.1/fs/readdir.c 2011-11-16 18:40:29.000000000 -0500
46357@@ -17,6 +17,7 @@
46358 #include <linux/security.h>
46359 #include <linux/syscalls.h>
46360 #include <linux/unistd.h>
46361+#include <linux/namei.h>
46362
46363 #include <asm/uaccess.h>
46364
46365@@ -67,6 +68,7 @@ struct old_linux_dirent {
46366
46367 struct readdir_callback {
46368 struct old_linux_dirent __user * dirent;
46369+ struct file * file;
46370 int result;
46371 };
46372
46373@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
46374 buf->result = -EOVERFLOW;
46375 return -EOVERFLOW;
46376 }
46377+
46378+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46379+ return 0;
46380+
46381 buf->result++;
46382 dirent = buf->dirent;
46383 if (!access_ok(VERIFY_WRITE, dirent,
46384@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
46385
46386 buf.result = 0;
46387 buf.dirent = dirent;
46388+ buf.file = file;
46389
46390 error = vfs_readdir(file, fillonedir, &buf);
46391 if (buf.result)
46392@@ -142,6 +149,7 @@ struct linux_dirent {
46393 struct getdents_callback {
46394 struct linux_dirent __user * current_dir;
46395 struct linux_dirent __user * previous;
46396+ struct file * file;
46397 int count;
46398 int error;
46399 };
46400@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
46401 buf->error = -EOVERFLOW;
46402 return -EOVERFLOW;
46403 }
46404+
46405+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46406+ return 0;
46407+
46408 dirent = buf->previous;
46409 if (dirent) {
46410 if (__put_user(offset, &dirent->d_off))
46411@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46412 buf.previous = NULL;
46413 buf.count = count;
46414 buf.error = 0;
46415+ buf.file = file;
46416
46417 error = vfs_readdir(file, filldir, &buf);
46418 if (error >= 0)
46419@@ -229,6 +242,7 @@ out:
46420 struct getdents_callback64 {
46421 struct linux_dirent64 __user * current_dir;
46422 struct linux_dirent64 __user * previous;
46423+ struct file *file;
46424 int count;
46425 int error;
46426 };
46427@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
46428 buf->error = -EINVAL; /* only used if we fail.. */
46429 if (reclen > buf->count)
46430 return -EINVAL;
46431+
46432+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46433+ return 0;
46434+
46435 dirent = buf->previous;
46436 if (dirent) {
46437 if (__put_user(offset, &dirent->d_off))
46438@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46439
46440 buf.current_dir = dirent;
46441 buf.previous = NULL;
46442+ buf.file = file;
46443 buf.count = count;
46444 buf.error = 0;
46445
46446@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46447 error = buf.error;
46448 lastdirent = buf.previous;
46449 if (lastdirent) {
46450- typeof(lastdirent->d_off) d_off = file->f_pos;
46451+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46452 if (__put_user(d_off, &lastdirent->d_off))
46453 error = -EFAULT;
46454 else
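
One small detail in the fs/readdir.c diff above is the switch from typeof(lastdirent->d_off) to typeof(((struct linux_dirent64 *)0)->d_off): typeof never evaluates its operand, but spelling the member type through a casted null pointer keeps the declaration independent of the __user pointer variable. A tiny user-space illustration of that GCC/Clang typeof idiom, with a made-up struct name:

#include <stdio.h>

struct record {
    long long offset;
    char name[32];
};

int main(void)
{
    /* Take the type of a member without having (or touching) an object:
     * the null pointer only appears inside typeof, which does not evaluate it. */
    typeof(((struct record *)0)->offset) off = 12345;

    printf("sizeof(off) = %zu, off = %lld\n", sizeof off, off);
    return 0;
}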
46455diff -urNp linux-3.1.1/fs/reiserfs/dir.c linux-3.1.1/fs/reiserfs/dir.c
46456--- linux-3.1.1/fs/reiserfs/dir.c 2011-11-11 15:19:27.000000000 -0500
46457+++ linux-3.1.1/fs/reiserfs/dir.c 2011-11-16 18:40:29.000000000 -0500
46458@@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentr
46459 struct reiserfs_dir_entry de;
46460 int ret = 0;
46461
46462+ pax_track_stack();
46463+
46464 reiserfs_write_lock(inode->i_sb);
46465
46466 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46467diff -urNp linux-3.1.1/fs/reiserfs/do_balan.c linux-3.1.1/fs/reiserfs/do_balan.c
46468--- linux-3.1.1/fs/reiserfs/do_balan.c 2011-11-11 15:19:27.000000000 -0500
46469+++ linux-3.1.1/fs/reiserfs/do_balan.c 2011-11-16 18:39:08.000000000 -0500
46470@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
46471 return;
46472 }
46473
46474- atomic_inc(&(fs_generation(tb->tb_sb)));
46475+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46476 do_balance_starts(tb);
46477
46478 /* balance leaf returns 0 except if combining L R and S into
46479diff -urNp linux-3.1.1/fs/reiserfs/journal.c linux-3.1.1/fs/reiserfs/journal.c
46480--- linux-3.1.1/fs/reiserfs/journal.c 2011-11-11 15:19:27.000000000 -0500
46481+++ linux-3.1.1/fs/reiserfs/journal.c 2011-11-16 18:40:29.000000000 -0500
46482@@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_brea
46483 struct buffer_head *bh;
46484 int i, j;
46485
46486+ pax_track_stack();
46487+
46488 bh = __getblk(dev, block, bufsize);
46489 if (buffer_uptodate(bh))
46490 return (bh);
46491diff -urNp linux-3.1.1/fs/reiserfs/namei.c linux-3.1.1/fs/reiserfs/namei.c
46492--- linux-3.1.1/fs/reiserfs/namei.c 2011-11-11 15:19:27.000000000 -0500
46493+++ linux-3.1.1/fs/reiserfs/namei.c 2011-11-16 18:40:29.000000000 -0500
46494@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
46495 unsigned long savelink = 1;
46496 struct timespec ctime;
46497
46498+ pax_track_stack();
46499+
46500 /* three balancings: (1) old name removal, (2) new name insertion
46501 and (3) maybe "save" link insertion
46502 stat data updates: (1) old directory,
46503diff -urNp linux-3.1.1/fs/reiserfs/procfs.c linux-3.1.1/fs/reiserfs/procfs.c
46504--- linux-3.1.1/fs/reiserfs/procfs.c 2011-11-11 15:19:27.000000000 -0500
46505+++ linux-3.1.1/fs/reiserfs/procfs.c 2011-11-16 18:40:29.000000000 -0500
46506@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
46507 "SMALL_TAILS " : "NO_TAILS ",
46508 replay_only(sb) ? "REPLAY_ONLY " : "",
46509 convert_reiserfs(sb) ? "CONV " : "",
46510- atomic_read(&r->s_generation_counter),
46511+ atomic_read_unchecked(&r->s_generation_counter),
46512 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46513 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46514 SF(s_good_search_by_key_reada), SF(s_bmaps),
46515@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
46516 struct journal_params *jp = &rs->s_v1.s_journal;
46517 char b[BDEVNAME_SIZE];
46518
46519+ pax_track_stack();
46520+
46521 seq_printf(m, /* on-disk fields */
46522 "jp_journal_1st_block: \t%i\n"
46523 "jp_journal_dev: \t%s[%x]\n"
46524diff -urNp linux-3.1.1/fs/reiserfs/stree.c linux-3.1.1/fs/reiserfs/stree.c
46525--- linux-3.1.1/fs/reiserfs/stree.c 2011-11-11 15:19:27.000000000 -0500
46526+++ linux-3.1.1/fs/reiserfs/stree.c 2011-11-16 18:40:29.000000000 -0500
46527@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
46528 int iter = 0;
46529 #endif
46530
46531+ pax_track_stack();
46532+
46533 BUG_ON(!th->t_trans_id);
46534
46535 init_tb_struct(th, &s_del_balance, sb, path,
46536@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
46537 int retval;
46538 int quota_cut_bytes = 0;
46539
46540+ pax_track_stack();
46541+
46542 BUG_ON(!th->t_trans_id);
46543
46544 le_key2cpu_key(&cpu_key, key);
46545@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
46546 int quota_cut_bytes;
46547 loff_t tail_pos = 0;
46548
46549+ pax_track_stack();
46550+
46551 BUG_ON(!th->t_trans_id);
46552
46553 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46554@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
46555 int retval;
46556 int fs_gen;
46557
46558+ pax_track_stack();
46559+
46560 BUG_ON(!th->t_trans_id);
46561
46562 fs_gen = get_generation(inode->i_sb);
46563@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
46564 int fs_gen = 0;
46565 int quota_bytes = 0;
46566
46567+ pax_track_stack();
46568+
46569 BUG_ON(!th->t_trans_id);
46570
46571 if (inode) { /* Do we count quotas for item? */
46572diff -urNp linux-3.1.1/fs/reiserfs/super.c linux-3.1.1/fs/reiserfs/super.c
46573--- linux-3.1.1/fs/reiserfs/super.c 2011-11-11 15:19:27.000000000 -0500
46574+++ linux-3.1.1/fs/reiserfs/super.c 2011-11-16 18:40:29.000000000 -0500
46575@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
46576 {.option_name = NULL}
46577 };
46578
46579+ pax_track_stack();
46580+
46581 *blocks = 0;
46582 if (!options || !*options)
46583 /* use default configuration: create tails, journaling on, no
46584diff -urNp linux-3.1.1/fs/select.c linux-3.1.1/fs/select.c
46585--- linux-3.1.1/fs/select.c 2011-11-11 15:19:27.000000000 -0500
46586+++ linux-3.1.1/fs/select.c 2011-11-16 18:40:29.000000000 -0500
46587@@ -20,6 +20,7 @@
46588 #include <linux/module.h>
46589 #include <linux/slab.h>
46590 #include <linux/poll.h>
46591+#include <linux/security.h>
46592 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46593 #include <linux/file.h>
46594 #include <linux/fdtable.h>
46595@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
46596 int retval, i, timed_out = 0;
46597 unsigned long slack = 0;
46598
46599+ pax_track_stack();
46600+
46601 rcu_read_lock();
46602 retval = max_select_fd(n, fds);
46603 rcu_read_unlock();
46604@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
46605 /* Allocate small arguments on the stack to save memory and be faster */
46606 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46607
46608+ pax_track_stack();
46609+
46610 ret = -EINVAL;
46611 if (n < 0)
46612 goto out_nofds;
46613@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
46614 struct poll_list *walk = head;
46615 unsigned long todo = nfds;
46616
46617+ pax_track_stack();
46618+
46619+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46620 if (nfds > rlimit(RLIMIT_NOFILE))
46621 return -EINVAL;
46622
46623diff -urNp linux-3.1.1/fs/seq_file.c linux-3.1.1/fs/seq_file.c
46624--- linux-3.1.1/fs/seq_file.c 2011-11-11 15:19:27.000000000 -0500
46625+++ linux-3.1.1/fs/seq_file.c 2011-11-16 18:39:08.000000000 -0500
46626@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46627 return 0;
46628 }
46629 if (!m->buf) {
46630- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46631+ m->size = PAGE_SIZE;
46632+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46633 if (!m->buf)
46634 return -ENOMEM;
46635 }
46636@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46637 Eoverflow:
46638 m->op->stop(m, p);
46639 kfree(m->buf);
46640- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46641+ m->size <<= 1;
46642+ m->buf = kmalloc(m->size, GFP_KERNEL);
46643 return !m->buf ? -ENOMEM : -EAGAIN;
46644 }
46645
46646@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46647 m->version = file->f_version;
46648 /* grab buffer if we didn't have one */
46649 if (!m->buf) {
46650- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46651+ m->size = PAGE_SIZE;
46652+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46653 if (!m->buf)
46654 goto Enomem;
46655 }
46656@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46657 goto Fill;
46658 m->op->stop(m, p);
46659 kfree(m->buf);
46660- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46661+ m->size <<= 1;
46662+ m->buf = kmalloc(m->size, GFP_KERNEL);
46663 if (!m->buf)
46664 goto Enomem;
46665 m->count = 0;
46666@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
46667 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46668 void *data)
46669 {
46670- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46671+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46672 int res = -ENOMEM;
46673
46674 if (op) {
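
The fs/seq_file.c hunks above replace kmalloc(m->size = PAGE_SIZE, GFP_KERNEL) with a separate assignment followed by the allocation; the behaviour is unchanged, but the size field is no longer updated inside the argument list, which is easier to read and easier for grsecurity's size-tracking instrumentation to follow. A user-space before/after sketch with malloc, using an invented growbuf struct:

#include <stdlib.h>

struct growbuf {
    char *buf;
    size_t size;
};

/* Before: the size field is assigned inside the argument expression. */
static int grow_terse(struct growbuf *g)
{
    g->buf = malloc(g->size <<= 1);
    return g->buf ? 0 : -1;
}

/* After: same effect, but the assignment is a statement of its own. */
static int grow_clear(struct growbuf *g)
{
    g->size <<= 1;
    g->buf = malloc(g->size);
    return g->buf ? 0 : -1;
}

int main(void)
{
    struct growbuf g = { NULL, 4096 };

    if (grow_clear(&g))
        return 1;
    free(g.buf);
    return 0;
}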
46675diff -urNp linux-3.1.1/fs/splice.c linux-3.1.1/fs/splice.c
46676--- linux-3.1.1/fs/splice.c 2011-11-11 15:19:27.000000000 -0500
46677+++ linux-3.1.1/fs/splice.c 2011-11-16 18:40:29.000000000 -0500
46678@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46679 pipe_lock(pipe);
46680
46681 for (;;) {
46682- if (!pipe->readers) {
46683+ if (!atomic_read(&pipe->readers)) {
46684 send_sig(SIGPIPE, current, 0);
46685 if (!ret)
46686 ret = -EPIPE;
46687@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46688 do_wakeup = 0;
46689 }
46690
46691- pipe->waiting_writers++;
46692+ atomic_inc(&pipe->waiting_writers);
46693 pipe_wait(pipe);
46694- pipe->waiting_writers--;
46695+ atomic_dec(&pipe->waiting_writers);
46696 }
46697
46698 pipe_unlock(pipe);
46699@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
46700 .spd_release = spd_release_page,
46701 };
46702
46703+ pax_track_stack();
46704+
46705 if (splice_grow_spd(pipe, &spd))
46706 return -ENOMEM;
46707
46708@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
46709 old_fs = get_fs();
46710 set_fs(get_ds());
46711 /* The cast to a user pointer is valid due to the set_fs() */
46712- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46713+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46714 set_fs(old_fs);
46715
46716 return res;
46717@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
46718 old_fs = get_fs();
46719 set_fs(get_ds());
46720 /* The cast to a user pointer is valid due to the set_fs() */
46721- res = vfs_write(file, (const char __user *)buf, count, &pos);
46722+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46723 set_fs(old_fs);
46724
46725 return res;
46726@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
46727 .spd_release = spd_release_page,
46728 };
46729
46730+ pax_track_stack();
46731+
46732 if (splice_grow_spd(pipe, &spd))
46733 return -ENOMEM;
46734
46735@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
46736 goto err;
46737
46738 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46739- vec[i].iov_base = (void __user *) page_address(page);
46740+ vec[i].iov_base = (void __force_user *) page_address(page);
46741 vec[i].iov_len = this_len;
46742 spd.pages[i] = page;
46743 spd.nr_pages++;
46744@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46745 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46746 {
46747 while (!pipe->nrbufs) {
46748- if (!pipe->writers)
46749+ if (!atomic_read(&pipe->writers))
46750 return 0;
46751
46752- if (!pipe->waiting_writers && sd->num_spliced)
46753+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46754 return 0;
46755
46756 if (sd->flags & SPLICE_F_NONBLOCK)
46757@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
46758 * out of the pipe right after the splice_to_pipe(). So set
46759 * PIPE_READERS appropriately.
46760 */
46761- pipe->readers = 1;
46762+ atomic_set(&pipe->readers, 1);
46763
46764 current->splice_pipe = pipe;
46765 }
46766@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
46767 };
46768 long ret;
46769
46770+ pax_track_stack();
46771+
46772 pipe = get_pipe_info(file);
46773 if (!pipe)
46774 return -EBADF;
46775@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
46776 ret = -ERESTARTSYS;
46777 break;
46778 }
46779- if (!pipe->writers)
46780+ if (!atomic_read(&pipe->writers))
46781 break;
46782- if (!pipe->waiting_writers) {
46783+ if (!atomic_read(&pipe->waiting_writers)) {
46784 if (flags & SPLICE_F_NONBLOCK) {
46785 ret = -EAGAIN;
46786 break;
46787@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
46788 pipe_lock(pipe);
46789
46790 while (pipe->nrbufs >= pipe->buffers) {
46791- if (!pipe->readers) {
46792+ if (!atomic_read(&pipe->readers)) {
46793 send_sig(SIGPIPE, current, 0);
46794 ret = -EPIPE;
46795 break;
46796@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
46797 ret = -ERESTARTSYS;
46798 break;
46799 }
46800- pipe->waiting_writers++;
46801+ atomic_inc(&pipe->waiting_writers);
46802 pipe_wait(pipe);
46803- pipe->waiting_writers--;
46804+ atomic_dec(&pipe->waiting_writers);
46805 }
46806
46807 pipe_unlock(pipe);
46808@@ -1819,14 +1825,14 @@ retry:
46809 pipe_double_lock(ipipe, opipe);
46810
46811 do {
46812- if (!opipe->readers) {
46813+ if (!atomic_read(&opipe->readers)) {
46814 send_sig(SIGPIPE, current, 0);
46815 if (!ret)
46816 ret = -EPIPE;
46817 break;
46818 }
46819
46820- if (!ipipe->nrbufs && !ipipe->writers)
46821+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46822 break;
46823
46824 /*
46825@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
46826 pipe_double_lock(ipipe, opipe);
46827
46828 do {
46829- if (!opipe->readers) {
46830+ if (!atomic_read(&opipe->readers)) {
46831 send_sig(SIGPIPE, current, 0);
46832 if (!ret)
46833 ret = -EPIPE;
46834@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
46835 * return EAGAIN if we have the potential of some data in the
46836 * future, otherwise just return 0
46837 */
46838- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46839+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46840 ret = -EAGAIN;
46841
46842 pipe_unlock(ipipe);
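
The fs/splice.c hunks above (together with the matching pipe changes elsewhere in the patch) turn the plain readers/writers/waiting_writers counters into atomic_t and route every access through atomic_read/atomic_inc/atomic_dec. A minimal user-space analogue with C11 stdatomic; the pipe_state struct is invented and is not the kernel's pipe_inode_info.

#include <stdatomic.h>
#include <stdio.h>

struct pipe_state {
    atomic_int readers;            /* was: plain int, updated with ++ and -- */
    atomic_int waiting_writers;
};

static void writer_wait(struct pipe_state *p)
{
    atomic_fetch_add(&p->waiting_writers, 1);   /* was: p->waiting_writers++ */
    /* ... block until a reader drains the pipe ... */
    atomic_fetch_sub(&p->waiting_writers, 1);   /* was: p->waiting_writers-- */
}

static int has_readers(const struct pipe_state *p)
{
    return atomic_load(&p->readers) != 0;       /* was: p->readers != 0 */
}

int main(void)
{
    struct pipe_state p = { .readers = 1, .waiting_writers = 0 };

    writer_wait(&p);
    printf("readers present: %d\n", has_readers(&p));
    return 0;
}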
46843diff -urNp linux-3.1.1/fs/sysfs/file.c linux-3.1.1/fs/sysfs/file.c
46844--- linux-3.1.1/fs/sysfs/file.c 2011-11-11 15:19:27.000000000 -0500
46845+++ linux-3.1.1/fs/sysfs/file.c 2011-11-16 18:39:08.000000000 -0500
46846@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46847
46848 struct sysfs_open_dirent {
46849 atomic_t refcnt;
46850- atomic_t event;
46851+ atomic_unchecked_t event;
46852 wait_queue_head_t poll;
46853 struct list_head buffers; /* goes through sysfs_buffer.list */
46854 };
46855@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
46856 if (!sysfs_get_active(attr_sd))
46857 return -ENODEV;
46858
46859- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46860+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46861 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46862
46863 sysfs_put_active(attr_sd);
46864@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
46865 return -ENOMEM;
46866
46867 atomic_set(&new_od->refcnt, 0);
46868- atomic_set(&new_od->event, 1);
46869+ atomic_set_unchecked(&new_od->event, 1);
46870 init_waitqueue_head(&new_od->poll);
46871 INIT_LIST_HEAD(&new_od->buffers);
46872 goto retry;
46873@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
46874
46875 sysfs_put_active(attr_sd);
46876
46877- if (buffer->event != atomic_read(&od->event))
46878+ if (buffer->event != atomic_read_unchecked(&od->event))
46879 goto trigger;
46880
46881 return DEFAULT_POLLMASK;
46882@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
46883
46884 od = sd->s_attr.open;
46885 if (od) {
46886- atomic_inc(&od->event);
46887+ atomic_inc_unchecked(&od->event);
46888 wake_up_interruptible(&od->poll);
46889 }
46890
46891diff -urNp linux-3.1.1/fs/sysfs/mount.c linux-3.1.1/fs/sysfs/mount.c
46892--- linux-3.1.1/fs/sysfs/mount.c 2011-11-11 15:19:27.000000000 -0500
46893+++ linux-3.1.1/fs/sysfs/mount.c 2011-11-16 18:40:29.000000000 -0500
46894@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46895 .s_name = "",
46896 .s_count = ATOMIC_INIT(1),
46897 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
46898+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46899+ .s_mode = S_IFDIR | S_IRWXU,
46900+#else
46901 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46902+#endif
46903 .s_ino = 1,
46904 };
46905
46906diff -urNp linux-3.1.1/fs/sysfs/symlink.c linux-3.1.1/fs/sysfs/symlink.c
46907--- linux-3.1.1/fs/sysfs/symlink.c 2011-11-11 15:19:27.000000000 -0500
46908+++ linux-3.1.1/fs/sysfs/symlink.c 2011-11-16 18:39:08.000000000 -0500
46909@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
46910
46911 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46912 {
46913- char *page = nd_get_link(nd);
46914+ const char *page = nd_get_link(nd);
46915 if (!IS_ERR(page))
46916 free_page((unsigned long)page);
46917 }
46918diff -urNp linux-3.1.1/fs/udf/inode.c linux-3.1.1/fs/udf/inode.c
46919--- linux-3.1.1/fs/udf/inode.c 2011-11-11 15:19:27.000000000 -0500
46920+++ linux-3.1.1/fs/udf/inode.c 2011-11-16 18:40:29.000000000 -0500
46921@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
46922 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46923 int lastblock = 0;
46924
46925+ pax_track_stack();
46926+
46927 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46928 prev_epos.block = iinfo->i_location;
46929 prev_epos.bh = NULL;
46930diff -urNp linux-3.1.1/fs/udf/misc.c linux-3.1.1/fs/udf/misc.c
46931--- linux-3.1.1/fs/udf/misc.c 2011-11-11 15:19:27.000000000 -0500
46932+++ linux-3.1.1/fs/udf/misc.c 2011-11-16 18:39:08.000000000 -0500
46933@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46934
46935 u8 udf_tag_checksum(const struct tag *t)
46936 {
46937- u8 *data = (u8 *)t;
46938+ const u8 *data = (const u8 *)t;
46939 u8 checksum = 0;
46940 int i;
46941 for (i = 0; i < sizeof(struct tag); ++i)
46942diff -urNp linux-3.1.1/fs/utimes.c linux-3.1.1/fs/utimes.c
46943--- linux-3.1.1/fs/utimes.c 2011-11-11 15:19:27.000000000 -0500
46944+++ linux-3.1.1/fs/utimes.c 2011-11-16 18:40:29.000000000 -0500
46945@@ -1,6 +1,7 @@
46946 #include <linux/compiler.h>
46947 #include <linux/file.h>
46948 #include <linux/fs.h>
46949+#include <linux/security.h>
46950 #include <linux/linkage.h>
46951 #include <linux/mount.h>
46952 #include <linux/namei.h>
46953@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46954 goto mnt_drop_write_and_out;
46955 }
46956 }
46957+
46958+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46959+ error = -EACCES;
46960+ goto mnt_drop_write_and_out;
46961+ }
46962+
46963 mutex_lock(&inode->i_mutex);
46964 error = notify_change(path->dentry, &newattrs);
46965 mutex_unlock(&inode->i_mutex);
46966diff -urNp linux-3.1.1/fs/xattr_acl.c linux-3.1.1/fs/xattr_acl.c
46967--- linux-3.1.1/fs/xattr_acl.c 2011-11-11 15:19:27.000000000 -0500
46968+++ linux-3.1.1/fs/xattr_acl.c 2011-11-16 18:39:08.000000000 -0500
46969@@ -17,8 +17,8 @@
46970 struct posix_acl *
46971 posix_acl_from_xattr(const void *value, size_t size)
46972 {
46973- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46974- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46975+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46976+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46977 int count;
46978 struct posix_acl *acl;
46979 struct posix_acl_entry *acl_e;
46980diff -urNp linux-3.1.1/fs/xattr.c linux-3.1.1/fs/xattr.c
46981--- linux-3.1.1/fs/xattr.c 2011-11-11 15:19:27.000000000 -0500
46982+++ linux-3.1.1/fs/xattr.c 2011-11-16 18:40:29.000000000 -0500
46983@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46984 * Extended attribute SET operations
46985 */
46986 static long
46987-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46988+setxattr(struct path *path, const char __user *name, const void __user *value,
46989 size_t size, int flags)
46990 {
46991 int error;
46992@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
46993 return PTR_ERR(kvalue);
46994 }
46995
46996- error = vfs_setxattr(d, kname, kvalue, size, flags);
46997+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46998+ error = -EACCES;
46999+ goto out;
47000+ }
47001+
47002+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47003+out:
47004 kfree(kvalue);
47005 return error;
47006 }
47007@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
47008 return error;
47009 error = mnt_want_write(path.mnt);
47010 if (!error) {
47011- error = setxattr(path.dentry, name, value, size, flags);
47012+ error = setxattr(&path, name, value, size, flags);
47013 mnt_drop_write(path.mnt);
47014 }
47015 path_put(&path);
47016@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
47017 return error;
47018 error = mnt_want_write(path.mnt);
47019 if (!error) {
47020- error = setxattr(path.dentry, name, value, size, flags);
47021+ error = setxattr(&path, name, value, size, flags);
47022 mnt_drop_write(path.mnt);
47023 }
47024 path_put(&path);
47025@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
47026 const void __user *,value, size_t, size, int, flags)
47027 {
47028 struct file *f;
47029- struct dentry *dentry;
47030 int error = -EBADF;
47031
47032 f = fget(fd);
47033 if (!f)
47034 return error;
47035- dentry = f->f_path.dentry;
47036- audit_inode(NULL, dentry);
47037+ audit_inode(NULL, f->f_path.dentry);
47038 error = mnt_want_write_file(f);
47039 if (!error) {
47040- error = setxattr(dentry, name, value, size, flags);
47041+ error = setxattr(&f->f_path, name, value, size, flags);
47042 mnt_drop_write(f->f_path.mnt);
47043 }
47044 fput(f);
47045diff -urNp linux-3.1.1/fs/xfs/xfs_bmap.c linux-3.1.1/fs/xfs/xfs_bmap.c
47046--- linux-3.1.1/fs/xfs/xfs_bmap.c 2011-11-11 15:19:27.000000000 -0500
47047+++ linux-3.1.1/fs/xfs/xfs_bmap.c 2011-11-16 18:39:08.000000000 -0500
47048@@ -250,7 +250,7 @@ xfs_bmap_validate_ret(
47049 int nmap,
47050 int ret_nmap);
47051 #else
47052-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47053+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47054 #endif /* DEBUG */
47055
47056 STATIC int
47057diff -urNp linux-3.1.1/fs/xfs/xfs_dir2_sf.c linux-3.1.1/fs/xfs/xfs_dir2_sf.c
47058--- linux-3.1.1/fs/xfs/xfs_dir2_sf.c 2011-11-11 15:19:27.000000000 -0500
47059+++ linux-3.1.1/fs/xfs/xfs_dir2_sf.c 2011-11-16 18:39:08.000000000 -0500
47060@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47061 }
47062
47063 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47064- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47065+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47066+ char name[sfep->namelen];
47067+ memcpy(name, sfep->name, sfep->namelen);
47068+ if (filldir(dirent, name, sfep->namelen,
47069+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
47070+ *offset = off & 0x7fffffff;
47071+ return 0;
47072+ }
47073+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47074 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47075 *offset = off & 0x7fffffff;
47076 return 0;
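The xfs_dir2_sf.c hunk above copies the entry name into a stack buffer before calling filldir whenever the short-form directory data is stored inline in the inode fork, instead of handing filldir a pointer into that inline area. A hedged userspace sketch of the same defensive-copy-before-callback pattern (names invented for illustration):

#include <stdio.h>
#include <string.h>

typedef int (*emit_fn)(const char *name, size_t len);

static int emit(const char *name, size_t len)
{
	printf("%.*s\n", (int)len, name);
	return 0;
}

/* copy a non-NUL-terminated name out of shared storage into a local
   buffer before handing it to the callback */
static int emit_entry(const unsigned char *name, unsigned char namelen, emit_fn cb)
{
	char buf[256];                  /* namelen is a single byte on disk */

	memcpy(buf, name, namelen);
	return cb(buf, namelen);
}

int main(void)
{
	const unsigned char raw[] = { 'f', 'o', 'o' };
	return emit_entry(raw, sizeof(raw), emit);
}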
47077diff -urNp linux-3.1.1/fs/xfs/xfs_ioctl.c linux-3.1.1/fs/xfs/xfs_ioctl.c
47078--- linux-3.1.1/fs/xfs/xfs_ioctl.c 2011-11-11 15:19:27.000000000 -0500
47079+++ linux-3.1.1/fs/xfs/xfs_ioctl.c 2011-11-16 18:39:08.000000000 -0500
47080@@ -128,7 +128,7 @@ xfs_find_handle(
47081 }
47082
47083 error = -EFAULT;
47084- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47085+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47086 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47087 goto out_put;
47088
47089diff -urNp linux-3.1.1/fs/xfs/xfs_iops.c linux-3.1.1/fs/xfs/xfs_iops.c
47090--- linux-3.1.1/fs/xfs/xfs_iops.c 2011-11-11 15:19:27.000000000 -0500
47091+++ linux-3.1.1/fs/xfs/xfs_iops.c 2011-11-16 18:39:08.000000000 -0500
47092@@ -446,7 +446,7 @@ xfs_vn_put_link(
47093 struct nameidata *nd,
47094 void *p)
47095 {
47096- char *s = nd_get_link(nd);
47097+ const char *s = nd_get_link(nd);
47098
47099 if (!IS_ERR(s))
47100 kfree(s);
47101diff -urNp linux-3.1.1/grsecurity/gracl_alloc.c linux-3.1.1/grsecurity/gracl_alloc.c
47102--- linux-3.1.1/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
47103+++ linux-3.1.1/grsecurity/gracl_alloc.c 2011-11-16 18:40:31.000000000 -0500
47104@@ -0,0 +1,105 @@
47105+#include <linux/kernel.h>
47106+#include <linux/mm.h>
47107+#include <linux/slab.h>
47108+#include <linux/vmalloc.h>
47109+#include <linux/gracl.h>
47110+#include <linux/grsecurity.h>
47111+
47112+static unsigned long alloc_stack_next = 1;
47113+static unsigned long alloc_stack_size = 1;
47114+static void **alloc_stack;
47115+
47116+static __inline__ int
47117+alloc_pop(void)
47118+{
47119+ if (alloc_stack_next == 1)
47120+ return 0;
47121+
47122+ kfree(alloc_stack[alloc_stack_next - 2]);
47123+
47124+ alloc_stack_next--;
47125+
47126+ return 1;
47127+}
47128+
47129+static __inline__ int
47130+alloc_push(void *buf)
47131+{
47132+ if (alloc_stack_next >= alloc_stack_size)
47133+ return 1;
47134+
47135+ alloc_stack[alloc_stack_next - 1] = buf;
47136+
47137+ alloc_stack_next++;
47138+
47139+ return 0;
47140+}
47141+
47142+void *
47143+acl_alloc(unsigned long len)
47144+{
47145+ void *ret = NULL;
47146+
47147+ if (!len || len > PAGE_SIZE)
47148+ goto out;
47149+
47150+ ret = kmalloc(len, GFP_KERNEL);
47151+
47152+ if (ret) {
47153+ if (alloc_push(ret)) {
47154+ kfree(ret);
47155+ ret = NULL;
47156+ }
47157+ }
47158+
47159+out:
47160+ return ret;
47161+}
47162+
47163+void *
47164+acl_alloc_num(unsigned long num, unsigned long len)
47165+{
47166+ if (!len || (num > (PAGE_SIZE / len)))
47167+ return NULL;
47168+
47169+ return acl_alloc(num * len);
47170+}
47171+
47172+void
47173+acl_free_all(void)
47174+{
47175+ if (gr_acl_is_enabled() || !alloc_stack)
47176+ return;
47177+
47178+ while (alloc_pop()) ;
47179+
47180+ if (alloc_stack) {
47181+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
47182+ kfree(alloc_stack);
47183+ else
47184+ vfree(alloc_stack);
47185+ }
47186+
47187+ alloc_stack = NULL;
47188+ alloc_stack_size = 1;
47189+ alloc_stack_next = 1;
47190+
47191+ return;
47192+}
47193+
47194+int
47195+acl_alloc_stack_init(unsigned long size)
47196+{
47197+ if ((size * sizeof (void *)) <= PAGE_SIZE)
47198+ alloc_stack =
47199+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
47200+ else
47201+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
47202+
47203+ alloc_stack_size = size;
47204+
47205+ if (!alloc_stack)
47206+ return 0;
47207+ else
47208+ return 1;
47209+}
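gracl_alloc.c above implements a one-shot allocator for the RBAC policy: every acl_alloc() is pushed onto a pre-sized pointer stack, and acl_free_all() pops and frees everything when the policy is torn down, so individual call sites never free anything themselves. A minimal userspace sketch of that pattern, with malloc/free standing in for kmalloc/vmalloc:

#include <stdlib.h>

static void **alloc_stack;
static unsigned long alloc_stack_next = 1;   /* 1-based, as in the code above */
static unsigned long alloc_stack_size = 1;

int tracked_stack_init(unsigned long size)
{
	alloc_stack = malloc(size * sizeof(void *));
	alloc_stack_size = size;
	return alloc_stack != NULL;
}

void *tracked_alloc(size_t len)
{
	void *p;

	if (alloc_stack_next >= alloc_stack_size)
		return NULL;             /* stack full: refuse, as alloc_push() does */

	p = malloc(len);
	if (p) {
		alloc_stack[alloc_stack_next - 1] = p;
		alloc_stack_next++;
	}
	return p;
}

void tracked_free_all(void)
{
	while (alloc_stack_next > 1) {
		alloc_stack_next--;
		free(alloc_stack[alloc_stack_next - 1]);
	}
	free(alloc_stack);
	alloc_stack = NULL;
	alloc_stack_size = alloc_stack_next = 1;
}

int main(void)
{
	tracked_stack_init(16);
	tracked_alloc(32);
	tracked_alloc(64);
	tracked_free_all();              /* frees both allocations at once */
	return 0;
}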
47210diff -urNp linux-3.1.1/grsecurity/gracl.c linux-3.1.1/grsecurity/gracl.c
47211--- linux-3.1.1/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
47212+++ linux-3.1.1/grsecurity/gracl.c 2011-11-16 19:31:00.000000000 -0500
47213@@ -0,0 +1,4156 @@
47214+#include <linux/kernel.h>
47215+#include <linux/module.h>
47216+#include <linux/sched.h>
47217+#include <linux/mm.h>
47218+#include <linux/file.h>
47219+#include <linux/fs.h>
47220+#include <linux/namei.h>
47221+#include <linux/mount.h>
47222+#include <linux/tty.h>
47223+#include <linux/proc_fs.h>
47224+#include <linux/lglock.h>
47225+#include <linux/slab.h>
47226+#include <linux/vmalloc.h>
47227+#include <linux/types.h>
47228+#include <linux/sysctl.h>
47229+#include <linux/netdevice.h>
47230+#include <linux/ptrace.h>
47231+#include <linux/gracl.h>
47232+#include <linux/gralloc.h>
47233+#include <linux/grsecurity.h>
47234+#include <linux/grinternal.h>
47235+#include <linux/pid_namespace.h>
47236+#include <linux/fdtable.h>
47237+#include <linux/percpu.h>
47238+
47239+#include <asm/uaccess.h>
47240+#include <asm/errno.h>
47241+#include <asm/mman.h>
47242+
47243+static struct acl_role_db acl_role_set;
47244+static struct name_db name_set;
47245+static struct inodev_db inodev_set;
47246+
47247+/* for keeping track of userspace pointers used for subjects, so we
47248+ can share references in the kernel as well
47249+*/
47250+
47251+static struct path real_root;
47252+
47253+static struct acl_subj_map_db subj_map_set;
47254+
47255+static struct acl_role_label *default_role;
47256+
47257+static struct acl_role_label *role_list;
47258+
47259+static u16 acl_sp_role_value;
47260+
47261+extern char *gr_shared_page[4];
47262+static DEFINE_MUTEX(gr_dev_mutex);
47263+DEFINE_RWLOCK(gr_inode_lock);
47264+
47265+struct gr_arg *gr_usermode;
47266+
47267+static unsigned int gr_status __read_only = GR_STATUS_INIT;
47268+
47269+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47270+extern void gr_clear_learn_entries(void);
47271+
47272+#ifdef CONFIG_GRKERNSEC_RESLOG
47273+extern void gr_log_resource(const struct task_struct *task,
47274+ const int res, const unsigned long wanted, const int gt);
47275+#endif
47276+
47277+unsigned char *gr_system_salt;
47278+unsigned char *gr_system_sum;
47279+
47280+static struct sprole_pw **acl_special_roles = NULL;
47281+static __u16 num_sprole_pws = 0;
47282+
47283+static struct acl_role_label *kernel_role = NULL;
47284+
47285+static unsigned int gr_auth_attempts = 0;
47286+static unsigned long gr_auth_expires = 0UL;
47287+
47288+#ifdef CONFIG_NET
47289+extern struct vfsmount *sock_mnt;
47290+#endif
47291+
47292+extern struct vfsmount *pipe_mnt;
47293+extern struct vfsmount *shm_mnt;
47294+#ifdef CONFIG_HUGETLBFS
47295+extern struct vfsmount *hugetlbfs_vfsmount;
47296+#endif
47297+
47298+static struct acl_object_label *fakefs_obj_rw;
47299+static struct acl_object_label *fakefs_obj_rwx;
47300+
47301+extern int gr_init_uidset(void);
47302+extern void gr_free_uidset(void);
47303+extern void gr_remove_uid(uid_t uid);
47304+extern int gr_find_uid(uid_t uid);
47305+
47306+DECLARE_BRLOCK(vfsmount_lock);
47307+
47308+__inline__ int
47309+gr_acl_is_enabled(void)
47310+{
47311+ return (gr_status & GR_READY);
47312+}
47313+
47314+#ifdef CONFIG_BTRFS_FS
47315+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47316+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47317+#endif
47318+
47319+static inline dev_t __get_dev(const struct dentry *dentry)
47320+{
47321+#ifdef CONFIG_BTRFS_FS
47322+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47323+ return get_btrfs_dev_from_inode(dentry->d_inode);
47324+ else
47325+#endif
47326+ return dentry->d_inode->i_sb->s_dev;
47327+}
47328+
47329+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47330+{
47331+ return __get_dev(dentry);
47332+}
47333+
47334+static char gr_task_roletype_to_char(struct task_struct *task)
47335+{
47336+ switch (task->role->roletype &
47337+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47338+ GR_ROLE_SPECIAL)) {
47339+ case GR_ROLE_DEFAULT:
47340+ return 'D';
47341+ case GR_ROLE_USER:
47342+ return 'U';
47343+ case GR_ROLE_GROUP:
47344+ return 'G';
47345+ case GR_ROLE_SPECIAL:
47346+ return 'S';
47347+ }
47348+
47349+ return 'X';
47350+}
47351+
47352+char gr_roletype_to_char(void)
47353+{
47354+ return gr_task_roletype_to_char(current);
47355+}
47356+
47357+__inline__ int
47358+gr_acl_tpe_check(void)
47359+{
47360+ if (unlikely(!(gr_status & GR_READY)))
47361+ return 0;
47362+ if (current->role->roletype & GR_ROLE_TPE)
47363+ return 1;
47364+ else
47365+ return 0;
47366+}
47367+
47368+int
47369+gr_handle_rawio(const struct inode *inode)
47370+{
47371+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47372+ if (inode && S_ISBLK(inode->i_mode) &&
47373+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47374+ !capable(CAP_SYS_RAWIO))
47375+ return 1;
47376+#endif
47377+ return 0;
47378+}
47379+
47380+static int
47381+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47382+{
47383+ if (likely(lena != lenb))
47384+ return 0;
47385+
47386+ return !memcmp(a, b, lena);
47387+}
47388+
47389+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
47390+{
47391+ *buflen -= namelen;
47392+ if (*buflen < 0)
47393+ return -ENAMETOOLONG;
47394+ *buffer -= namelen;
47395+ memcpy(*buffer, str, namelen);
47396+ return 0;
47397+}
47398+
47399+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
47400+{
47401+ return prepend(buffer, buflen, name->name, name->len);
47402+}
47403+
47404+static int prepend_path(const struct path *path, struct path *root,
47405+ char **buffer, int *buflen)
47406+{
47407+ struct dentry *dentry = path->dentry;
47408+ struct vfsmount *vfsmnt = path->mnt;
47409+ bool slash = false;
47410+ int error = 0;
47411+
47412+ while (dentry != root->dentry || vfsmnt != root->mnt) {
47413+ struct dentry * parent;
47414+
47415+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47416+ /* Global root? */
47417+ if (vfsmnt->mnt_parent == vfsmnt) {
47418+ goto out;
47419+ }
47420+ dentry = vfsmnt->mnt_mountpoint;
47421+ vfsmnt = vfsmnt->mnt_parent;
47422+ continue;
47423+ }
47424+ parent = dentry->d_parent;
47425+ prefetch(parent);
47426+ spin_lock(&dentry->d_lock);
47427+ error = prepend_name(buffer, buflen, &dentry->d_name);
47428+ spin_unlock(&dentry->d_lock);
47429+ if (!error)
47430+ error = prepend(buffer, buflen, "/", 1);
47431+ if (error)
47432+ break;
47433+
47434+ slash = true;
47435+ dentry = parent;
47436+ }
47437+
47438+out:
47439+ if (!error && !slash)
47440+ error = prepend(buffer, buflen, "/", 1);
47441+
47442+ return error;
47443+}
47444+
47445+/* this must be called with vfsmount_lock and rename_lock held */
47446+
47447+static char *__our_d_path(const struct path *path, struct path *root,
47448+ char *buf, int buflen)
47449+{
47450+ char *res = buf + buflen;
47451+ int error;
47452+
47453+ prepend(&res, &buflen, "\0", 1);
47454+ error = prepend_path(path, root, &res, &buflen);
47455+ if (error)
47456+ return ERR_PTR(error);
47457+
47458+ return res;
47459+}
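__our_d_path() above builds the pathname right-to-left: the cursor starts at buf + buflen, and prepend() moves it backwards as each component is copied in, so the finished NUL-terminated string ends up somewhere inside the buffer and is returned by pointer. A small userspace sketch of the technique, with invented component data in place of the d_parent walk:

#include <stdio.h>
#include <string.h>

static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -1;               /* -ENAMETOOLONG in the kernel code */
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}

int main(void)
{
	/* components as they would be discovered walking d_parent upwards */
	const char *components[] = { "passwd", "etc" };
	char buf[64];
	char *res = buf + sizeof(buf);
	int buflen = sizeof(buf);
	size_t i;

	prepend(&res, &buflen, "\0", 1);
	for (i = 0; i < sizeof(components) / sizeof(components[0]); i++) {
		prepend(&res, &buflen, components[i], (int)strlen(components[i]));
		prepend(&res, &buflen, "/", 1);
	}

	printf("%s\n", res);             /* prints /etc/passwd */
	return 0;
}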
47460+
47461+static char *
47462+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
47463+{
47464+ char *retval;
47465+
47466+ retval = __our_d_path(path, root, buf, buflen);
47467+ if (unlikely(IS_ERR(retval)))
47468+ retval = strcpy(buf, "<path too long>");
47469+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47470+ retval[1] = '\0';
47471+
47472+ return retval;
47473+}
47474+
47475+static char *
47476+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47477+ char *buf, int buflen)
47478+{
47479+ struct path path;
47480+ char *res;
47481+
47482+ path.dentry = (struct dentry *)dentry;
47483+ path.mnt = (struct vfsmount *)vfsmnt;
47484+
47485+ /* we can use real_root.dentry, real_root.mnt, because this is only called
47486+ by the RBAC system */
47487+ res = gen_full_path(&path, &real_root, buf, buflen);
47488+
47489+ return res;
47490+}
47491+
47492+static char *
47493+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47494+ char *buf, int buflen)
47495+{
47496+ char *res;
47497+ struct path path;
47498+ struct path root;
47499+ struct task_struct *reaper = &init_task;
47500+
47501+ path.dentry = (struct dentry *)dentry;
47502+ path.mnt = (struct vfsmount *)vfsmnt;
47503+
47504+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
47505+ get_fs_root(reaper->fs, &root);
47506+
47507+ write_seqlock(&rename_lock);
47508+ br_read_lock(vfsmount_lock);
47509+ res = gen_full_path(&path, &root, buf, buflen);
47510+ br_read_unlock(vfsmount_lock);
47511+ write_sequnlock(&rename_lock);
47512+
47513+ path_put(&root);
47514+ return res;
47515+}
47516+
47517+static char *
47518+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47519+{
47520+ char *ret;
47521+ write_seqlock(&rename_lock);
47522+ br_read_lock(vfsmount_lock);
47523+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47524+ PAGE_SIZE);
47525+ br_read_unlock(vfsmount_lock);
47526+ write_sequnlock(&rename_lock);
47527+ return ret;
47528+}
47529+
47530+static char *
47531+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47532+{
47533+ char *ret;
47534+ char *buf;
47535+ int buflen;
47536+
47537+ write_seqlock(&rename_lock);
47538+ br_read_lock(vfsmount_lock);
47539+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47540+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
47541+ buflen = (int)(ret - buf);
47542+ if (buflen >= 5)
47543+ prepend(&ret, &buflen, "/proc", 5);
47544+ else
47545+ ret = strcpy(buf, "<path too long>");
47546+ br_read_unlock(vfsmount_lock);
47547+ write_sequnlock(&rename_lock);
47548+ return ret;
47549+}
47550+
47551+char *
47552+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47553+{
47554+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47555+ PAGE_SIZE);
47556+}
47557+
47558+char *
47559+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47560+{
47561+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47562+ PAGE_SIZE);
47563+}
47564+
47565+char *
47566+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47567+{
47568+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47569+ PAGE_SIZE);
47570+}
47571+
47572+char *
47573+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47574+{
47575+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47576+ PAGE_SIZE);
47577+}
47578+
47579+char *
47580+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47581+{
47582+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47583+ PAGE_SIZE);
47584+}
47585+
47586+__inline__ __u32
47587+to_gr_audit(const __u32 reqmode)
47588+{
47589+ /* masks off auditable permission flags, then shifts them to create
47590+ auditing flags, and adds the special case of append auditing if
47591+ we're requesting write */
47592+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47593+}
47594+
47595+struct acl_subject_label *
47596+lookup_subject_map(const struct acl_subject_label *userp)
47597+{
47598+ unsigned int index = shash(userp, subj_map_set.s_size);
47599+ struct subject_map *match;
47600+
47601+ match = subj_map_set.s_hash[index];
47602+
47603+ while (match && match->user != userp)
47604+ match = match->next;
47605+
47606+ if (match != NULL)
47607+ return match->kernel;
47608+ else
47609+ return NULL;
47610+}
47611+
47612+static void
47613+insert_subj_map_entry(struct subject_map *subjmap)
47614+{
47615+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47616+ struct subject_map **curr;
47617+
47618+ subjmap->prev = NULL;
47619+
47620+ curr = &subj_map_set.s_hash[index];
47621+ if (*curr != NULL)
47622+ (*curr)->prev = subjmap;
47623+
47624+ subjmap->next = *curr;
47625+ *curr = subjmap;
47626+
47627+ return;
47628+}
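The subject map (and, further down, the name and inode/device sets) are chained hash tables: insert_subj_map_entry() hashes the key and splices the new node in at the head of its bucket, keeping both next and prev links, presumably so a node can later be unlinked without rescanning its chain. A generic userspace sketch of that head insertion and the matching lookup, with a made-up node type and a trivial hash:

#include <stddef.h>

struct node {
	unsigned long key;
	struct node *prev, *next;
};

#define NBUCKETS 61                      /* a prime, like the sizes used later in create_table() */
static struct node *buckets[NBUCKETS];

static unsigned int hashfn(unsigned long key)
{
	return key % NBUCKETS;
}

static void insert_head(struct node *n)
{
	struct node **curr = &buckets[hashfn(n->key)];

	n->prev = NULL;
	if (*curr != NULL)
		(*curr)->prev = n;
	n->next = *curr;
	*curr = n;
}

static struct node *lookup(unsigned long key)
{
	struct node *n = buckets[hashfn(key)];

	while (n && n->key != key)
		n = n->next;
	return n;
}

int main(void)
{
	struct node a = { 42, NULL, NULL };

	insert_head(&a);
	return lookup(42) ? 0 : 1;
}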
47629+
47630+static struct acl_role_label *
47631+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47632+ const gid_t gid)
47633+{
47634+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47635+ struct acl_role_label *match;
47636+ struct role_allowed_ip *ipp;
47637+ unsigned int x;
47638+ u32 curr_ip = task->signal->curr_ip;
47639+
47640+ task->signal->saved_ip = curr_ip;
47641+
47642+ match = acl_role_set.r_hash[index];
47643+
47644+ while (match) {
47645+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47646+ for (x = 0; x < match->domain_child_num; x++) {
47647+ if (match->domain_children[x] == uid)
47648+ goto found;
47649+ }
47650+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47651+ break;
47652+ match = match->next;
47653+ }
47654+found:
47655+ if (match == NULL) {
47656+ try_group:
47657+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47658+ match = acl_role_set.r_hash[index];
47659+
47660+ while (match) {
47661+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47662+ for (x = 0; x < match->domain_child_num; x++) {
47663+ if (match->domain_children[x] == gid)
47664+ goto found2;
47665+ }
47666+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47667+ break;
47668+ match = match->next;
47669+ }
47670+found2:
47671+ if (match == NULL)
47672+ match = default_role;
47673+ if (match->allowed_ips == NULL)
47674+ return match;
47675+ else {
47676+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47677+ if (likely
47678+ ((ntohl(curr_ip) & ipp->netmask) ==
47679+ (ntohl(ipp->addr) & ipp->netmask)))
47680+ return match;
47681+ }
47682+ match = default_role;
47683+ }
47684+ } else if (match->allowed_ips == NULL) {
47685+ return match;
47686+ } else {
47687+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47688+ if (likely
47689+ ((ntohl(curr_ip) & ipp->netmask) ==
47690+ (ntohl(ipp->addr) & ipp->netmask)))
47691+ return match;
47692+ }
47693+ goto try_group;
47694+ }
47695+
47696+ return match;
47697+}
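lookup_acl_role_label() above resolves a task's role in stages: first a user role (or a user domain containing the uid), then a group role or domain, then the default role; a user role whose allowed-IP list does not cover the task's source IP is skipped in favour of the group lookup. The per-role IP test is the usual masked comparison; a standalone sketch of just that check:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* nonzero when addr lies inside net/mask; all values in host byte order */
static int ip_in_net(uint32_t addr, uint32_t net, uint32_t mask)
{
	return (addr & mask) == (net & mask);
}

int main(void)
{
	uint32_t addr = ntohl(inet_addr("192.168.1.77"));
	uint32_t net  = ntohl(inet_addr("192.168.1.0"));
	uint32_t mask = 0xffffff00;      /* a /24 */

	printf("%d\n", ip_in_net(addr, net, mask));   /* prints 1 */
	return 0;
}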
47698+
47699+struct acl_subject_label *
47700+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47701+ const struct acl_role_label *role)
47702+{
47703+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47704+ struct acl_subject_label *match;
47705+
47706+ match = role->subj_hash[index];
47707+
47708+ while (match && (match->inode != ino || match->device != dev ||
47709+ (match->mode & GR_DELETED))) {
47710+ match = match->next;
47711+ }
47712+
47713+ if (match && !(match->mode & GR_DELETED))
47714+ return match;
47715+ else
47716+ return NULL;
47717+}
47718+
47719+struct acl_subject_label *
47720+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47721+ const struct acl_role_label *role)
47722+{
47723+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47724+ struct acl_subject_label *match;
47725+
47726+ match = role->subj_hash[index];
47727+
47728+ while (match && (match->inode != ino || match->device != dev ||
47729+ !(match->mode & GR_DELETED))) {
47730+ match = match->next;
47731+ }
47732+
47733+ if (match && (match->mode & GR_DELETED))
47734+ return match;
47735+ else
47736+ return NULL;
47737+}
47738+
47739+static struct acl_object_label *
47740+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47741+ const struct acl_subject_label *subj)
47742+{
47743+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47744+ struct acl_object_label *match;
47745+
47746+ match = subj->obj_hash[index];
47747+
47748+ while (match && (match->inode != ino || match->device != dev ||
47749+ (match->mode & GR_DELETED))) {
47750+ match = match->next;
47751+ }
47752+
47753+ if (match && !(match->mode & GR_DELETED))
47754+ return match;
47755+ else
47756+ return NULL;
47757+}
47758+
47759+static struct acl_object_label *
47760+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47761+ const struct acl_subject_label *subj)
47762+{
47763+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47764+ struct acl_object_label *match;
47765+
47766+ match = subj->obj_hash[index];
47767+
47768+ while (match && (match->inode != ino || match->device != dev ||
47769+ !(match->mode & GR_DELETED))) {
47770+ match = match->next;
47771+ }
47772+
47773+ if (match && (match->mode & GR_DELETED))
47774+ return match;
47775+
47776+ match = subj->obj_hash[index];
47777+
47778+ while (match && (match->inode != ino || match->device != dev ||
47779+ (match->mode & GR_DELETED))) {
47780+ match = match->next;
47781+ }
47782+
47783+ if (match && !(match->mode & GR_DELETED))
47784+ return match;
47785+ else
47786+ return NULL;
47787+}
47788+
47789+static struct name_entry *
47790+lookup_name_entry(const char *name)
47791+{
47792+ unsigned int len = strlen(name);
47793+ unsigned int key = full_name_hash(name, len);
47794+ unsigned int index = key % name_set.n_size;
47795+ struct name_entry *match;
47796+
47797+ match = name_set.n_hash[index];
47798+
47799+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47800+ match = match->next;
47801+
47802+ return match;
47803+}
47804+
47805+static struct name_entry *
47806+lookup_name_entry_create(const char *name)
47807+{
47808+ unsigned int len = strlen(name);
47809+ unsigned int key = full_name_hash(name, len);
47810+ unsigned int index = key % name_set.n_size;
47811+ struct name_entry *match;
47812+
47813+ match = name_set.n_hash[index];
47814+
47815+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47816+ !match->deleted))
47817+ match = match->next;
47818+
47819+ if (match && match->deleted)
47820+ return match;
47821+
47822+ match = name_set.n_hash[index];
47823+
47824+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47825+ match->deleted))
47826+ match = match->next;
47827+
47828+ if (match && !match->deleted)
47829+ return match;
47830+ else
47831+ return NULL;
47832+}
47833+
47834+static struct inodev_entry *
47835+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47836+{
47837+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47838+ struct inodev_entry *match;
47839+
47840+ match = inodev_set.i_hash[index];
47841+
47842+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47843+ match = match->next;
47844+
47845+ return match;
47846+}
47847+
47848+static void
47849+insert_inodev_entry(struct inodev_entry *entry)
47850+{
47851+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47852+ inodev_set.i_size);
47853+ struct inodev_entry **curr;
47854+
47855+ entry->prev = NULL;
47856+
47857+ curr = &inodev_set.i_hash[index];
47858+ if (*curr != NULL)
47859+ (*curr)->prev = entry;
47860+
47861+ entry->next = *curr;
47862+ *curr = entry;
47863+
47864+ return;
47865+}
47866+
47867+static void
47868+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47869+{
47870+ unsigned int index =
47871+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47872+ struct acl_role_label **curr;
47873+ struct acl_role_label *tmp;
47874+
47875+ curr = &acl_role_set.r_hash[index];
47876+
47877+ /* if role was already inserted due to domains and already has
47878+ a role in the same bucket as it attached, then we need to
47879+ combine these two buckets
47880+ */
47881+ if (role->next) {
47882+ tmp = role->next;
47883+ while (tmp->next)
47884+ tmp = tmp->next;
47885+ tmp->next = *curr;
47886+ } else
47887+ role->next = *curr;
47888+ *curr = role;
47889+
47890+ return;
47891+}
47892+
47893+static void
47894+insert_acl_role_label(struct acl_role_label *role)
47895+{
47896+ int i;
47897+
47898+ if (role_list == NULL) {
47899+ role_list = role;
47900+ role->prev = NULL;
47901+ } else {
47902+ role->prev = role_list;
47903+ role_list = role;
47904+ }
47905+
47906+ /* used for hash chains */
47907+ role->next = NULL;
47908+
47909+ if (role->roletype & GR_ROLE_DOMAIN) {
47910+ for (i = 0; i < role->domain_child_num; i++)
47911+ __insert_acl_role_label(role, role->domain_children[i]);
47912+ } else
47913+ __insert_acl_role_label(role, role->uidgid);
47914+}
47915+
47916+static int
47917+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47918+{
47919+ struct name_entry **curr, *nentry;
47920+ struct inodev_entry *ientry;
47921+ unsigned int len = strlen(name);
47922+ unsigned int key = full_name_hash(name, len);
47923+ unsigned int index = key % name_set.n_size;
47924+
47925+ curr = &name_set.n_hash[index];
47926+
47927+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47928+ curr = &((*curr)->next);
47929+
47930+ if (*curr != NULL)
47931+ return 1;
47932+
47933+ nentry = acl_alloc(sizeof (struct name_entry));
47934+ if (nentry == NULL)
47935+ return 0;
47936+ ientry = acl_alloc(sizeof (struct inodev_entry));
47937+ if (ientry == NULL)
47938+ return 0;
47939+ ientry->nentry = nentry;
47940+
47941+ nentry->key = key;
47942+ nentry->name = name;
47943+ nentry->inode = inode;
47944+ nentry->device = device;
47945+ nentry->len = len;
47946+ nentry->deleted = deleted;
47947+
47948+ nentry->prev = NULL;
47949+ curr = &name_set.n_hash[index];
47950+ if (*curr != NULL)
47951+ (*curr)->prev = nentry;
47952+ nentry->next = *curr;
47953+ *curr = nentry;
47954+
47955+ /* insert us into the table searchable by inode/dev */
47956+ insert_inodev_entry(ientry);
47957+
47958+ return 1;
47959+}
47960+
47961+static void
47962+insert_acl_obj_label(struct acl_object_label *obj,
47963+ struct acl_subject_label *subj)
47964+{
47965+ unsigned int index =
47966+ fhash(obj->inode, obj->device, subj->obj_hash_size);
47967+ struct acl_object_label **curr;
47968+
47969+
47970+ obj->prev = NULL;
47971+
47972+ curr = &subj->obj_hash[index];
47973+ if (*curr != NULL)
47974+ (*curr)->prev = obj;
47975+
47976+ obj->next = *curr;
47977+ *curr = obj;
47978+
47979+ return;
47980+}
47981+
47982+static void
47983+insert_acl_subj_label(struct acl_subject_label *obj,
47984+ struct acl_role_label *role)
47985+{
47986+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47987+ struct acl_subject_label **curr;
47988+
47989+ obj->prev = NULL;
47990+
47991+ curr = &role->subj_hash[index];
47992+ if (*curr != NULL)
47993+ (*curr)->prev = obj;
47994+
47995+ obj->next = *curr;
47996+ *curr = obj;
47997+
47998+ return;
47999+}
48000+
48001+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48002+
48003+static void *
48004+create_table(__u32 * len, int elementsize)
48005+{
48006+ unsigned int table_sizes[] = {
48007+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48008+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48009+ 4194301, 8388593, 16777213, 33554393, 67108859
48010+ };
48011+ void *newtable = NULL;
48012+ unsigned int pwr = 0;
48013+
48014+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48015+ table_sizes[pwr] <= *len)
48016+ pwr++;
48017+
48018+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48019+ return newtable;
48020+
48021+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48022+ newtable =
48023+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48024+ else
48025+ newtable = vmalloc(table_sizes[pwr] * elementsize);
48026+
48027+ *len = table_sizes[pwr];
48028+
48029+ return newtable;
48030+}
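create_table() above rounds the requested element count up to the next prime in a fixed list (keeping the chained-hash load factor near 1, as its comment says), picks kmalloc when the table fits in a page and vmalloc otherwise, and writes the chosen size back through *len. A userspace sketch of the size selection, with calloc standing in for both allocators:

#include <stdio.h>
#include <stdlib.h>

static void *create_table(unsigned int *len, size_t elementsize)
{
	static const unsigned int sizes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
	};
	unsigned int pwr = 0;

	while (pwr < sizeof(sizes) / sizeof(sizes[0]) - 1 && sizes[pwr] <= *len)
		pwr++;
	if (sizes[pwr] <= *len)
		return NULL;             /* request larger than the table covers */

	*len = sizes[pwr];
	return calloc(*len, elementsize);
}

int main(void)
{
	unsigned int n = 100;            /* e.g. 100 expected objects */
	void **table = create_table(&n, sizeof(void *));

	printf("allocated %u buckets\n", n);   /* prints 127 */
	free(table);
	return 0;
}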
48031+
48032+static int
48033+init_variables(const struct gr_arg *arg)
48034+{
48035+ struct task_struct *reaper = &init_task;
48036+ unsigned int stacksize;
48037+
48038+ subj_map_set.s_size = arg->role_db.num_subjects;
48039+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
48040+ name_set.n_size = arg->role_db.num_objects;
48041+ inodev_set.i_size = arg->role_db.num_objects;
48042+
48043+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
48044+ !name_set.n_size || !inodev_set.i_size)
48045+ return 1;
48046+
48047+ if (!gr_init_uidset())
48048+ return 1;
48049+
48050+ /* set up the stack that holds allocation info */
48051+
48052+ stacksize = arg->role_db.num_pointers + 5;
48053+
48054+ if (!acl_alloc_stack_init(stacksize))
48055+ return 1;
48056+
48057+ /* grab reference for the real root dentry and vfsmount */
48058+ get_fs_root(reaper->fs, &real_root);
48059+
48060+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48061+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
48062+#endif
48063+
48064+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
48065+ if (fakefs_obj_rw == NULL)
48066+ return 1;
48067+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
48068+
48069+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
48070+ if (fakefs_obj_rwx == NULL)
48071+ return 1;
48072+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
48073+
48074+ subj_map_set.s_hash =
48075+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
48076+ acl_role_set.r_hash =
48077+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
48078+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
48079+ inodev_set.i_hash =
48080+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
48081+
48082+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
48083+ !name_set.n_hash || !inodev_set.i_hash)
48084+ return 1;
48085+
48086+ memset(subj_map_set.s_hash, 0,
48087+ sizeof(struct subject_map *) * subj_map_set.s_size);
48088+ memset(acl_role_set.r_hash, 0,
48089+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
48090+ memset(name_set.n_hash, 0,
48091+ sizeof (struct name_entry *) * name_set.n_size);
48092+ memset(inodev_set.i_hash, 0,
48093+ sizeof (struct inodev_entry *) * inodev_set.i_size);
48094+
48095+ return 0;
48096+}
48097+
48098+/* free information not needed after startup
48099+ currently contains user->kernel pointer mappings for subjects
48100+*/
48101+
48102+static void
48103+free_init_variables(void)
48104+{
48105+ __u32 i;
48106+
48107+ if (subj_map_set.s_hash) {
48108+ for (i = 0; i < subj_map_set.s_size; i++) {
48109+ if (subj_map_set.s_hash[i]) {
48110+ kfree(subj_map_set.s_hash[i]);
48111+ subj_map_set.s_hash[i] = NULL;
48112+ }
48113+ }
48114+
48115+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
48116+ PAGE_SIZE)
48117+ kfree(subj_map_set.s_hash);
48118+ else
48119+ vfree(subj_map_set.s_hash);
48120+ }
48121+
48122+ return;
48123+}
48124+
48125+static void
48126+free_variables(void)
48127+{
48128+ struct acl_subject_label *s;
48129+ struct acl_role_label *r;
48130+ struct task_struct *task, *task2;
48131+ unsigned int x;
48132+
48133+ gr_clear_learn_entries();
48134+
48135+ read_lock(&tasklist_lock);
48136+ do_each_thread(task2, task) {
48137+ task->acl_sp_role = 0;
48138+ task->acl_role_id = 0;
48139+ task->acl = NULL;
48140+ task->role = NULL;
48141+ } while_each_thread(task2, task);
48142+ read_unlock(&tasklist_lock);
48143+
48144+ /* release the reference to the real root dentry and vfsmount */
48145+ path_put(&real_root);
48146+
48147+ /* free all object hash tables */
48148+
48149+ FOR_EACH_ROLE_START(r)
48150+ if (r->subj_hash == NULL)
48151+ goto next_role;
48152+ FOR_EACH_SUBJECT_START(r, s, x)
48153+ if (s->obj_hash == NULL)
48154+ break;
48155+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48156+ kfree(s->obj_hash);
48157+ else
48158+ vfree(s->obj_hash);
48159+ FOR_EACH_SUBJECT_END(s, x)
48160+ FOR_EACH_NESTED_SUBJECT_START(r, s)
48161+ if (s->obj_hash == NULL)
48162+ break;
48163+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48164+ kfree(s->obj_hash);
48165+ else
48166+ vfree(s->obj_hash);
48167+ FOR_EACH_NESTED_SUBJECT_END(s)
48168+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
48169+ kfree(r->subj_hash);
48170+ else
48171+ vfree(r->subj_hash);
48172+ r->subj_hash = NULL;
48173+next_role:
48174+ FOR_EACH_ROLE_END(r)
48175+
48176+ acl_free_all();
48177+
48178+ if (acl_role_set.r_hash) {
48179+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
48180+ PAGE_SIZE)
48181+ kfree(acl_role_set.r_hash);
48182+ else
48183+ vfree(acl_role_set.r_hash);
48184+ }
48185+ if (name_set.n_hash) {
48186+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
48187+ PAGE_SIZE)
48188+ kfree(name_set.n_hash);
48189+ else
48190+ vfree(name_set.n_hash);
48191+ }
48192+
48193+ if (inodev_set.i_hash) {
48194+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
48195+ PAGE_SIZE)
48196+ kfree(inodev_set.i_hash);
48197+ else
48198+ vfree(inodev_set.i_hash);
48199+ }
48200+
48201+ gr_free_uidset();
48202+
48203+ memset(&name_set, 0, sizeof (struct name_db));
48204+ memset(&inodev_set, 0, sizeof (struct inodev_db));
48205+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
48206+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
48207+
48208+ default_role = NULL;
48209+ role_list = NULL;
48210+
48211+ return;
48212+}
48213+
48214+static __u32
48215+count_user_objs(struct acl_object_label *userp)
48216+{
48217+ struct acl_object_label o_tmp;
48218+ __u32 num = 0;
48219+
48220+ while (userp) {
48221+ if (copy_from_user(&o_tmp, userp,
48222+ sizeof (struct acl_object_label)))
48223+ break;
48224+
48225+ userp = o_tmp.prev;
48226+ num++;
48227+ }
48228+
48229+ return num;
48230+}
48231+
48232+static struct acl_subject_label *
48233+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
48234+
48235+static int
48236+copy_user_glob(struct acl_object_label *obj)
48237+{
48238+ struct acl_object_label *g_tmp, **guser;
48239+ unsigned int len;
48240+ char *tmp;
48241+
48242+ if (obj->globbed == NULL)
48243+ return 0;
48244+
48245+ guser = &obj->globbed;
48246+ while (*guser) {
48247+ g_tmp = (struct acl_object_label *)
48248+ acl_alloc(sizeof (struct acl_object_label));
48249+ if (g_tmp == NULL)
48250+ return -ENOMEM;
48251+
48252+ if (copy_from_user(g_tmp, *guser,
48253+ sizeof (struct acl_object_label)))
48254+ return -EFAULT;
48255+
48256+ len = strnlen_user(g_tmp->filename, PATH_MAX);
48257+
48258+ if (!len || len >= PATH_MAX)
48259+ return -EINVAL;
48260+
48261+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48262+ return -ENOMEM;
48263+
48264+ if (copy_from_user(tmp, g_tmp->filename, len))
48265+ return -EFAULT;
48266+ tmp[len-1] = '\0';
48267+ g_tmp->filename = tmp;
48268+
48269+ *guser = g_tmp;
48270+ guser = &(g_tmp->next);
48271+ }
48272+
48273+ return 0;
48274+}
48275+
48276+static int
48277+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48278+ struct acl_role_label *role)
48279+{
48280+ struct acl_object_label *o_tmp;
48281+ unsigned int len;
48282+ int ret;
48283+ char *tmp;
48284+
48285+ while (userp) {
48286+ if ((o_tmp = (struct acl_object_label *)
48287+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
48288+ return -ENOMEM;
48289+
48290+ if (copy_from_user(o_tmp, userp,
48291+ sizeof (struct acl_object_label)))
48292+ return -EFAULT;
48293+
48294+ userp = o_tmp->prev;
48295+
48296+ len = strnlen_user(o_tmp->filename, PATH_MAX);
48297+
48298+ if (!len || len >= PATH_MAX)
48299+ return -EINVAL;
48300+
48301+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48302+ return -ENOMEM;
48303+
48304+ if (copy_from_user(tmp, o_tmp->filename, len))
48305+ return -EFAULT;
48306+ tmp[len-1] = '\0';
48307+ o_tmp->filename = tmp;
48308+
48309+ insert_acl_obj_label(o_tmp, subj);
48310+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48311+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48312+ return -ENOMEM;
48313+
48314+ ret = copy_user_glob(o_tmp);
48315+ if (ret)
48316+ return ret;
48317+
48318+ if (o_tmp->nested) {
48319+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48320+ if (IS_ERR(o_tmp->nested))
48321+ return PTR_ERR(o_tmp->nested);
48322+
48323+ /* insert into nested subject list */
48324+ o_tmp->nested->next = role->hash->first;
48325+ role->hash->first = o_tmp->nested;
48326+ }
48327+ }
48328+
48329+ return 0;
48330+}
48331+
48332+static __u32
48333+count_user_subjs(struct acl_subject_label *userp)
48334+{
48335+ struct acl_subject_label s_tmp;
48336+ __u32 num = 0;
48337+
48338+ while (userp) {
48339+ if (copy_from_user(&s_tmp, userp,
48340+ sizeof (struct acl_subject_label)))
48341+ break;
48342+
48343+ userp = s_tmp.prev;
48344+ /* do not count nested subjects against this count, since
48345+ they are not included in the hash table, but are
48346+ attached to objects. We have already counted
48347+ the subjects in userspace for the allocation
48348+ stack
48349+ */
48350+ if (!(s_tmp.mode & GR_NESTED))
48351+ num++;
48352+ }
48353+
48354+ return num;
48355+}
48356+
48357+static int
48358+copy_user_allowedips(struct acl_role_label *rolep)
48359+{
48360+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48361+
48362+ ruserip = rolep->allowed_ips;
48363+
48364+ while (ruserip) {
48365+ rlast = rtmp;
48366+
48367+ if ((rtmp = (struct role_allowed_ip *)
48368+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48369+ return -ENOMEM;
48370+
48371+ if (copy_from_user(rtmp, ruserip,
48372+ sizeof (struct role_allowed_ip)))
48373+ return -EFAULT;
48374+
48375+ ruserip = rtmp->prev;
48376+
48377+ if (!rlast) {
48378+ rtmp->prev = NULL;
48379+ rolep->allowed_ips = rtmp;
48380+ } else {
48381+ rlast->next = rtmp;
48382+ rtmp->prev = rlast;
48383+ }
48384+
48385+ if (!ruserip)
48386+ rtmp->next = NULL;
48387+ }
48388+
48389+ return 0;
48390+}
48391+
48392+static int
48393+copy_user_transitions(struct acl_role_label *rolep)
48394+{
48395+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
48396+
48397+ unsigned int len;
48398+ char *tmp;
48399+
48400+ rusertp = rolep->transitions;
48401+
48402+ while (rusertp) {
48403+ rlast = rtmp;
48404+
48405+ if ((rtmp = (struct role_transition *)
48406+ acl_alloc(sizeof (struct role_transition))) == NULL)
48407+ return -ENOMEM;
48408+
48409+ if (copy_from_user(rtmp, rusertp,
48410+ sizeof (struct role_transition)))
48411+ return -EFAULT;
48412+
48413+ rusertp = rtmp->prev;
48414+
48415+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48416+
48417+ if (!len || len >= GR_SPROLE_LEN)
48418+ return -EINVAL;
48419+
48420+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48421+ return -ENOMEM;
48422+
48423+ if (copy_from_user(tmp, rtmp->rolename, len))
48424+ return -EFAULT;
48425+ tmp[len-1] = '\0';
48426+ rtmp->rolename = tmp;
48427+
48428+ if (!rlast) {
48429+ rtmp->prev = NULL;
48430+ rolep->transitions = rtmp;
48431+ } else {
48432+ rlast->next = rtmp;
48433+ rtmp->prev = rlast;
48434+ }
48435+
48436+ if (!rusertp)
48437+ rtmp->next = NULL;
48438+ }
48439+
48440+ return 0;
48441+}
48442+
48443+static struct acl_subject_label *
48444+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48445+{
48446+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48447+ unsigned int len;
48448+ char *tmp;
48449+ __u32 num_objs;
48450+ struct acl_ip_label **i_tmp, *i_utmp2;
48451+ struct gr_hash_struct ghash;
48452+ struct subject_map *subjmap;
48453+ unsigned int i_num;
48454+ int err;
48455+
48456+ s_tmp = lookup_subject_map(userp);
48457+
48458+ /* we've already copied this subject into the kernel, just return
48459+ the reference to it, and don't copy it over again
48460+ */
48461+ if (s_tmp)
48462+ return(s_tmp);
48463+
48464+ if ((s_tmp = (struct acl_subject_label *)
48465+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48466+ return ERR_PTR(-ENOMEM);
48467+
48468+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48469+ if (subjmap == NULL)
48470+ return ERR_PTR(-ENOMEM);
48471+
48472+ subjmap->user = userp;
48473+ subjmap->kernel = s_tmp;
48474+ insert_subj_map_entry(subjmap);
48475+
48476+ if (copy_from_user(s_tmp, userp,
48477+ sizeof (struct acl_subject_label)))
48478+ return ERR_PTR(-EFAULT);
48479+
48480+ len = strnlen_user(s_tmp->filename, PATH_MAX);
48481+
48482+ if (!len || len >= PATH_MAX)
48483+ return ERR_PTR(-EINVAL);
48484+
48485+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48486+ return ERR_PTR(-ENOMEM);
48487+
48488+ if (copy_from_user(tmp, s_tmp->filename, len))
48489+ return ERR_PTR(-EFAULT);
48490+ tmp[len-1] = '\0';
48491+ s_tmp->filename = tmp;
48492+
48493+ if (!strcmp(s_tmp->filename, "/"))
48494+ role->root_label = s_tmp;
48495+
48496+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48497+ return ERR_PTR(-EFAULT);
48498+
48499+ /* copy user and group transition tables */
48500+
48501+ if (s_tmp->user_trans_num) {
48502+ uid_t *uidlist;
48503+
48504+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48505+ if (uidlist == NULL)
48506+ return ERR_PTR(-ENOMEM);
48507+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48508+ return ERR_PTR(-EFAULT);
48509+
48510+ s_tmp->user_transitions = uidlist;
48511+ }
48512+
48513+ if (s_tmp->group_trans_num) {
48514+ gid_t *gidlist;
48515+
48516+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48517+ if (gidlist == NULL)
48518+ return ERR_PTR(-ENOMEM);
48519+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48520+ return ERR_PTR(-EFAULT);
48521+
48522+ s_tmp->group_transitions = gidlist;
48523+ }
48524+
48525+ /* set up object hash table */
48526+ num_objs = count_user_objs(ghash.first);
48527+
48528+ s_tmp->obj_hash_size = num_objs;
48529+ s_tmp->obj_hash =
48530+ (struct acl_object_label **)
48531+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48532+
48533+ if (!s_tmp->obj_hash)
48534+ return ERR_PTR(-ENOMEM);
48535+
48536+ memset(s_tmp->obj_hash, 0,
48537+ s_tmp->obj_hash_size *
48538+ sizeof (struct acl_object_label *));
48539+
48540+ /* add in objects */
48541+ err = copy_user_objs(ghash.first, s_tmp, role);
48542+
48543+ if (err)
48544+ return ERR_PTR(err);
48545+
48546+ /* set pointer for parent subject */
48547+ if (s_tmp->parent_subject) {
48548+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48549+
48550+ if (IS_ERR(s_tmp2))
48551+ return s_tmp2;
48552+
48553+ s_tmp->parent_subject = s_tmp2;
48554+ }
48555+
48556+ /* add in ip acls */
48557+
48558+ if (!s_tmp->ip_num) {
48559+ s_tmp->ips = NULL;
48560+ goto insert;
48561+ }
48562+
48563+ i_tmp =
48564+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48565+ sizeof (struct acl_ip_label *));
48566+
48567+ if (!i_tmp)
48568+ return ERR_PTR(-ENOMEM);
48569+
48570+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48571+ *(i_tmp + i_num) =
48572+ (struct acl_ip_label *)
48573+ acl_alloc(sizeof (struct acl_ip_label));
48574+ if (!*(i_tmp + i_num))
48575+ return ERR_PTR(-ENOMEM);
48576+
48577+ if (copy_from_user
48578+ (&i_utmp2, s_tmp->ips + i_num,
48579+ sizeof (struct acl_ip_label *)))
48580+ return ERR_PTR(-EFAULT);
48581+
48582+ if (copy_from_user
48583+ (*(i_tmp + i_num), i_utmp2,
48584+ sizeof (struct acl_ip_label)))
48585+ return ERR_PTR(-EFAULT);
48586+
48587+ if ((*(i_tmp + i_num))->iface == NULL)
48588+ continue;
48589+
48590+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48591+ if (!len || len >= IFNAMSIZ)
48592+ return ERR_PTR(-EINVAL);
48593+ tmp = acl_alloc(len);
48594+ if (tmp == NULL)
48595+ return ERR_PTR(-ENOMEM);
48596+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48597+ return ERR_PTR(-EFAULT);
48598+ (*(i_tmp + i_num))->iface = tmp;
48599+ }
48600+
48601+ s_tmp->ips = i_tmp;
48602+
48603+insert:
48604+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48605+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48606+ return ERR_PTR(-ENOMEM);
48607+
48608+ return s_tmp;
48609+}
48610+
48611+static int
48612+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48613+{
48614+ struct acl_subject_label s_pre;
48615+ struct acl_subject_label * ret;
48616+ int err;
48617+
48618+ while (userp) {
48619+ if (copy_from_user(&s_pre, userp,
48620+ sizeof (struct acl_subject_label)))
48621+ return -EFAULT;
48622+
48623+ /* do not add nested subjects here, add
48624+ while parsing objects
48625+ */
48626+
48627+ if (s_pre.mode & GR_NESTED) {
48628+ userp = s_pre.prev;
48629+ continue;
48630+ }
48631+
48632+ ret = do_copy_user_subj(userp, role);
48633+
48634+ err = PTR_ERR(ret);
48635+ if (IS_ERR(ret))
48636+ return err;
48637+
48638+ insert_acl_subj_label(ret, role);
48639+
48640+ userp = s_pre.prev;
48641+ }
48642+
48643+ return 0;
48644+}
48645+
48646+static int
48647+copy_user_acl(struct gr_arg *arg)
48648+{
48649+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48650+ struct sprole_pw *sptmp;
48651+ struct gr_hash_struct *ghash;
48652+ uid_t *domainlist;
48653+ unsigned int r_num;
48654+ unsigned int len;
48655+ char *tmp;
48656+ int err = 0;
48657+ __u16 i;
48658+ __u32 num_subjs;
48659+
48660+ /* we need a default and kernel role */
48661+ if (arg->role_db.num_roles < 2)
48662+ return -EINVAL;
48663+
48664+ /* copy special role authentication info from userspace */
48665+
48666+ num_sprole_pws = arg->num_sprole_pws;
48667+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48668+
48669+ if (!acl_special_roles) {
48670+ err = -ENOMEM;
48671+ goto cleanup;
48672+ }
48673+
48674+ for (i = 0; i < num_sprole_pws; i++) {
48675+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48676+ if (!sptmp) {
48677+ err = -ENOMEM;
48678+ goto cleanup;
48679+ }
48680+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48681+ sizeof (struct sprole_pw))) {
48682+ err = -EFAULT;
48683+ goto cleanup;
48684+ }
48685+
48686+ len =
48687+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48688+
48689+ if (!len || len >= GR_SPROLE_LEN) {
48690+ err = -EINVAL;
48691+ goto cleanup;
48692+ }
48693+
48694+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48695+ err = -ENOMEM;
48696+ goto cleanup;
48697+ }
48698+
48699+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48700+ err = -EFAULT;
48701+ goto cleanup;
48702+ }
48703+ tmp[len-1] = '\0';
48704+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48705+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48706+#endif
48707+ sptmp->rolename = tmp;
48708+ acl_special_roles[i] = sptmp;
48709+ }
48710+
48711+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48712+
48713+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48714+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48715+
48716+ if (!r_tmp) {
48717+ err = -ENOMEM;
48718+ goto cleanup;
48719+ }
48720+
48721+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48722+ sizeof (struct acl_role_label *))) {
48723+ err = -EFAULT;
48724+ goto cleanup;
48725+ }
48726+
48727+ if (copy_from_user(r_tmp, r_utmp2,
48728+ sizeof (struct acl_role_label))) {
48729+ err = -EFAULT;
48730+ goto cleanup;
48731+ }
48732+
48733+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48734+
48735+ if (!len || len >= PATH_MAX) {
48736+ err = -EINVAL;
48737+ goto cleanup;
48738+ }
48739+
48740+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48741+ err = -ENOMEM;
48742+ goto cleanup;
48743+ }
48744+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48745+ err = -EFAULT;
48746+ goto cleanup;
48747+ }
48748+ tmp[len-1] = '\0';
48749+ r_tmp->rolename = tmp;
48750+
48751+ if (!strcmp(r_tmp->rolename, "default")
48752+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48753+ default_role = r_tmp;
48754+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48755+ kernel_role = r_tmp;
48756+ }
48757+
48758+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48759+ err = -ENOMEM;
48760+ goto cleanup;
48761+ }
48762+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48763+ err = -EFAULT;
48764+ goto cleanup;
48765+ }
48766+
48767+ r_tmp->hash = ghash;
48768+
48769+ num_subjs = count_user_subjs(r_tmp->hash->first);
48770+
48771+ r_tmp->subj_hash_size = num_subjs;
48772+ r_tmp->subj_hash =
48773+ (struct acl_subject_label **)
48774+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48775+
48776+ if (!r_tmp->subj_hash) {
48777+ err = -ENOMEM;
48778+ goto cleanup;
48779+ }
48780+
48781+ err = copy_user_allowedips(r_tmp);
48782+ if (err)
48783+ goto cleanup;
48784+
48785+ /* copy domain info */
48786+ if (r_tmp->domain_children != NULL) {
48787+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48788+ if (domainlist == NULL) {
48789+ err = -ENOMEM;
48790+ goto cleanup;
48791+ }
48792+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48793+ err = -EFAULT;
48794+ goto cleanup;
48795+ }
48796+ r_tmp->domain_children = domainlist;
48797+ }
48798+
48799+ err = copy_user_transitions(r_tmp);
48800+ if (err)
48801+ goto cleanup;
48802+
48803+ memset(r_tmp->subj_hash, 0,
48804+ r_tmp->subj_hash_size *
48805+ sizeof (struct acl_subject_label *));
48806+
48807+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48808+
48809+ if (err)
48810+ goto cleanup;
48811+
48812+ /* set nested subject list to null */
48813+ r_tmp->hash->first = NULL;
48814+
48815+ insert_acl_role_label(r_tmp);
48816+ }
48817+
48818+ goto return_err;
48819+ cleanup:
48820+ free_variables();
48821+ return_err:
48822+ return err;
48823+
48824+}
48825+
48826+static int
48827+gracl_init(struct gr_arg *args)
48828+{
48829+ int error = 0;
48830+
48831+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48832+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48833+
48834+ if (init_variables(args)) {
48835+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48836+ error = -ENOMEM;
48837+ free_variables();
48838+ goto out;
48839+ }
48840+
48841+ error = copy_user_acl(args);
48842+ free_init_variables();
48843+ if (error) {
48844+ free_variables();
48845+ goto out;
48846+ }
48847+
48848+ if ((error = gr_set_acls(0))) {
48849+ free_variables();
48850+ goto out;
48851+ }
48852+
48853+ pax_open_kernel();
48854+ gr_status |= GR_READY;
48855+ pax_close_kernel();
48856+
48857+ out:
48858+ return error;
48859+}
48860+
48861+/* derived from glibc fnmatch() 0: match, 1: no match*/
48862+
48863+static int
48864+glob_match(const char *p, const char *n)
48865+{
48866+ char c;
48867+
48868+ while ((c = *p++) != '\0') {
48869+ switch (c) {
48870+ case '?':
48871+ if (*n == '\0')
48872+ return 1;
48873+ else if (*n == '/')
48874+ return 1;
48875+ break;
48876+ case '\\':
48877+ if (*n != c)
48878+ return 1;
48879+ break;
48880+ case '*':
48881+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
48882+ if (*n == '/')
48883+ return 1;
48884+ else if (c == '?') {
48885+ if (*n == '\0')
48886+ return 1;
48887+ else
48888+ ++n;
48889+ }
48890+ }
48891+ if (c == '\0') {
48892+ return 0;
48893+ } else {
48894+ const char *endp;
48895+
48896+ if ((endp = strchr(n, '/')) == NULL)
48897+ endp = n + strlen(n);
48898+
48899+ if (c == '[') {
48900+ for (--p; n < endp; ++n)
48901+ if (!glob_match(p, n))
48902+ return 0;
48903+ } else if (c == '/') {
48904+ while (*n != '\0' && *n != '/')
48905+ ++n;
48906+ if (*n == '/' && !glob_match(p, n + 1))
48907+ return 0;
48908+ } else {
48909+ for (--p; n < endp; ++n)
48910+ if (*n == c && !glob_match(p, n))
48911+ return 0;
48912+ }
48913+
48914+ return 1;
48915+ }
48916+ case '[':
48917+ {
48918+ int not;
48919+ char cold;
48920+
48921+ if (*n == '\0' || *n == '/')
48922+ return 1;
48923+
48924+ not = (*p == '!' || *p == '^');
48925+ if (not)
48926+ ++p;
48927+
48928+ c = *p++;
48929+ for (;;) {
48930+ unsigned char fn = (unsigned char)*n;
48931+
48932+ if (c == '\0')
48933+ return 1;
48934+ else {
48935+ if (c == fn)
48936+ goto matched;
48937+ cold = c;
48938+ c = *p++;
48939+
48940+ if (c == '-' && *p != ']') {
48941+ unsigned char cend = *p++;
48942+
48943+ if (cend == '\0')
48944+ return 1;
48945+
48946+ if (cold <= fn && fn <= cend)
48947+ goto matched;
48948+
48949+ c = *p++;
48950+ }
48951+ }
48952+
48953+ if (c == ']')
48954+ break;
48955+ }
48956+ if (!not)
48957+ return 1;
48958+ break;
48959+ matched:
48960+ while (c != ']') {
48961+ if (c == '\0')
48962+ return 1;
48963+
48964+ c = *p++;
48965+ }
48966+ if (not)
48967+ return 1;
48968+ }
48969+ break;
48970+ default:
48971+ if (c != *n)
48972+ return 1;
48973+ }
48974+
48975+ ++n;
48976+ }
48977+
48978+ if (*n == '\0')
48979+ return 0;
48980+
48981+ if (*n == '/')
48982+ return 0;
48983+
48984+ return 1;
48985+}
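glob_match() above is, as its comment says, derived from glibc fnmatch(): it returns 0 on match and 1 on mismatch, '?' never matches '/', and from a reading of the code '*' does not run past a '/' unless it ends the pattern. A small harness that exercises it, assuming the definition above is pasted into the same file:

#include <stdio.h>

/* prototype only: paste the glob_match() definition from above into
   this file for the harness to link */
static int glob_match(const char *p, const char *n);

static void try_pair(const char *pattern, const char *name)
{
	printf("%-18s vs %-18s -> %s\n", pattern, name,
	       glob_match(pattern, name) == 0 ? "match" : "no match");
}

int main(void)
{
	try_pair("/home/*/.ssh", "/home/alice/.ssh");
	try_pair("/tmp/??", "/tmp/ab");
	try_pair("/tmp/??", "/tmp/abc");
	try_pair("/dev/tty[0-9]", "/dev/tty3");
	return 0;
}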
48986+
48987+static struct acl_object_label *
48988+chk_glob_label(struct acl_object_label *globbed,
48989+ struct dentry *dentry, struct vfsmount *mnt, char **path)
48990+{
48991+ struct acl_object_label *tmp;
48992+
48993+ if (*path == NULL)
48994+ *path = gr_to_filename_nolock(dentry, mnt);
48995+
48996+ tmp = globbed;
48997+
48998+ while (tmp) {
48999+ if (!glob_match(tmp->filename, *path))
49000+ return tmp;
49001+ tmp = tmp->next;
49002+ }
49003+
49004+ return NULL;
49005+}
49006+
49007+static struct acl_object_label *
49008+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49009+ const ino_t curr_ino, const dev_t curr_dev,
49010+ const struct acl_subject_label *subj, char **path, const int checkglob)
49011+{
49012+ struct acl_subject_label *tmpsubj;
49013+ struct acl_object_label *retval;
49014+ struct acl_object_label *retval2;
49015+
49016+ tmpsubj = (struct acl_subject_label *) subj;
49017+ read_lock(&gr_inode_lock);
49018+ do {
49019+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49020+ if (retval) {
49021+ if (checkglob && retval->globbed) {
49022+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49023+ (struct vfsmount *)orig_mnt, path);
49024+ if (retval2)
49025+ retval = retval2;
49026+ }
49027+ break;
49028+ }
49029+ } while ((tmpsubj = tmpsubj->parent_subject));
49030+ read_unlock(&gr_inode_lock);
49031+
49032+ return retval;
49033+}
49034+
49035+static __inline__ struct acl_object_label *
49036+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49037+ struct dentry *curr_dentry,
49038+ const struct acl_subject_label *subj, char **path, const int checkglob)
49039+{
49040+ int newglob = checkglob;
49041+ ino_t inode;
49042+ dev_t device;
49043+
49044+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
49045+ as we don't want a / * rule to match instead of the / object
49046+ don't do this for create lookups that call this function though, since they're looking up
49047+ on the parent and thus need globbing checks on all paths
49048+ */
49049+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49050+ newglob = GR_NO_GLOB;
49051+
49052+ spin_lock(&curr_dentry->d_lock);
49053+ inode = curr_dentry->d_inode->i_ino;
49054+ device = __get_dev(curr_dentry);
49055+ spin_unlock(&curr_dentry->d_lock);
49056+
49057+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
49058+}
49059+
49060+static struct acl_object_label *
49061+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49062+ const struct acl_subject_label *subj, char *path, const int checkglob)
49063+{
49064+ struct dentry *dentry = (struct dentry *) l_dentry;
49065+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49066+ struct acl_object_label *retval;
49067+ struct dentry *parent;
49068+
49069+ write_seqlock(&rename_lock);
49070+ br_read_lock(vfsmount_lock);
49071+
49072+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49073+#ifdef CONFIG_NET
49074+ mnt == sock_mnt ||
49075+#endif
49076+#ifdef CONFIG_HUGETLBFS
49077+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49078+#endif
49079+ /* ignore Eric Biederman */
49080+ IS_PRIVATE(l_dentry->d_inode))) {
49081+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49082+ goto out;
49083+ }
49084+
49085+ for (;;) {
49086+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49087+ break;
49088+
49089+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49090+ if (mnt->mnt_parent == mnt)
49091+ break;
49092+
49093+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49094+ if (retval != NULL)
49095+ goto out;
49096+
49097+ dentry = mnt->mnt_mountpoint;
49098+ mnt = mnt->mnt_parent;
49099+ continue;
49100+ }
49101+
49102+ parent = dentry->d_parent;
49103+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49104+ if (retval != NULL)
49105+ goto out;
49106+
49107+ dentry = parent;
49108+ }
49109+
49110+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49111+
49112+ /* real_root is pinned so we don't have to hold a reference */
49113+ if (retval == NULL)
49114+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
49115+out:
49116+ br_read_unlock(vfsmount_lock);
49117+ write_sequnlock(&rename_lock);
49118+
49119+ BUG_ON(retval == NULL);
49120+
49121+ return retval;
49122+}
49123+
49124+static __inline__ struct acl_object_label *
49125+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49126+ const struct acl_subject_label *subj)
49127+{
49128+ char *path = NULL;
49129+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
49130+}
49131+
49132+static __inline__ struct acl_object_label *
49133+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49134+ const struct acl_subject_label *subj)
49135+{
49136+ char *path = NULL;
49137+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
49138+}
49139+
49140+static __inline__ struct acl_object_label *
49141+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49142+ const struct acl_subject_label *subj, char *path)
49143+{
49144+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
49145+}
49146+
49147+static struct acl_subject_label *
49148+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49149+ const struct acl_role_label *role)
49150+{
49151+ struct dentry *dentry = (struct dentry *) l_dentry;
49152+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49153+ struct acl_subject_label *retval;
49154+ struct dentry *parent;
49155+
49156+ write_seqlock(&rename_lock);
49157+ br_read_lock(vfsmount_lock);
49158+
49159+ for (;;) {
49160+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49161+ break;
49162+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49163+ if (mnt->mnt_parent == mnt)
49164+ break;
49165+
49166+ spin_lock(&dentry->d_lock);
49167+ read_lock(&gr_inode_lock);
49168+ retval =
49169+ lookup_acl_subj_label(dentry->d_inode->i_ino,
49170+ __get_dev(dentry), role);
49171+ read_unlock(&gr_inode_lock);
49172+ spin_unlock(&dentry->d_lock);
49173+ if (retval != NULL)
49174+ goto out;
49175+
49176+ dentry = mnt->mnt_mountpoint;
49177+ mnt = mnt->mnt_parent;
49178+ continue;
49179+ }
49180+
49181+ spin_lock(&dentry->d_lock);
49182+ read_lock(&gr_inode_lock);
49183+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49184+ __get_dev(dentry), role);
49185+ read_unlock(&gr_inode_lock);
49186+ parent = dentry->d_parent;
49187+ spin_unlock(&dentry->d_lock);
49188+
49189+ if (retval != NULL)
49190+ goto out;
49191+
49192+ dentry = parent;
49193+ }
49194+
49195+ spin_lock(&dentry->d_lock);
49196+ read_lock(&gr_inode_lock);
49197+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49198+ __get_dev(dentry), role);
49199+ read_unlock(&gr_inode_lock);
49200+ spin_unlock(&dentry->d_lock);
49201+
49202+ if (unlikely(retval == NULL)) {
49203+ /* real_root is pinned, we don't need to hold a reference */
49204+ read_lock(&gr_inode_lock);
49205+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
49206+ __get_dev(real_root.dentry), role);
49207+ read_unlock(&gr_inode_lock);
49208+ }
49209+out:
49210+ br_read_unlock(vfsmount_lock);
49211+ write_sequnlock(&rename_lock);
49212+
49213+ BUG_ON(retval == NULL);
49214+
49215+ return retval;
49216+}
49217+
49218+static void
49219+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
49220+{
49221+ struct task_struct *task = current;
49222+ const struct cred *cred = current_cred();
49223+
49224+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49225+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49226+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49227+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
49228+
49229+ return;
49230+}
49231+
49232+static void
49233+gr_log_learn_sysctl(const char *path, const __u32 mode)
49234+{
49235+ struct task_struct *task = current;
49236+ const struct cred *cred = current_cred();
49237+
49238+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49239+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49240+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49241+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
49242+
49243+ return;
49244+}
49245+
49246+static void
49247+gr_log_learn_id_change(const char type, const unsigned int real,
49248+ const unsigned int effective, const unsigned int fs)
49249+{
49250+ struct task_struct *task = current;
49251+ const struct cred *cred = current_cred();
49252+
49253+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
49254+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49255+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49256+ type, real, effective, fs, &task->signal->saved_ip);
49257+
49258+ return;
49259+}
49260+
49261+__u32
49262+gr_search_file(const struct dentry * dentry, const __u32 mode,
49263+ const struct vfsmount * mnt)
49264+{
49265+ __u32 retval = mode;
49266+ struct acl_subject_label *curracl;
49267+ struct acl_object_label *currobj;
49268+
49269+ if (unlikely(!(gr_status & GR_READY)))
49270+ return (mode & ~GR_AUDITS);
49271+
49272+ curracl = current->acl;
49273+
49274+ currobj = chk_obj_label(dentry, mnt, curracl);
49275+ retval = currobj->mode & mode;
49276+
49277+ /* if we're opening a specified transfer file for writing
49278+ (e.g. /dev/initctl), then transfer our role to init
49279+ */
49280+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49281+ current->role->roletype & GR_ROLE_PERSIST)) {
49282+ struct task_struct *task = init_pid_ns.child_reaper;
49283+
49284+ if (task->role != current->role) {
49285+ task->acl_sp_role = 0;
49286+ task->acl_role_id = current->acl_role_id;
49287+ task->role = current->role;
49288+ rcu_read_lock();
49289+ read_lock(&grsec_exec_file_lock);
49290+ gr_apply_subject_to_task(task);
49291+ read_unlock(&grsec_exec_file_lock);
49292+ rcu_read_unlock();
49293+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49294+ }
49295+ }
49296+
49297+ if (unlikely
49298+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49299+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49300+ __u32 new_mode = mode;
49301+
49302+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49303+
49304+ retval = new_mode;
49305+
49306+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49307+ new_mode |= GR_INHERIT;
49308+
49309+ if (!(mode & GR_NOLEARN))
49310+ gr_log_learn(dentry, mnt, new_mode);
49311+ }
49312+
49313+ return retval;
49314+}
49315+
49316+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
49317+ const struct dentry *parent,
49318+ const struct vfsmount *mnt)
49319+{
49320+ struct name_entry *match;
49321+ struct acl_object_label *matchpo;
49322+ struct acl_subject_label *curracl;
49323+ char *path;
49324+
49325+ if (unlikely(!(gr_status & GR_READY)))
49326+ return NULL;
49327+
49328+ preempt_disable();
49329+ path = gr_to_filename_rbac(new_dentry, mnt);
49330+ match = lookup_name_entry_create(path);
49331+
49332+ curracl = current->acl;
49333+
49334+ if (match) {
49335+ read_lock(&gr_inode_lock);
49336+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49337+ read_unlock(&gr_inode_lock);
49338+
49339+ if (matchpo) {
49340+ preempt_enable();
49341+ return matchpo;
49342+ }
49343+ }
49344+
49345+ // lookup parent
49346+
49347+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49348+
49349+ preempt_enable();
49350+ return matchpo;
49351+}
49352+
49353+__u32
49354+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49355+ const struct vfsmount * mnt, const __u32 mode)
49356+{
49357+ struct acl_object_label *matchpo;
49358+ __u32 retval;
49359+
49360+ if (unlikely(!(gr_status & GR_READY)))
49361+ return (mode & ~GR_AUDITS);
49362+
49363+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
49364+
49365+ retval = matchpo->mode & mode;
49366+
49367+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49368+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49369+ __u32 new_mode = mode;
49370+
49371+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49372+
49373+ gr_log_learn(new_dentry, mnt, new_mode);
49374+ return new_mode;
49375+ }
49376+
49377+ return retval;
49378+}
49379+
49380+__u32
49381+gr_check_link(const struct dentry * new_dentry,
49382+ const struct dentry * parent_dentry,
49383+ const struct vfsmount * parent_mnt,
49384+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
49385+{
49386+ struct acl_object_label *obj;
49387+ __u32 oldmode, newmode;
49388+ __u32 needmode;
49389+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
49390+ GR_DELETE | GR_INHERIT;
49391+
49392+ if (unlikely(!(gr_status & GR_READY)))
49393+ return (GR_CREATE | GR_LINK);
49394+
49395+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49396+ oldmode = obj->mode;
49397+
49398+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
49399+ newmode = obj->mode;
49400+
49401+ needmode = newmode & checkmodes;
49402+
49403+ // old name for hardlink must have at least the permissions of the new name
49404+ if ((oldmode & needmode) != needmode)
49405+ goto bad;
49406+
49407+ // if old name had restrictions/auditing, make sure the new name does as well
49408+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49409+
49410+ // don't allow hardlinking of suid/sgid files without permission
49411+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49412+ needmode |= GR_SETID;
49413+
49414+ if ((newmode & needmode) != needmode)
49415+ goto bad;
49416+
49417+ // enforce minimum permissions
49418+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49419+ return newmode;
49420+bad:
49421+ needmode = oldmode;
49422+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49423+ needmode |= GR_SETID;
49424+
49425+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49426+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
49427+ return (GR_CREATE | GR_LINK);
49428+ } else if (newmode & GR_SUPPRESS)
49429+ return GR_SUPPRESS;
49430+ else
49431+ return 0;
49432+}
49433+
49434+int
49435+gr_check_hidden_task(const struct task_struct *task)
49436+{
49437+ if (unlikely(!(gr_status & GR_READY)))
49438+ return 0;
49439+
49440+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49441+ return 1;
49442+
49443+ return 0;
49444+}
49445+
49446+int
49447+gr_check_protected_task(const struct task_struct *task)
49448+{
49449+ if (unlikely(!(gr_status & GR_READY) || !task))
49450+ return 0;
49451+
49452+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49453+ task->acl != current->acl)
49454+ return 1;
49455+
49456+ return 0;
49457+}
49458+
49459+int
49460+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49461+{
49462+ struct task_struct *p;
49463+ int ret = 0;
49464+
49465+ if (unlikely(!(gr_status & GR_READY) || !pid))
49466+ return ret;
49467+
49468+ read_lock(&tasklist_lock);
49469+ do_each_pid_task(pid, type, p) {
49470+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49471+ p->acl != current->acl) {
49472+ ret = 1;
49473+ goto out;
49474+ }
49475+ } while_each_pid_task(pid, type, p);
49476+out:
49477+ read_unlock(&tasklist_lock);
49478+
49479+ return ret;
49480+}
49481+
49482+void
49483+gr_copy_label(struct task_struct *tsk)
49484+{
49485+ tsk->signal->used_accept = 0;
49486+ tsk->acl_sp_role = 0;
49487+ tsk->acl_role_id = current->acl_role_id;
49488+ tsk->acl = current->acl;
49489+ tsk->role = current->role;
49490+ tsk->signal->curr_ip = current->signal->curr_ip;
49491+ tsk->signal->saved_ip = current->signal->saved_ip;
49492+ if (current->exec_file)
49493+ get_file(current->exec_file);
49494+ tsk->exec_file = current->exec_file;
49495+ tsk->is_writable = current->is_writable;
49496+ if (unlikely(current->signal->used_accept)) {
49497+ current->signal->curr_ip = 0;
49498+ current->signal->saved_ip = 0;
49499+ }
49500+
49501+ return;
49502+}
49503+
49504+static void
49505+gr_set_proc_res(struct task_struct *task)
49506+{
49507+ struct acl_subject_label *proc;
49508+ unsigned short i;
49509+
49510+ proc = task->acl;
49511+
49512+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49513+ return;
49514+
49515+ for (i = 0; i < RLIM_NLIMITS; i++) {
49516+ if (!(proc->resmask & (1 << i)))
49517+ continue;
49518+
49519+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49520+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49521+ }
49522+
49523+ return;
49524+}
49525+
49526+extern int __gr_process_user_ban(struct user_struct *user);
49527+
49528+int
49529+gr_check_user_change(int real, int effective, int fs)
49530+{
49531+ unsigned int i;
49532+ __u16 num;
49533+ uid_t *uidlist;
49534+ int curuid;
49535+ int realok = 0;
49536+ int effectiveok = 0;
49537+ int fsok = 0;
49538+
49539+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49540+ struct user_struct *user;
49541+
49542+ if (real == -1)
49543+ goto skipit;
49544+
49545+ user = find_user(real);
49546+ if (user == NULL)
49547+ goto skipit;
49548+
49549+ if (__gr_process_user_ban(user)) {
49550+ /* for find_user */
49551+ free_uid(user);
49552+ return 1;
49553+ }
49554+
49555+ /* for find_user */
49556+ free_uid(user);
49557+
49558+skipit:
49559+#endif
49560+
49561+ if (unlikely(!(gr_status & GR_READY)))
49562+ return 0;
49563+
49564+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49565+ gr_log_learn_id_change('u', real, effective, fs);
49566+
49567+ num = current->acl->user_trans_num;
49568+ uidlist = current->acl->user_transitions;
49569+
49570+ if (uidlist == NULL)
49571+ return 0;
49572+
49573+ if (real == -1)
49574+ realok = 1;
49575+ if (effective == -1)
49576+ effectiveok = 1;
49577+ if (fs == -1)
49578+ fsok = 1;
49579+
49580+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
49581+ for (i = 0; i < num; i++) {
49582+ curuid = (int)uidlist[i];
49583+ if (real == curuid)
49584+ realok = 1;
49585+ if (effective == curuid)
49586+ effectiveok = 1;
49587+ if (fs == curuid)
49588+ fsok = 1;
49589+ }
49590+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49591+ for (i = 0; i < num; i++) {
49592+ curuid = (int)uidlist[i];
49593+ if (real == curuid)
49594+ break;
49595+ if (effective == curuid)
49596+ break;
49597+ if (fs == curuid)
49598+ break;
49599+ }
49600+ /* not in deny list */
49601+ if (i == num) {
49602+ realok = 1;
49603+ effectiveok = 1;
49604+ fsok = 1;
49605+ }
49606+ }
49607+
49608+ if (realok && effectiveok && fsok)
49609+ return 0;
49610+ else {
49611+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49612+ return 1;
49613+ }
49614+}
49615+
49616+int
49617+gr_check_group_change(int real, int effective, int fs)
49618+{
49619+ unsigned int i;
49620+ __u16 num;
49621+ gid_t *gidlist;
49622+ int curgid;
49623+ int realok = 0;
49624+ int effectiveok = 0;
49625+ int fsok = 0;
49626+
49627+ if (unlikely(!(gr_status & GR_READY)))
49628+ return 0;
49629+
49630+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49631+ gr_log_learn_id_change('g', real, effective, fs);
49632+
49633+ num = current->acl->group_trans_num;
49634+ gidlist = current->acl->group_transitions;
49635+
49636+ if (gidlist == NULL)
49637+ return 0;
49638+
49639+ if (real == -1)
49640+ realok = 1;
49641+ if (effective == -1)
49642+ effectiveok = 1;
49643+ if (fs == -1)
49644+ fsok = 1;
49645+
49646+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49647+ for (i = 0; i < num; i++) {
49648+ curgid = (int)gidlist[i];
49649+ if (real == curgid)
49650+ realok = 1;
49651+ if (effective == curgid)
49652+ effectiveok = 1;
49653+ if (fs == curgid)
49654+ fsok = 1;
49655+ }
49656+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49657+ for (i = 0; i < num; i++) {
49658+ curgid = (int)gidlist[i];
49659+ if (real == curgid)
49660+ break;
49661+ if (effective == curgid)
49662+ break;
49663+ if (fs == curgid)
49664+ break;
49665+ }
49666+ /* not in deny list */
49667+ if (i == num) {
49668+ realok = 1;
49669+ effectiveok = 1;
49670+ fsok = 1;
49671+ }
49672+ }
49673+
49674+ if (realok && effectiveok && fsok)
49675+ return 0;
49676+ else {
49677+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49678+ return 1;
49679+ }
49680+}
49681+
49682+void
49683+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49684+{
49685+ struct acl_role_label *role = task->role;
49686+ struct acl_subject_label *subj = NULL;
49687+ struct acl_object_label *obj;
49688+ struct file *filp;
49689+
49690+ if (unlikely(!(gr_status & GR_READY)))
49691+ return;
49692+
49693+ filp = task->exec_file;
49694+
49695+ /* kernel process, we'll give them the kernel role */
49696+ if (unlikely(!filp)) {
49697+ task->role = kernel_role;
49698+ task->acl = kernel_role->root_label;
49699+ return;
49700+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49701+ role = lookup_acl_role_label(task, uid, gid);
49702+
49703+ /* perform subject lookup in possibly new role
49704+ we can use this result below in the case where role == task->role
49705+ */
49706+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49707+
48988	49708+	/* if we changed uid/gid but ended up in the same role
48988	49709+	   and are using inheritance, don't lose the inherited subject:
48988	49710+	   if the current subject is other than what a normal lookup
48988	49711+	   would result in, we arrived via inheritance, so don't
48988	49712+	   lose the subject
48988	49713+	*/
49714+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49715+ (subj == task->acl)))
49716+ task->acl = subj;
49717+
49718+ task->role = role;
49719+
49720+ task->is_writable = 0;
49721+
49722+ /* ignore additional mmap checks for processes that are writable
49723+ by the default ACL */
49724+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49725+ if (unlikely(obj->mode & GR_WRITE))
49726+ task->is_writable = 1;
49727+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49728+ if (unlikely(obj->mode & GR_WRITE))
49729+ task->is_writable = 1;
49730+
49731+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49732+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49733+#endif
49734+
49735+ gr_set_proc_res(task);
49736+
49737+ return;
49738+}
49739+
49740+int
49741+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49742+ const int unsafe_share)
49743+{
49744+ struct task_struct *task = current;
49745+ struct acl_subject_label *newacl;
49746+ struct acl_object_label *obj;
49747+ __u32 retmode;
49748+
49749+ if (unlikely(!(gr_status & GR_READY)))
49750+ return 0;
49751+
49752+ newacl = chk_subj_label(dentry, mnt, task->role);
49753+
49754+ task_lock(task);
49755+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49756+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49757+ !(task->role->roletype & GR_ROLE_GOD) &&
49758+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49759+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49760+ task_unlock(task);
49761+ if (unsafe_share)
49762+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49763+ else
49764+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49765+ return -EACCES;
49766+ }
49767+ task_unlock(task);
49768+
49769+ obj = chk_obj_label(dentry, mnt, task->acl);
49770+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49771+
49772+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49773+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49774+ if (obj->nested)
49775+ task->acl = obj->nested;
49776+ else
49777+ task->acl = newacl;
49778+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49779+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49780+
49781+ task->is_writable = 0;
49782+
49783+ /* ignore additional mmap checks for processes that are writable
49784+ by the default ACL */
49785+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49786+ if (unlikely(obj->mode & GR_WRITE))
49787+ task->is_writable = 1;
49788+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49789+ if (unlikely(obj->mode & GR_WRITE))
49790+ task->is_writable = 1;
49791+
49792+ gr_set_proc_res(task);
49793+
49794+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49795+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49796+#endif
49797+ return 0;
49798+}
49799+
49800+/* always called with valid inodev ptr */
49801+static void
49802+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49803+{
49804+ struct acl_object_label *matchpo;
49805+ struct acl_subject_label *matchps;
49806+ struct acl_subject_label *subj;
49807+ struct acl_role_label *role;
49808+ unsigned int x;
49809+
49810+ FOR_EACH_ROLE_START(role)
49811+ FOR_EACH_SUBJECT_START(role, subj, x)
49812+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49813+ matchpo->mode |= GR_DELETED;
49814+ FOR_EACH_SUBJECT_END(subj,x)
49815+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49816+ if (subj->inode == ino && subj->device == dev)
49817+ subj->mode |= GR_DELETED;
49818+ FOR_EACH_NESTED_SUBJECT_END(subj)
49819+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49820+ matchps->mode |= GR_DELETED;
49821+ FOR_EACH_ROLE_END(role)
49822+
49823+ inodev->nentry->deleted = 1;
49824+
49825+ return;
49826+}
49827+
49828+void
49829+gr_handle_delete(const ino_t ino, const dev_t dev)
49830+{
49831+ struct inodev_entry *inodev;
49832+
49833+ if (unlikely(!(gr_status & GR_READY)))
49834+ return;
49835+
49836+ write_lock(&gr_inode_lock);
49837+ inodev = lookup_inodev_entry(ino, dev);
49838+ if (inodev != NULL)
49839+ do_handle_delete(inodev, ino, dev);
49840+ write_unlock(&gr_inode_lock);
49841+
49842+ return;
49843+}
49844+
49845+static void
49846+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49847+ const ino_t newinode, const dev_t newdevice,
49848+ struct acl_subject_label *subj)
49849+{
49850+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49851+ struct acl_object_label *match;
49852+
49853+ match = subj->obj_hash[index];
49854+
49855+ while (match && (match->inode != oldinode ||
49856+ match->device != olddevice ||
49857+ !(match->mode & GR_DELETED)))
49858+ match = match->next;
49859+
49860+ if (match && (match->inode == oldinode)
49861+ && (match->device == olddevice)
49862+ && (match->mode & GR_DELETED)) {
49863+ if (match->prev == NULL) {
49864+ subj->obj_hash[index] = match->next;
49865+ if (match->next != NULL)
49866+ match->next->prev = NULL;
49867+ } else {
49868+ match->prev->next = match->next;
49869+ if (match->next != NULL)
49870+ match->next->prev = match->prev;
49871+ }
49872+ match->prev = NULL;
49873+ match->next = NULL;
49874+ match->inode = newinode;
49875+ match->device = newdevice;
49876+ match->mode &= ~GR_DELETED;
49877+
49878+ insert_acl_obj_label(match, subj);
49879+ }
49880+
49881+ return;
49882+}
49883+
49884+static void
49885+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49886+ const ino_t newinode, const dev_t newdevice,
49887+ struct acl_role_label *role)
49888+{
49889+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49890+ struct acl_subject_label *match;
49891+
49892+ match = role->subj_hash[index];
49893+
49894+ while (match && (match->inode != oldinode ||
49895+ match->device != olddevice ||
49896+ !(match->mode & GR_DELETED)))
49897+ match = match->next;
49898+
49899+ if (match && (match->inode == oldinode)
49900+ && (match->device == olddevice)
49901+ && (match->mode & GR_DELETED)) {
49902+ if (match->prev == NULL) {
49903+ role->subj_hash[index] = match->next;
49904+ if (match->next != NULL)
49905+ match->next->prev = NULL;
49906+ } else {
49907+ match->prev->next = match->next;
49908+ if (match->next != NULL)
49909+ match->next->prev = match->prev;
49910+ }
49911+ match->prev = NULL;
49912+ match->next = NULL;
49913+ match->inode = newinode;
49914+ match->device = newdevice;
49915+ match->mode &= ~GR_DELETED;
49916+
49917+ insert_acl_subj_label(match, role);
49918+ }
49919+
49920+ return;
49921+}
49922+
49923+static void
49924+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49925+ const ino_t newinode, const dev_t newdevice)
49926+{
49927+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49928+ struct inodev_entry *match;
49929+
49930+ match = inodev_set.i_hash[index];
49931+
49932+ while (match && (match->nentry->inode != oldinode ||
49933+ match->nentry->device != olddevice || !match->nentry->deleted))
49934+ match = match->next;
49935+
49936+ if (match && (match->nentry->inode == oldinode)
49937+ && (match->nentry->device == olddevice) &&
49938+ match->nentry->deleted) {
49939+ if (match->prev == NULL) {
49940+ inodev_set.i_hash[index] = match->next;
49941+ if (match->next != NULL)
49942+ match->next->prev = NULL;
49943+ } else {
49944+ match->prev->next = match->next;
49945+ if (match->next != NULL)
49946+ match->next->prev = match->prev;
49947+ }
49948+ match->prev = NULL;
49949+ match->next = NULL;
49950+ match->nentry->inode = newinode;
49951+ match->nentry->device = newdevice;
49952+ match->nentry->deleted = 0;
49953+
49954+ insert_inodev_entry(match);
49955+ }
49956+
49957+ return;
49958+}
49959+
49960+static void
49961+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
49962+{
49963+ struct acl_subject_label *subj;
49964+ struct acl_role_label *role;
49965+ unsigned int x;
49966+
49967+ FOR_EACH_ROLE_START(role)
49968+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
49969+
49970+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49971+ if ((subj->inode == ino) && (subj->device == dev)) {
49972+ subj->inode = ino;
49973+ subj->device = dev;
49974+ }
49975+ FOR_EACH_NESTED_SUBJECT_END(subj)
49976+ FOR_EACH_SUBJECT_START(role, subj, x)
49977+ update_acl_obj_label(matchn->inode, matchn->device,
49978+ ino, dev, subj);
49979+ FOR_EACH_SUBJECT_END(subj,x)
49980+ FOR_EACH_ROLE_END(role)
49981+
49982+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
49983+
49984+ return;
49985+}
49986+
49987+static void
49988+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49989+ const struct vfsmount *mnt)
49990+{
49991+ ino_t ino = dentry->d_inode->i_ino;
49992+ dev_t dev = __get_dev(dentry);
49993+
49994+ __do_handle_create(matchn, ino, dev);
49995+
49996+ return;
49997+}
49998+
49999+void
50000+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50001+{
50002+ struct name_entry *matchn;
50003+
50004+ if (unlikely(!(gr_status & GR_READY)))
50005+ return;
50006+
50007+ preempt_disable();
50008+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50009+
50010+ if (unlikely((unsigned long)matchn)) {
50011+ write_lock(&gr_inode_lock);
50012+ do_handle_create(matchn, dentry, mnt);
50013+ write_unlock(&gr_inode_lock);
50014+ }
50015+ preempt_enable();
50016+
50017+ return;
50018+}
50019+
50020+void
50021+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
50022+{
50023+ struct name_entry *matchn;
50024+
50025+ if (unlikely(!(gr_status & GR_READY)))
50026+ return;
50027+
50028+ preempt_disable();
50029+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
50030+
50031+ if (unlikely((unsigned long)matchn)) {
50032+ write_lock(&gr_inode_lock);
50033+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
50034+ write_unlock(&gr_inode_lock);
50035+ }
50036+ preempt_enable();
50037+
50038+ return;
50039+}
50040+
50041+void
50042+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50043+ struct dentry *old_dentry,
50044+ struct dentry *new_dentry,
50045+ struct vfsmount *mnt, const __u8 replace)
50046+{
50047+ struct name_entry *matchn;
50048+ struct inodev_entry *inodev;
50049+ struct inode *inode = new_dentry->d_inode;
50050+ ino_t old_ino = old_dentry->d_inode->i_ino;
50051+ dev_t old_dev = __get_dev(old_dentry);
50052+
48988	50053+	/* vfs_rename swaps the name and parent link for old_dentry and
48988	50054+	   new_dentry.
48988	50055+	   at this point, old_dentry has the new name, parent link, and inode
48988	50056+	   for the renamed file.
48988	50057+	   if a file is being replaced by a rename, new_dentry has the inode
48988	50058+	   and name for the replaced file
48988	50059+	*/
50060+
50061+ if (unlikely(!(gr_status & GR_READY)))
50062+ return;
50063+
50064+ preempt_disable();
50065+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50066+
50067+ /* we wouldn't have to check d_inode if it weren't for
50068+ NFS silly-renaming
50069+ */
50070+
50071+ write_lock(&gr_inode_lock);
50072+ if (unlikely(replace && inode)) {
50073+ ino_t new_ino = inode->i_ino;
50074+ dev_t new_dev = __get_dev(new_dentry);
50075+
50076+ inodev = lookup_inodev_entry(new_ino, new_dev);
50077+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
50078+ do_handle_delete(inodev, new_ino, new_dev);
50079+ }
50080+
50081+ inodev = lookup_inodev_entry(old_ino, old_dev);
50082+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
50083+ do_handle_delete(inodev, old_ino, old_dev);
50084+
50085+ if (unlikely((unsigned long)matchn))
50086+ do_handle_create(matchn, old_dentry, mnt);
50087+
50088+ write_unlock(&gr_inode_lock);
50089+ preempt_enable();
50090+
50091+ return;
50092+}
50093+
50094+static int
50095+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50096+ unsigned char **sum)
50097+{
50098+ struct acl_role_label *r;
50099+ struct role_allowed_ip *ipp;
50100+ struct role_transition *trans;
50101+ unsigned int i;
50102+ int found = 0;
50103+ u32 curr_ip = current->signal->curr_ip;
50104+
50105+ current->signal->saved_ip = curr_ip;
50106+
50107+ /* check transition table */
50108+
50109+ for (trans = current->role->transitions; trans; trans = trans->next) {
50110+ if (!strcmp(rolename, trans->rolename)) {
50111+ found = 1;
50112+ break;
50113+ }
50114+ }
50115+
50116+ if (!found)
50117+ return 0;
50118+
50119+ /* handle special roles that do not require authentication
50120+ and check ip */
50121+
50122+ FOR_EACH_ROLE_START(r)
50123+ if (!strcmp(rolename, r->rolename) &&
50124+ (r->roletype & GR_ROLE_SPECIAL)) {
50125+ found = 0;
50126+ if (r->allowed_ips != NULL) {
50127+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
50128+ if ((ntohl(curr_ip) & ipp->netmask) ==
50129+ (ntohl(ipp->addr) & ipp->netmask))
50130+ found = 1;
50131+ }
50132+ } else
50133+ found = 2;
50134+ if (!found)
50135+ return 0;
50136+
50137+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
50138+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
50139+ *salt = NULL;
50140+ *sum = NULL;
50141+ return 1;
50142+ }
50143+ }
50144+ FOR_EACH_ROLE_END(r)
50145+
50146+ for (i = 0; i < num_sprole_pws; i++) {
50147+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
50148+ *salt = acl_special_roles[i]->salt;
50149+ *sum = acl_special_roles[i]->sum;
50150+ return 1;
50151+ }
50152+ }
50153+
50154+ return 0;
50155+}
50156+
50157+static void
50158+assign_special_role(char *rolename)
50159+{
50160+ struct acl_object_label *obj;
50161+ struct acl_role_label *r;
50162+ struct acl_role_label *assigned = NULL;
50163+ struct task_struct *tsk;
50164+ struct file *filp;
50165+
50166+ FOR_EACH_ROLE_START(r)
50167+ if (!strcmp(rolename, r->rolename) &&
50168+ (r->roletype & GR_ROLE_SPECIAL)) {
50169+ assigned = r;
50170+ break;
50171+ }
50172+ FOR_EACH_ROLE_END(r)
50173+
50174+ if (!assigned)
50175+ return;
50176+
50177+ read_lock(&tasklist_lock);
50178+ read_lock(&grsec_exec_file_lock);
50179+
50180+ tsk = current->real_parent;
50181+ if (tsk == NULL)
50182+ goto out_unlock;
50183+
50184+ filp = tsk->exec_file;
50185+ if (filp == NULL)
50186+ goto out_unlock;
50187+
50188+ tsk->is_writable = 0;
50189+
50190+ tsk->acl_sp_role = 1;
50191+ tsk->acl_role_id = ++acl_sp_role_value;
50192+ tsk->role = assigned;
50193+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
50194+
50195+ /* ignore additional mmap checks for processes that are writable
50196+ by the default ACL */
50197+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50198+ if (unlikely(obj->mode & GR_WRITE))
50199+ tsk->is_writable = 1;
50200+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
50201+ if (unlikely(obj->mode & GR_WRITE))
50202+ tsk->is_writable = 1;
50203+
50204+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50205+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
50206+#endif
50207+
50208+out_unlock:
50209+ read_unlock(&grsec_exec_file_lock);
50210+ read_unlock(&tasklist_lock);
50211+ return;
50212+}
50213+
50214+int gr_check_secure_terminal(struct task_struct *task)
50215+{
50216+ struct task_struct *p, *p2, *p3;
50217+ struct files_struct *files;
50218+ struct fdtable *fdt;
50219+ struct file *our_file = NULL, *file;
50220+ int i;
50221+
50222+ if (task->signal->tty == NULL)
50223+ return 1;
50224+
50225+ files = get_files_struct(task);
50226+ if (files != NULL) {
50227+ rcu_read_lock();
50228+ fdt = files_fdtable(files);
50229+ for (i=0; i < fdt->max_fds; i++) {
50230+ file = fcheck_files(files, i);
50231+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
50232+ get_file(file);
50233+ our_file = file;
50234+ }
50235+ }
50236+ rcu_read_unlock();
50237+ put_files_struct(files);
50238+ }
50239+
50240+ if (our_file == NULL)
50241+ return 1;
50242+
50243+ read_lock(&tasklist_lock);
50244+ do_each_thread(p2, p) {
50245+ files = get_files_struct(p);
50246+ if (files == NULL ||
50247+ (p->signal && p->signal->tty == task->signal->tty)) {
50248+ if (files != NULL)
50249+ put_files_struct(files);
50250+ continue;
50251+ }
50252+ rcu_read_lock();
50253+ fdt = files_fdtable(files);
50254+ for (i=0; i < fdt->max_fds; i++) {
50255+ file = fcheck_files(files, i);
50256+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
50257+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
50258+ p3 = task;
50259+ while (p3->pid > 0) {
50260+ if (p3 == p)
50261+ break;
50262+ p3 = p3->real_parent;
50263+ }
50264+ if (p3 == p)
50265+ break;
50266+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
50267+ gr_handle_alertkill(p);
50268+ rcu_read_unlock();
50269+ put_files_struct(files);
50270+ read_unlock(&tasklist_lock);
50271+ fput(our_file);
50272+ return 0;
50273+ }
50274+ }
50275+ rcu_read_unlock();
50276+ put_files_struct(files);
50277+ } while_each_thread(p2, p);
50278+ read_unlock(&tasklist_lock);
50279+
50280+ fput(our_file);
50281+ return 1;
50282+}
50283+
50284+ssize_t
50285+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
50286+{
50287+ struct gr_arg_wrapper uwrap;
50288+ unsigned char *sprole_salt = NULL;
50289+ unsigned char *sprole_sum = NULL;
50290+ int error = sizeof (struct gr_arg_wrapper);
50291+ int error2 = 0;
50292+
50293+ mutex_lock(&gr_dev_mutex);
50294+
50295+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
50296+ error = -EPERM;
50297+ goto out;
50298+ }
50299+
50300+ if (count != sizeof (struct gr_arg_wrapper)) {
50301+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50302+ error = -EINVAL;
50303+ goto out;
50304+ }
50305+
50306+
50307+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50308+ gr_auth_expires = 0;
50309+ gr_auth_attempts = 0;
50310+ }
50311+
50312+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50313+ error = -EFAULT;
50314+ goto out;
50315+ }
50316+
50317+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50318+ error = -EINVAL;
50319+ goto out;
50320+ }
50321+
50322+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50323+ error = -EFAULT;
50324+ goto out;
50325+ }
50326+
50327+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50328+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50329+ time_after(gr_auth_expires, get_seconds())) {
50330+ error = -EBUSY;
50331+ goto out;
50332+ }
50333+
48988	50334+	/* if a non-root user is trying to do anything other than use a special role,
48988	50335+	   do not attempt authentication and do not count it towards authentication
48988	50336+	   locking
48988	50337+	*/
50338+
50339+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50340+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50341+ current_uid()) {
50342+ error = -EPERM;
50343+ goto out;
50344+ }
50345+
50346+ /* ensure pw and special role name are null terminated */
50347+
50348+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50349+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50350+
48988	50351+	/* Okay.
48988	50352+	 * We have enough of the argument structure (we have yet
48988	50353+	 * to copy_from_user the tables themselves). Copy the tables
48988	50354+	 * only if we need them, i.e. for loading operations. */
50355+
50356+ switch (gr_usermode->mode) {
50357+ case GR_STATUS:
50358+ if (gr_status & GR_READY) {
50359+ error = 1;
50360+ if (!gr_check_secure_terminal(current))
50361+ error = 3;
50362+ } else
50363+ error = 2;
50364+ goto out;
50365+ case GR_SHUTDOWN:
50366+ if ((gr_status & GR_READY)
50367+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50368+ pax_open_kernel();
50369+ gr_status &= ~GR_READY;
50370+ pax_close_kernel();
50371+
50372+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50373+ free_variables();
50374+ memset(gr_usermode, 0, sizeof (struct gr_arg));
50375+ memset(gr_system_salt, 0, GR_SALT_LEN);
50376+ memset(gr_system_sum, 0, GR_SHA_LEN);
50377+ } else if (gr_status & GR_READY) {
50378+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50379+ error = -EPERM;
50380+ } else {
50381+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50382+ error = -EAGAIN;
50383+ }
50384+ break;
50385+ case GR_ENABLE:
50386+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50387+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50388+ else {
50389+ if (gr_status & GR_READY)
50390+ error = -EAGAIN;
50391+ else
50392+ error = error2;
50393+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50394+ }
50395+ break;
50396+ case GR_RELOAD:
50397+ if (!(gr_status & GR_READY)) {
50398+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50399+ error = -EAGAIN;
50400+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50401+ preempt_disable();
50402+
50403+ pax_open_kernel();
50404+ gr_status &= ~GR_READY;
50405+ pax_close_kernel();
50406+
50407+ free_variables();
50408+ if (!(error2 = gracl_init(gr_usermode))) {
50409+ preempt_enable();
50410+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50411+ } else {
50412+ preempt_enable();
50413+ error = error2;
50414+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50415+ }
50416+ } else {
50417+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50418+ error = -EPERM;
50419+ }
50420+ break;
50421+ case GR_SEGVMOD:
50422+ if (unlikely(!(gr_status & GR_READY))) {
50423+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50424+ error = -EAGAIN;
50425+ break;
50426+ }
50427+
50428+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50429+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50430+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50431+ struct acl_subject_label *segvacl;
50432+ segvacl =
50433+ lookup_acl_subj_label(gr_usermode->segv_inode,
50434+ gr_usermode->segv_device,
50435+ current->role);
50436+ if (segvacl) {
50437+ segvacl->crashes = 0;
50438+ segvacl->expires = 0;
50439+ }
50440+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50441+ gr_remove_uid(gr_usermode->segv_uid);
50442+ }
50443+ } else {
50444+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50445+ error = -EPERM;
50446+ }
50447+ break;
50448+ case GR_SPROLE:
50449+ case GR_SPROLEPAM:
50450+ if (unlikely(!(gr_status & GR_READY))) {
50451+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50452+ error = -EAGAIN;
50453+ break;
50454+ }
50455+
50456+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50457+ current->role->expires = 0;
50458+ current->role->auth_attempts = 0;
50459+ }
50460+
50461+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50462+ time_after(current->role->expires, get_seconds())) {
50463+ error = -EBUSY;
50464+ goto out;
50465+ }
50466+
50467+ if (lookup_special_role_auth
50468+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50469+ && ((!sprole_salt && !sprole_sum)
50470+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50471+ char *p = "";
50472+ assign_special_role(gr_usermode->sp_role);
50473+ read_lock(&tasklist_lock);
50474+ if (current->real_parent)
50475+ p = current->real_parent->role->rolename;
50476+ read_unlock(&tasklist_lock);
50477+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50478+ p, acl_sp_role_value);
50479+ } else {
50480+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50481+ error = -EPERM;
50482+ if(!(current->role->auth_attempts++))
50483+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50484+
50485+ goto out;
50486+ }
50487+ break;
50488+ case GR_UNSPROLE:
50489+ if (unlikely(!(gr_status & GR_READY))) {
50490+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50491+ error = -EAGAIN;
50492+ break;
50493+ }
50494+
50495+ if (current->role->roletype & GR_ROLE_SPECIAL) {
50496+ char *p = "";
50497+ int i = 0;
50498+
50499+ read_lock(&tasklist_lock);
50500+ if (current->real_parent) {
50501+ p = current->real_parent->role->rolename;
50502+ i = current->real_parent->acl_role_id;
50503+ }
50504+ read_unlock(&tasklist_lock);
50505+
50506+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50507+ gr_set_acls(1);
50508+ } else {
50509+ error = -EPERM;
50510+ goto out;
50511+ }
50512+ break;
50513+ default:
50514+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50515+ error = -EINVAL;
50516+ break;
50517+ }
50518+
50519+ if (error != -EPERM)
50520+ goto out;
50521+
50522+ if(!(gr_auth_attempts++))
50523+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50524+
50525+ out:
50526+ mutex_unlock(&gr_dev_mutex);
50527+ return error;
50528+}
50529+
50530+/* must be called with
50531+ rcu_read_lock();
50532+ read_lock(&tasklist_lock);
50533+ read_lock(&grsec_exec_file_lock);
50534+*/
50535+int gr_apply_subject_to_task(struct task_struct *task)
50536+{
50537+ struct acl_object_label *obj;
50538+ char *tmpname;
50539+ struct acl_subject_label *tmpsubj;
50540+ struct file *filp;
50541+ struct name_entry *nmatch;
50542+
50543+ filp = task->exec_file;
50544+ if (filp == NULL)
50545+ return 0;
50546+
50547+ /* the following is to apply the correct subject
50548+ on binaries running when the RBAC system
50549+ is enabled, when the binaries have been
50550+ replaced or deleted since their execution
50551+ -----
50552+ when the RBAC system starts, the inode/dev
50553+ from exec_file will be one the RBAC system
50554+ is unaware of. It only knows the inode/dev
50555+ of the present file on disk, or the absence
50556+ of it.
50557+ */
50558+ preempt_disable();
50559+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50560+
50561+ nmatch = lookup_name_entry(tmpname);
50562+ preempt_enable();
50563+ tmpsubj = NULL;
50564+ if (nmatch) {
50565+ if (nmatch->deleted)
50566+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50567+ else
50568+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50569+ if (tmpsubj != NULL)
50570+ task->acl = tmpsubj;
50571+ }
50572+ if (tmpsubj == NULL)
50573+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50574+ task->role);
50575+ if (task->acl) {
50576+ task->is_writable = 0;
50577+ /* ignore additional mmap checks for processes that are writable
50578+ by the default ACL */
50579+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50580+ if (unlikely(obj->mode & GR_WRITE))
50581+ task->is_writable = 1;
50582+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50583+ if (unlikely(obj->mode & GR_WRITE))
50584+ task->is_writable = 1;
50585+
50586+ gr_set_proc_res(task);
50587+
50588+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50589+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50590+#endif
50591+ } else {
50592+ return 1;
50593+ }
50594+
50595+ return 0;
50596+}
50597+
50598+int
50599+gr_set_acls(const int type)
50600+{
50601+ struct task_struct *task, *task2;
50602+ struct acl_role_label *role = current->role;
50603+ __u16 acl_role_id = current->acl_role_id;
50604+ const struct cred *cred;
50605+ int ret;
50606+
50607+ rcu_read_lock();
50608+ read_lock(&tasklist_lock);
50609+ read_lock(&grsec_exec_file_lock);
50610+ do_each_thread(task2, task) {
50611+ /* check to see if we're called from the exit handler,
50612+ if so, only replace ACLs that have inherited the admin
50613+ ACL */
50614+
50615+ if (type && (task->role != role ||
50616+ task->acl_role_id != acl_role_id))
50617+ continue;
50618+
50619+ task->acl_role_id = 0;
50620+ task->acl_sp_role = 0;
50621+
50622+ if (task->exec_file) {
50623+ cred = __task_cred(task);
50624+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50625+ ret = gr_apply_subject_to_task(task);
50626+ if (ret) {
50627+ read_unlock(&grsec_exec_file_lock);
50628+ read_unlock(&tasklist_lock);
50629+ rcu_read_unlock();
50630+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50631+ return ret;
50632+ }
50633+ } else {
50634+ // it's a kernel process
50635+ task->role = kernel_role;
50636+ task->acl = kernel_role->root_label;
50637+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50638+ task->acl->mode &= ~GR_PROCFIND;
50639+#endif
50640+ }
50641+ } while_each_thread(task2, task);
50642+ read_unlock(&grsec_exec_file_lock);
50643+ read_unlock(&tasklist_lock);
50644+ rcu_read_unlock();
50645+
50646+ return 0;
50647+}
50648+
50649+void
50650+gr_learn_resource(const struct task_struct *task,
50651+ const int res, const unsigned long wanted, const int gt)
50652+{
50653+ struct acl_subject_label *acl;
50654+ const struct cred *cred;
50655+
50656+ if (unlikely((gr_status & GR_READY) &&
50657+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50658+ goto skip_reslog;
50659+
50660+#ifdef CONFIG_GRKERNSEC_RESLOG
50661+ gr_log_resource(task, res, wanted, gt);
50662+#endif
50663+ skip_reslog:
50664+
50665+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50666+ return;
50667+
50668+ acl = task->acl;
50669+
50670+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50671+ !(acl->resmask & (1 << (unsigned short) res))))
50672+ return;
50673+
50674+ if (wanted >= acl->res[res].rlim_cur) {
50675+ unsigned long res_add;
50676+
50677+ res_add = wanted;
50678+ switch (res) {
50679+ case RLIMIT_CPU:
50680+ res_add += GR_RLIM_CPU_BUMP;
50681+ break;
50682+ case RLIMIT_FSIZE:
50683+ res_add += GR_RLIM_FSIZE_BUMP;
50684+ break;
50685+ case RLIMIT_DATA:
50686+ res_add += GR_RLIM_DATA_BUMP;
50687+ break;
50688+ case RLIMIT_STACK:
50689+ res_add += GR_RLIM_STACK_BUMP;
50690+ break;
50691+ case RLIMIT_CORE:
50692+ res_add += GR_RLIM_CORE_BUMP;
50693+ break;
50694+ case RLIMIT_RSS:
50695+ res_add += GR_RLIM_RSS_BUMP;
50696+ break;
50697+ case RLIMIT_NPROC:
50698+ res_add += GR_RLIM_NPROC_BUMP;
50699+ break;
50700+ case RLIMIT_NOFILE:
50701+ res_add += GR_RLIM_NOFILE_BUMP;
50702+ break;
50703+ case RLIMIT_MEMLOCK:
50704+ res_add += GR_RLIM_MEMLOCK_BUMP;
50705+ break;
50706+ case RLIMIT_AS:
50707+ res_add += GR_RLIM_AS_BUMP;
50708+ break;
50709+ case RLIMIT_LOCKS:
50710+ res_add += GR_RLIM_LOCKS_BUMP;
50711+ break;
50712+ case RLIMIT_SIGPENDING:
50713+ res_add += GR_RLIM_SIGPENDING_BUMP;
50714+ break;
50715+ case RLIMIT_MSGQUEUE:
50716+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50717+ break;
50718+ case RLIMIT_NICE:
50719+ res_add += GR_RLIM_NICE_BUMP;
50720+ break;
50721+ case RLIMIT_RTPRIO:
50722+ res_add += GR_RLIM_RTPRIO_BUMP;
50723+ break;
50724+ case RLIMIT_RTTIME:
50725+ res_add += GR_RLIM_RTTIME_BUMP;
50726+ break;
50727+ }
50728+
50729+ acl->res[res].rlim_cur = res_add;
50730+
50731+ if (wanted > acl->res[res].rlim_max)
50732+ acl->res[res].rlim_max = res_add;
50733+
50734+ /* only log the subject filename, since resource logging is supported for
50735+ single-subject learning only */
50736+ rcu_read_lock();
50737+ cred = __task_cred(task);
50738+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50739+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50740+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50741+ "", (unsigned long) res, &task->signal->saved_ip);
50742+ rcu_read_unlock();
50743+ }
50744+
50745+ return;
50746+}
50747+
50748+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50749+void
50750+pax_set_initial_flags(struct linux_binprm *bprm)
50751+{
50752+ struct task_struct *task = current;
50753+ struct acl_subject_label *proc;
50754+ unsigned long flags;
50755+
50756+ if (unlikely(!(gr_status & GR_READY)))
50757+ return;
50758+
50759+ flags = pax_get_flags(task);
50760+
50761+ proc = task->acl;
50762+
50763+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50764+ flags &= ~MF_PAX_PAGEEXEC;
50765+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50766+ flags &= ~MF_PAX_SEGMEXEC;
50767+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50768+ flags &= ~MF_PAX_RANDMMAP;
50769+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50770+ flags &= ~MF_PAX_EMUTRAMP;
50771+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50772+ flags &= ~MF_PAX_MPROTECT;
50773+
50774+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50775+ flags |= MF_PAX_PAGEEXEC;
50776+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50777+ flags |= MF_PAX_SEGMEXEC;
50778+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50779+ flags |= MF_PAX_RANDMMAP;
50780+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50781+ flags |= MF_PAX_EMUTRAMP;
50782+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50783+ flags |= MF_PAX_MPROTECT;
50784+
50785+ pax_set_flags(task, flags);
50786+
50787+ return;
50788+}
50789+#endif
50790+
50791+#ifdef CONFIG_SYSCTL
50792+/* Eric Biederman likes breaking userland ABI and every inode-based security
50793+ system to save 35kb of memory */
50794+
50795+/* we modify the passed in filename, but adjust it back before returning */
50796+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50797+{
50798+ struct name_entry *nmatch;
50799+ char *p, *lastp = NULL;
50800+ struct acl_object_label *obj = NULL, *tmp;
50801+ struct acl_subject_label *tmpsubj;
50802+ char c = '\0';
50803+
50804+ read_lock(&gr_inode_lock);
50805+
50806+ p = name + len - 1;
50807+ do {
50808+ nmatch = lookup_name_entry(name);
50809+ if (lastp != NULL)
50810+ *lastp = c;
50811+
50812+ if (nmatch == NULL)
50813+ goto next_component;
50814+ tmpsubj = current->acl;
50815+ do {
50816+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50817+ if (obj != NULL) {
50818+ tmp = obj->globbed;
50819+ while (tmp) {
50820+ if (!glob_match(tmp->filename, name)) {
50821+ obj = tmp;
50822+ goto found_obj;
50823+ }
50824+ tmp = tmp->next;
50825+ }
50826+ goto found_obj;
50827+ }
50828+ } while ((tmpsubj = tmpsubj->parent_subject));
50829+next_component:
50830+ /* end case */
50831+ if (p == name)
50832+ break;
50833+
50834+ while (*p != '/')
50835+ p--;
50836+ if (p == name)
50837+ lastp = p + 1;
50838+ else {
50839+ lastp = p;
50840+ p--;
50841+ }
50842+ c = *lastp;
50843+ *lastp = '\0';
50844+ } while (1);
50845+found_obj:
50846+ read_unlock(&gr_inode_lock);
50847+ /* obj returned will always be non-null */
50848+ return obj;
50849+}
50850+
50851+/* returns 0 when allowing, non-zero on error
50852+ op of 0 is used for readdir, so we don't log the names of hidden files
50853+*/
50854+__u32
50855+gr_handle_sysctl(const struct ctl_table *table, const int op)
50856+{
50857+ struct ctl_table *tmp;
50858+ const char *proc_sys = "/proc/sys";
50859+ char *path;
50860+ struct acl_object_label *obj;
50861+ unsigned short len = 0, pos = 0, depth = 0, i;
50862+ __u32 err = 0;
50863+ __u32 mode = 0;
50864+
50865+ if (unlikely(!(gr_status & GR_READY)))
50866+ return 0;
50867+
48988	50868+	/* for now, ignore operations on non-sysctl entries if it's not a
48988	50869+	   readdir */
50870+ if (table->child != NULL && op != 0)
50871+ return 0;
50872+
50873+ mode |= GR_FIND;
50874+ /* it's only a read if it's an entry, read on dirs is for readdir */
50875+ if (op & MAY_READ)
50876+ mode |= GR_READ;
50877+ if (op & MAY_WRITE)
50878+ mode |= GR_WRITE;
50879+
50880+ preempt_disable();
50881+
50882+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50883+
50884+ /* it's only a read/write if it's an actual entry, not a dir
50885+ (which are opened for readdir)
50886+ */
50887+
50888+ /* convert the requested sysctl entry into a pathname */
50889+
50890+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50891+ len += strlen(tmp->procname);
50892+ len++;
50893+ depth++;
50894+ }
50895+
50896+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50897+ /* deny */
50898+ goto out;
50899+ }
50900+
50901+ memset(path, 0, PAGE_SIZE);
50902+
50903+ memcpy(path, proc_sys, strlen(proc_sys));
50904+
50905+ pos += strlen(proc_sys);
50906+
50907+ for (; depth > 0; depth--) {
50908+ path[pos] = '/';
50909+ pos++;
50910+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50911+ if (depth == i) {
50912+ memcpy(path + pos, tmp->procname,
50913+ strlen(tmp->procname));
50914+ pos += strlen(tmp->procname);
50915+ }
50916+ i++;
50917+ }
50918+ }
50919+
50920+ obj = gr_lookup_by_name(path, pos);
50921+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50922+
50923+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50924+ ((err & mode) != mode))) {
50925+ __u32 new_mode = mode;
50926+
50927+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50928+
50929+ err = 0;
50930+ gr_log_learn_sysctl(path, new_mode);
50931+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50932+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50933+ err = -ENOENT;
50934+ } else if (!(err & GR_FIND)) {
50935+ err = -ENOENT;
50936+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50937+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50938+ path, (mode & GR_READ) ? " reading" : "",
50939+ (mode & GR_WRITE) ? " writing" : "");
50940+ err = -EACCES;
50941+ } else if ((err & mode) != mode) {
50942+ err = -EACCES;
50943+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50944+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50945+ path, (mode & GR_READ) ? " reading" : "",
50946+ (mode & GR_WRITE) ? " writing" : "");
50947+ err = 0;
50948+ } else
50949+ err = 0;
50950+
50951+ out:
50952+ preempt_enable();
50953+
50954+ return err;
50955+}
50956+#endif
50957+
50958+int
50959+gr_handle_proc_ptrace(struct task_struct *task)
50960+{
50961+ struct file *filp;
50962+ struct task_struct *tmp = task;
50963+ struct task_struct *curtemp = current;
50964+ __u32 retmode;
50965+
50966+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50967+ if (unlikely(!(gr_status & GR_READY)))
50968+ return 0;
50969+#endif
50970+
50971+ read_lock(&tasklist_lock);
50972+ read_lock(&grsec_exec_file_lock);
50973+ filp = task->exec_file;
50974+
50975+ while (tmp->pid > 0) {
50976+ if (tmp == curtemp)
50977+ break;
50978+ tmp = tmp->real_parent;
50979+ }
50980+
50981+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50982+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50983+ read_unlock(&grsec_exec_file_lock);
50984+ read_unlock(&tasklist_lock);
50985+ return 1;
50986+ }
50987+
50988+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50989+ if (!(gr_status & GR_READY)) {
50990+ read_unlock(&grsec_exec_file_lock);
50991+ read_unlock(&tasklist_lock);
50992+ return 0;
50993+ }
50994+#endif
50995+
50996+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50997+ read_unlock(&grsec_exec_file_lock);
50998+ read_unlock(&tasklist_lock);
50999+
51000+ if (retmode & GR_NOPTRACE)
51001+ return 1;
51002+
51003+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51004+ && (current->acl != task->acl || (current->acl != current->role->root_label
51005+ && current->pid != task->pid)))
51006+ return 1;
51007+
51008+ return 0;
51009+}
51010+
51011+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51012+{
51013+ if (unlikely(!(gr_status & GR_READY)))
51014+ return;
51015+
51016+ if (!(current->role->roletype & GR_ROLE_GOD))
51017+ return;
51018+
51019+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51020+ p->role->rolename, gr_task_roletype_to_char(p),
51021+ p->acl->filename);
51022+}
51023+
51024+int
51025+gr_handle_ptrace(struct task_struct *task, const long request)
51026+{
51027+ struct task_struct *tmp = task;
51028+ struct task_struct *curtemp = current;
51029+ __u32 retmode;
51030+
51031+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51032+ if (unlikely(!(gr_status & GR_READY)))
51033+ return 0;
51034+#endif
51035+
51036+ read_lock(&tasklist_lock);
51037+ while (tmp->pid > 0) {
51038+ if (tmp == curtemp)
51039+ break;
51040+ tmp = tmp->real_parent;
51041+ }
51042+
51043+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51044+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51045+ read_unlock(&tasklist_lock);
51046+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51047+ return 1;
51048+ }
51049+ read_unlock(&tasklist_lock);
51050+
51051+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51052+ if (!(gr_status & GR_READY))
51053+ return 0;
51054+#endif
51055+
51056+ read_lock(&grsec_exec_file_lock);
51057+ if (unlikely(!task->exec_file)) {
51058+ read_unlock(&grsec_exec_file_lock);
51059+ return 0;
51060+ }
51061+
51062+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51063+ read_unlock(&grsec_exec_file_lock);
51064+
51065+ if (retmode & GR_NOPTRACE) {
51066+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51067+ return 1;
51068+ }
51069+
51070+ if (retmode & GR_PTRACERD) {
51071+ switch (request) {
51072+ case PTRACE_SEIZE:
51073+ case PTRACE_POKETEXT:
51074+ case PTRACE_POKEDATA:
51075+ case PTRACE_POKEUSR:
51076+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51077+ case PTRACE_SETREGS:
51078+ case PTRACE_SETFPREGS:
51079+#endif
51080+#ifdef CONFIG_X86
51081+ case PTRACE_SETFPXREGS:
51082+#endif
51083+#ifdef CONFIG_ALTIVEC
51084+ case PTRACE_SETVRREGS:
51085+#endif
51086+ return 1;
51087+ default:
51088+ return 0;
51089+ }
51090+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
51091+ !(current->role->roletype & GR_ROLE_GOD) &&
51092+ (current->acl != task->acl)) {
51093+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51094+ return 1;
51095+ }
51096+
51097+ return 0;
51098+}
51099+
51100+static int is_writable_mmap(const struct file *filp)
51101+{
51102+ struct task_struct *task = current;
51103+ struct acl_object_label *obj, *obj2;
51104+
51105+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
51106+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
51107+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51108+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
51109+ task->role->root_label);
51110+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
51111+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
51112+ return 1;
51113+ }
51114+ }
51115+ return 0;
51116+}
51117+
51118+int
51119+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
51120+{
51121+ __u32 mode;
51122+
51123+ if (unlikely(!file || !(prot & PROT_EXEC)))
51124+ return 1;
51125+
51126+ if (is_writable_mmap(file))
51127+ return 0;
51128+
51129+ mode =
51130+ gr_search_file(file->f_path.dentry,
51131+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51132+ file->f_path.mnt);
51133+
51134+ if (!gr_tpe_allow(file))
51135+ return 0;
51136+
51137+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51138+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51139+ return 0;
51140+ } else if (unlikely(!(mode & GR_EXEC))) {
51141+ return 0;
51142+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51143+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51144+ return 1;
51145+ }
51146+
51147+ return 1;
51148+}
51149+
51150+int
51151+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51152+{
51153+ __u32 mode;
51154+
51155+ if (unlikely(!file || !(prot & PROT_EXEC)))
51156+ return 1;
51157+
51158+ if (is_writable_mmap(file))
51159+ return 0;
51160+
51161+ mode =
51162+ gr_search_file(file->f_path.dentry,
51163+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51164+ file->f_path.mnt);
51165+
51166+ if (!gr_tpe_allow(file))
51167+ return 0;
51168+
51169+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51170+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51171+ return 0;
51172+ } else if (unlikely(!(mode & GR_EXEC))) {
51173+ return 0;
51174+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51175+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51176+ return 1;
51177+ }
51178+
51179+ return 1;
51180+}
51181+
51182+void
51183+gr_acl_handle_psacct(struct task_struct *task, const long code)
51184+{
51185+ unsigned long runtime;
51186+ unsigned long cputime;
51187+ unsigned int wday, cday;
51188+ __u8 whr, chr;
51189+ __u8 wmin, cmin;
51190+ __u8 wsec, csec;
51191+ struct timespec timeval;
51192+
51193+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
51194+ !(task->acl->mode & GR_PROCACCT)))
51195+ return;
51196+
51197+ do_posix_clock_monotonic_gettime(&timeval);
51198+ runtime = timeval.tv_sec - task->start_time.tv_sec;
51199+ wday = runtime / (3600 * 24);
51200+ runtime -= wday * (3600 * 24);
51201+ whr = runtime / 3600;
51202+ runtime -= whr * 3600;
51203+ wmin = runtime / 60;
51204+ runtime -= wmin * 60;
51205+ wsec = runtime;
51206+
51207+ cputime = (task->utime + task->stime) / HZ;
51208+ cday = cputime / (3600 * 24);
51209+ cputime -= cday * (3600 * 24);
51210+ chr = cputime / 3600;
51211+ cputime -= chr * 3600;
51212+ cmin = cputime / 60;
51213+ cputime -= cmin * 60;
51214+ csec = cputime;
51215+
51216+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
51217+
51218+ return;
51219+}
51220+
51221+void gr_set_kernel_label(struct task_struct *task)
51222+{
51223+ if (gr_status & GR_READY) {
51224+ task->role = kernel_role;
51225+ task->acl = kernel_role->root_label;
51226+ }
51227+ return;
51228+}
51229+
51230+#ifdef CONFIG_TASKSTATS
51231+int gr_is_taskstats_denied(int pid)
51232+{
51233+ struct task_struct *task;
51234+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51235+ const struct cred *cred;
51236+#endif
51237+ int ret = 0;
51238+
51239+ /* restrict taskstats viewing to un-chrooted root users
51240+ who have the 'view' subject flag if the RBAC system is enabled
51241+ */
51242+
51243+ rcu_read_lock();
51244+ read_lock(&tasklist_lock);
51245+ task = find_task_by_vpid(pid);
51246+ if (task) {
51247+#ifdef CONFIG_GRKERNSEC_CHROOT
51248+ if (proc_is_chrooted(task))
51249+ ret = -EACCES;
51250+#endif
51251+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51252+ cred = __task_cred(task);
51253+#ifdef CONFIG_GRKERNSEC_PROC_USER
51254+ if (cred->uid != 0)
51255+ ret = -EACCES;
51256+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51257+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
51258+ ret = -EACCES;
51259+#endif
51260+#endif
51261+ if (gr_status & GR_READY) {
51262+ if (!(task->acl->mode & GR_VIEW))
51263+ ret = -EACCES;
51264+ }
51265+ } else
51266+ ret = -ENOENT;
51267+
51268+ read_unlock(&tasklist_lock);
51269+ rcu_read_unlock();
51270+
51271+ return ret;
51272+}
51273+#endif
51274+
51275+/* AUXV entries are filled via a descendant of search_binary_handler
51276+ after we've already applied the subject for the target
51277+*/
51278+int gr_acl_enable_at_secure(void)
51279+{
51280+ if (unlikely(!(gr_status & GR_READY)))
51281+ return 0;
51282+
51283+ if (current->acl->mode & GR_ATSECURE)
51284+ return 1;
51285+
51286+ return 0;
51287+}
51288+
51289+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
51290+{
51291+ struct task_struct *task = current;
51292+ struct dentry *dentry = file->f_path.dentry;
51293+ struct vfsmount *mnt = file->f_path.mnt;
51294+ struct acl_object_label *obj, *tmp;
51295+ struct acl_subject_label *subj;
51296+ unsigned int bufsize;
51297+ int is_not_root;
51298+ char *path;
51299+ dev_t dev = __get_dev(dentry);
51300+
51301+ if (unlikely(!(gr_status & GR_READY)))
51302+ return 1;
51303+
51304+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51305+ return 1;
51306+
51307+ /* ignore Eric Biederman */
51308+ if (IS_PRIVATE(dentry->d_inode))
51309+ return 1;
51310+
51311+ subj = task->acl;
51312+ do {
51313+ obj = lookup_acl_obj_label(ino, dev, subj);
51314+ if (obj != NULL)
51315+ return (obj->mode & GR_FIND) ? 1 : 0;
51316+ } while ((subj = subj->parent_subject));
51317+
51318+ /* this is purely an optimization since we're looking for an object
51319+ for the directory we're doing a readdir on
51320+ if it's possible for any globbed object to match the entry we're
51321+ filling into the directory, then the object we find here will be
51322+ an anchor point with attached globbed objects
51323+ */
51324+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51325+ if (obj->globbed == NULL)
51326+ return (obj->mode & GR_FIND) ? 1 : 0;
51327+
51328+ is_not_root = ((obj->filename[0] == '/') &&
51329+ (obj->filename[1] == '\0')) ? 0 : 1;
51330+ bufsize = PAGE_SIZE - namelen - is_not_root;
51331+
51332+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
51333+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51334+ return 1;
51335+
51336+ preempt_disable();
51337+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51338+ bufsize);
51339+
51340+ bufsize = strlen(path);
51341+
51342+ /* if base is "/", don't append an additional slash */
51343+ if (is_not_root)
51344+ *(path + bufsize) = '/';
51345+ memcpy(path + bufsize + is_not_root, name, namelen);
51346+ *(path + bufsize + namelen + is_not_root) = '\0';
51347+
51348+ tmp = obj->globbed;
51349+ while (tmp) {
51350+ if (!glob_match(tmp->filename, path)) {
51351+ preempt_enable();
51352+ return (tmp->mode & GR_FIND) ? 1 : 0;
51353+ }
51354+ tmp = tmp->next;
51355+ }
51356+ preempt_enable();
51357+ return (obj->mode & GR_FIND) ? 1 : 0;
51358+}
51359+
51360+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51361+EXPORT_SYMBOL(gr_acl_is_enabled);
51362+#endif
51363+EXPORT_SYMBOL(gr_learn_resource);
51364+EXPORT_SYMBOL(gr_set_kernel_label);
51365+#ifdef CONFIG_SECURITY
51366+EXPORT_SYMBOL(gr_check_user_change);
51367+EXPORT_SYMBOL(gr_check_group_change);
51368+#endif
51369+
51370diff -urNp linux-3.1.1/grsecurity/gracl_cap.c linux-3.1.1/grsecurity/gracl_cap.c
51371--- linux-3.1.1/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51372+++ linux-3.1.1/grsecurity/gracl_cap.c 2011-11-16 18:40:31.000000000 -0500
51373@@ -0,0 +1,101 @@
51374+#include <linux/kernel.h>
51375+#include <linux/module.h>
51376+#include <linux/sched.h>
51377+#include <linux/gracl.h>
51378+#include <linux/grsecurity.h>
51379+#include <linux/grinternal.h>
51380+
51381+extern const char *captab_log[];
51382+extern int captab_log_entries;
51383+
51384+int
51385+gr_acl_is_capable(const int cap)
51386+{
51387+ struct task_struct *task = current;
51388+ const struct cred *cred = current_cred();
51389+ struct acl_subject_label *curracl;
51390+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51391+ kernel_cap_t cap_audit = __cap_empty_set;
51392+
51393+ if (!gr_acl_is_enabled())
51394+ return 1;
51395+
51396+ curracl = task->acl;
51397+
51398+ cap_drop = curracl->cap_lower;
51399+ cap_mask = curracl->cap_mask;
51400+ cap_audit = curracl->cap_invert_audit;
51401+
51402+ while ((curracl = curracl->parent_subject)) {
51403+ /* if the cap isn't specified in the current computed mask but is specified in the
51404+ current level subject, and is lowered in the current level subject, then add
51405+ it to the set of dropped capabilities
51406+ otherwise, add the current level subject's mask to the current computed mask
51407+ */
51408+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51409+ cap_raise(cap_mask, cap);
51410+ if (cap_raised(curracl->cap_lower, cap))
51411+ cap_raise(cap_drop, cap);
51412+ if (cap_raised(curracl->cap_invert_audit, cap))
51413+ cap_raise(cap_audit, cap);
51414+ }
51415+ }
51416+
51417+ if (!cap_raised(cap_drop, cap)) {
51418+ if (cap_raised(cap_audit, cap))
51419+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51420+ return 1;
51421+ }
51422+
51423+ curracl = task->acl;
51424+
51425+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51426+ && cap_raised(cred->cap_effective, cap)) {
51427+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51428+ task->role->roletype, cred->uid,
51429+ cred->gid, task->exec_file ?
51430+ gr_to_filename(task->exec_file->f_path.dentry,
51431+ task->exec_file->f_path.mnt) : curracl->filename,
51432+ curracl->filename, 0UL,
51433+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51434+ return 1;
51435+ }
51436+
51437+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51438+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51439+ return 0;
51440+}
51441+
51442+int
51443+gr_acl_is_capable_nolog(const int cap)
51444+{
51445+ struct acl_subject_label *curracl;
51446+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51447+
51448+ if (!gr_acl_is_enabled())
51449+ return 1;
51450+
51451+ curracl = current->acl;
51452+
51453+ cap_drop = curracl->cap_lower;
51454+ cap_mask = curracl->cap_mask;
51455+
51456+ while ((curracl = curracl->parent_subject)) {
51457+ /* if the cap isn't specified in the current computed mask but is specified in the
51458+ current level subject, and is lowered in the current level subject, then add
51459+ it to the set of dropped capabilities
51460+ otherwise, add the current level subject's mask to the current computed mask
51461+ */
51462+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51463+ cap_raise(cap_mask, cap);
51464+ if (cap_raised(curracl->cap_lower, cap))
51465+ cap_raise(cap_drop, cap);
51466+ }
51467+ }
51468+
51469+ if (!cap_raised(cap_drop, cap))
51470+ return 1;
51471+
51472+ return 0;
51473+}
51474+
51475diff -urNp linux-3.1.1/grsecurity/gracl_fs.c linux-3.1.1/grsecurity/gracl_fs.c
51476--- linux-3.1.1/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51477+++ linux-3.1.1/grsecurity/gracl_fs.c 2011-11-17 00:25:32.000000000 -0500
51478@@ -0,0 +1,433 @@
51479+#include <linux/kernel.h>
51480+#include <linux/sched.h>
51481+#include <linux/types.h>
51482+#include <linux/fs.h>
51483+#include <linux/file.h>
51484+#include <linux/stat.h>
51485+#include <linux/grsecurity.h>
51486+#include <linux/grinternal.h>
51487+#include <linux/gracl.h>
51488+
51489+__u32
51490+gr_acl_handle_hidden_file(const struct dentry * dentry,
51491+ const struct vfsmount * mnt)
51492+{
51493+ __u32 mode;
51494+
51495+ if (unlikely(!dentry->d_inode))
51496+ return GR_FIND;
51497+
51498+ mode =
51499+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51500+
51501+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51502+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51503+ return mode;
51504+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51505+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51506+ return 0;
51507+ } else if (unlikely(!(mode & GR_FIND)))
51508+ return 0;
51509+
51510+ return GR_FIND;
51511+}
51512+
51513+__u32
51514+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51515+ int acc_mode)
51516+{
51517+ __u32 reqmode = GR_FIND;
51518+ __u32 mode;
51519+
51520+ if (unlikely(!dentry->d_inode))
51521+ return reqmode;
51522+
51523+ if (acc_mode & MAY_APPEND)
51524+ reqmode |= GR_APPEND;
51525+ else if (acc_mode & MAY_WRITE)
51526+ reqmode |= GR_WRITE;
51527+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
51528+ reqmode |= GR_READ;
51529+
51530+ mode =
51531+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51532+ mnt);
51533+
51534+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51535+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51536+ reqmode & GR_READ ? " reading" : "",
51537+ reqmode & GR_WRITE ? " writing" : reqmode &
51538+ GR_APPEND ? " appending" : "");
51539+ return reqmode;
51540+ } else
51541+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51542+ {
51543+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51544+ reqmode & GR_READ ? " reading" : "",
51545+ reqmode & GR_WRITE ? " writing" : reqmode &
51546+ GR_APPEND ? " appending" : "");
51547+ return 0;
51548+ } else if (unlikely((mode & reqmode) != reqmode))
51549+ return 0;
51550+
51551+ return reqmode;
51552+}
51553+
51554+__u32
51555+gr_acl_handle_creat(const struct dentry * dentry,
51556+ const struct dentry * p_dentry,
51557+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
51558+ const int imode)
51559+{
51560+ __u32 reqmode = GR_WRITE | GR_CREATE;
51561+ __u32 mode;
51562+
51563+ if (acc_mode & MAY_APPEND)
51564+ reqmode |= GR_APPEND;
51565+ // if a directory was required or the directory already exists, then
51566+ // don't count this open as a read
51567+ if ((acc_mode & MAY_READ) &&
51568+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
51569+ reqmode |= GR_READ;
51570+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
51571+ reqmode |= GR_SETID;
51572+
51573+ mode =
51574+ gr_check_create(dentry, p_dentry, p_mnt,
51575+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51576+
51577+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51578+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51579+ reqmode & GR_READ ? " reading" : "",
51580+ reqmode & GR_WRITE ? " writing" : reqmode &
51581+ GR_APPEND ? " appending" : "");
51582+ return reqmode;
51583+ } else
51584+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51585+ {
51586+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51587+ reqmode & GR_READ ? " reading" : "",
51588+ reqmode & GR_WRITE ? " writing" : reqmode &
51589+ GR_APPEND ? " appending" : "");
51590+ return 0;
51591+ } else if (unlikely((mode & reqmode) != reqmode))
51592+ return 0;
51593+
51594+ return reqmode;
51595+}
51596+
51597+__u32
51598+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51599+ const int fmode)
51600+{
51601+ __u32 mode, reqmode = GR_FIND;
51602+
51603+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51604+ reqmode |= GR_EXEC;
51605+ if (fmode & S_IWOTH)
51606+ reqmode |= GR_WRITE;
51607+ if (fmode & S_IROTH)
51608+ reqmode |= GR_READ;
51609+
51610+ mode =
51611+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51612+ mnt);
51613+
51614+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51615+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51616+ reqmode & GR_READ ? " reading" : "",
51617+ reqmode & GR_WRITE ? " writing" : "",
51618+ reqmode & GR_EXEC ? " executing" : "");
51619+ return reqmode;
51620+ } else
51621+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51622+ {
51623+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51624+ reqmode & GR_READ ? " reading" : "",
51625+ reqmode & GR_WRITE ? " writing" : "",
51626+ reqmode & GR_EXEC ? " executing" : "");
51627+ return 0;
51628+ } else if (unlikely((mode & reqmode) != reqmode))
51629+ return 0;
51630+
51631+ return reqmode;
51632+}
51633+
51634+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51635+{
51636+ __u32 mode;
51637+
51638+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51639+
51640+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51641+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51642+ return mode;
51643+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51644+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51645+ return 0;
51646+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51647+ return 0;
51648+
51649+ return (reqmode);
51650+}
51651+
51652+__u32
51653+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51654+{
51655+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51656+}
51657+
51658+__u32
51659+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51660+{
51661+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51662+}
51663+
51664+__u32
51665+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51666+{
51667+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51668+}
51669+
51670+__u32
51671+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51672+{
51673+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51674+}
51675+
51676+__u32
51677+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51678+ mode_t mode)
51679+{
51680+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51681+ return 1;
51682+
51683+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51684+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51685+ GR_FCHMOD_ACL_MSG);
51686+ } else {
51687+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51688+ }
51689+}
51690+
51691+__u32
51692+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51693+ mode_t mode)
51694+{
51695+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51696+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51697+ GR_CHMOD_ACL_MSG);
51698+ } else {
51699+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51700+ }
51701+}
51702+
51703+__u32
51704+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51705+{
51706+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51707+}
51708+
51709+__u32
51710+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51711+{
51712+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51713+}
51714+
51715+__u32
51716+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51717+{
51718+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51719+}
51720+
51721+__u32
51722+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51723+{
51724+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51725+ GR_UNIXCONNECT_ACL_MSG);
51726+}
51727+
51728+/* hardlinks require at minimum create and link permission;
51729+ any additional privilege required is based on the
51730+ privilege of the file being linked to
51731+*/
51732+__u32
51733+gr_acl_handle_link(const struct dentry * new_dentry,
51734+ const struct dentry * parent_dentry,
51735+ const struct vfsmount * parent_mnt,
51736+ const struct dentry * old_dentry,
51737+ const struct vfsmount * old_mnt, const char *to)
51738+{
51739+ __u32 mode;
51740+ __u32 needmode = GR_CREATE | GR_LINK;
51741+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51742+
51743+ mode =
51744+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51745+ old_mnt);
51746+
51747+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51748+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51749+ return mode;
51750+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51751+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51752+ return 0;
51753+ } else if (unlikely((mode & needmode) != needmode))
51754+ return 0;
51755+
51756+ return 1;
51757+}
51758+
51759+__u32
51760+gr_acl_handle_symlink(const struct dentry * new_dentry,
51761+ const struct dentry * parent_dentry,
51762+ const struct vfsmount * parent_mnt, const char *from)
51763+{
51764+ __u32 needmode = GR_WRITE | GR_CREATE;
51765+ __u32 mode;
51766+
51767+ mode =
51768+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51769+ GR_CREATE | GR_AUDIT_CREATE |
51770+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51771+
51772+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51773+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51774+ return mode;
51775+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51776+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51777+ return 0;
51778+ } else if (unlikely((mode & needmode) != needmode))
51779+ return 0;
51780+
51781+ return (GR_WRITE | GR_CREATE);
51782+}
51783+
51784+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51785+{
51786+ __u32 mode;
51787+
51788+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51789+
51790+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51791+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51792+ return mode;
51793+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51794+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51795+ return 0;
51796+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51797+ return 0;
51798+
51799+ return (reqmode);
51800+}
51801+
51802+__u32
51803+gr_acl_handle_mknod(const struct dentry * new_dentry,
51804+ const struct dentry * parent_dentry,
51805+ const struct vfsmount * parent_mnt,
51806+ const int mode)
51807+{
51808+ __u32 reqmode = GR_WRITE | GR_CREATE;
51809+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51810+ reqmode |= GR_SETID;
51811+
51812+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51813+ reqmode, GR_MKNOD_ACL_MSG);
51814+}
51815+
51816+__u32
51817+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51818+ const struct dentry *parent_dentry,
51819+ const struct vfsmount *parent_mnt)
51820+{
51821+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51822+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51823+}
51824+
51825+#define RENAME_CHECK_SUCCESS(old, new) \
51826+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51827+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51828+
51829+int
51830+gr_acl_handle_rename(struct dentry *new_dentry,
51831+ struct dentry *parent_dentry,
51832+ const struct vfsmount *parent_mnt,
51833+ struct dentry *old_dentry,
51834+ struct inode *old_parent_inode,
51835+ struct vfsmount *old_mnt, const char *newname)
51836+{
51837+ __u32 comp1, comp2;
51838+ int error = 0;
51839+
51840+ if (unlikely(!gr_acl_is_enabled()))
51841+ return 0;
51842+
51843+ if (!new_dentry->d_inode) {
51844+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51845+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51846+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51847+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51848+ GR_DELETE | GR_AUDIT_DELETE |
51849+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51850+ GR_SUPPRESS, old_mnt);
51851+ } else {
51852+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51853+ GR_CREATE | GR_DELETE |
51854+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51855+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51856+ GR_SUPPRESS, parent_mnt);
51857+ comp2 =
51858+ gr_search_file(old_dentry,
51859+ GR_READ | GR_WRITE | GR_AUDIT_READ |
51860+ GR_DELETE | GR_AUDIT_DELETE |
51861+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51862+ }
51863+
51864+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51865+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51866+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51867+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51868+ && !(comp2 & GR_SUPPRESS)) {
51869+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51870+ error = -EACCES;
51871+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51872+ error = -EACCES;
51873+
51874+ return error;
51875+}
51876+
51877+void
51878+gr_acl_handle_exit(void)
51879+{
51880+ u16 id;
51881+ char *rolename;
51882+ struct file *exec_file;
51883+
51884+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51885+ !(current->role->roletype & GR_ROLE_PERSIST))) {
51886+ id = current->acl_role_id;
51887+ rolename = current->role->rolename;
51888+ gr_set_acls(1);
51889+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51890+ }
51891+
51892+ write_lock(&grsec_exec_file_lock);
51893+ exec_file = current->exec_file;
51894+ current->exec_file = NULL;
51895+ write_unlock(&grsec_exec_file_lock);
51896+
51897+ if (exec_file)
51898+ fput(exec_file);
51899+}
51900+
51901+int
51902+gr_acl_handle_procpidmem(const struct task_struct *task)
51903+{
51904+ if (unlikely(!gr_acl_is_enabled()))
51905+ return 0;
51906+
51907+ if (task != current && task->acl->mode & GR_PROTPROCFD)
51908+ return -EACCES;
51909+
51910+ return 0;
51911+}
51912diff -urNp linux-3.1.1/grsecurity/gracl_ip.c linux-3.1.1/grsecurity/gracl_ip.c
51913--- linux-3.1.1/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51914+++ linux-3.1.1/grsecurity/gracl_ip.c 2011-11-16 18:40:31.000000000 -0500
51915@@ -0,0 +1,381 @@
51916+#include <linux/kernel.h>
51917+#include <asm/uaccess.h>
51918+#include <asm/errno.h>
51919+#include <net/sock.h>
51920+#include <linux/file.h>
51921+#include <linux/fs.h>
51922+#include <linux/net.h>
51923+#include <linux/in.h>
51924+#include <linux/skbuff.h>
51925+#include <linux/ip.h>
51926+#include <linux/udp.h>
51927+#include <linux/types.h>
51928+#include <linux/sched.h>
51929+#include <linux/netdevice.h>
51930+#include <linux/inetdevice.h>
51931+#include <linux/gracl.h>
51932+#include <linux/grsecurity.h>
51933+#include <linux/grinternal.h>
51934+
51935+#define GR_BIND 0x01
51936+#define GR_CONNECT 0x02
51937+#define GR_INVERT 0x04
51938+#define GR_BINDOVERRIDE 0x08
51939+#define GR_CONNECTOVERRIDE 0x10
51940+#define GR_SOCK_FAMILY 0x20
51941+
51942+static const char * gr_protocols[IPPROTO_MAX] = {
51943+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51944+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51945+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51946+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51947+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51948+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51949+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51950+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51951+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51952+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51953+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51954+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51955+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51956+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51957+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51958+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51959+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
51960+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51961+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51962+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51963+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51964+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51965+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51966+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51967+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51968+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51969+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51970+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51971+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51972+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51973+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51974+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51975+ };
51976+
51977+static const char * gr_socktypes[SOCK_MAX] = {
51978+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51979+ "unknown:7", "unknown:8", "unknown:9", "packet"
51980+ };
51981+
51982+static const char * gr_sockfamilies[AF_MAX+1] = {
51983+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51984+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51985+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51986+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
51987+ };
51988+
51989+const char *
51990+gr_proto_to_name(unsigned char proto)
51991+{
51992+ return gr_protocols[proto];
51993+}
51994+
51995+const char *
51996+gr_socktype_to_name(unsigned char type)
51997+{
51998+ return gr_socktypes[type];
51999+}
52000+
52001+const char *
52002+gr_sockfamily_to_name(unsigned char family)
52003+{
52004+ return gr_sockfamilies[family];
52005+}
52006+
52007+int
52008+gr_search_socket(const int domain, const int type, const int protocol)
52009+{
52010+ struct acl_subject_label *curr;
52011+ const struct cred *cred = current_cred();
52012+
52013+ if (unlikely(!gr_acl_is_enabled()))
52014+ goto exit;
52015+
52016+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
52017+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
52018+ goto exit; // let the kernel handle it
52019+
52020+ curr = current->acl;
52021+
52022+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
52023+ /* the family is allowed, if this is PF_INET allow it only if
52024+ the extra sock type/protocol checks pass */
52025+ if (domain == PF_INET)
52026+ goto inet_check;
52027+ goto exit;
52028+ } else {
52029+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52030+ __u32 fakeip = 0;
52031+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52032+ current->role->roletype, cred->uid,
52033+ cred->gid, current->exec_file ?
52034+ gr_to_filename(current->exec_file->f_path.dentry,
52035+ current->exec_file->f_path.mnt) :
52036+ curr->filename, curr->filename,
52037+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
52038+ &current->signal->saved_ip);
52039+ goto exit;
52040+ }
52041+ goto exit_fail;
52042+ }
52043+
52044+inet_check:
52045+ /* the rest of this checking is for IPv4 only */
52046+ if (!curr->ips)
52047+ goto exit;
52048+
52049+ if ((curr->ip_type & (1 << type)) &&
52050+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
52051+ goto exit;
52052+
52053+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52054+	/* we don't place acls on raw sockets, and sometimes
52055+ dgram/ip sockets are opened for ioctl and not
52056+ bind/connect, so we'll fake a bind learn log */
52057+ if (type == SOCK_RAW || type == SOCK_PACKET) {
52058+ __u32 fakeip = 0;
52059+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52060+ current->role->roletype, cred->uid,
52061+ cred->gid, current->exec_file ?
52062+ gr_to_filename(current->exec_file->f_path.dentry,
52063+ current->exec_file->f_path.mnt) :
52064+ curr->filename, curr->filename,
52065+ &fakeip, 0, type,
52066+ protocol, GR_CONNECT, &current->signal->saved_ip);
52067+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
52068+ __u32 fakeip = 0;
52069+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52070+ current->role->roletype, cred->uid,
52071+ cred->gid, current->exec_file ?
52072+ gr_to_filename(current->exec_file->f_path.dentry,
52073+ current->exec_file->f_path.mnt) :
52074+ curr->filename, curr->filename,
52075+ &fakeip, 0, type,
52076+ protocol, GR_BIND, &current->signal->saved_ip);
52077+ }
52078+ /* we'll log when they use connect or bind */
52079+ goto exit;
52080+ }
52081+
52082+exit_fail:
52083+ if (domain == PF_INET)
52084+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
52085+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
52086+ else
52087+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
52088+ gr_socktype_to_name(type), protocol);
52089+
52090+ return 0;
52091+exit:
52092+ return 1;
52093+}
52094+
52095+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
52096+{
52097+ if ((ip->mode & mode) &&
52098+ (ip_port >= ip->low) &&
52099+ (ip_port <= ip->high) &&
52100+ ((ntohl(ip_addr) & our_netmask) ==
52101+ (ntohl(our_addr) & our_netmask))
52102+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
52103+ && (ip->type & (1 << type))) {
52104+ if (ip->mode & GR_INVERT)
52105+ return 2; // specifically denied
52106+ else
52107+ return 1; // allowed
52108+ }
52109+
52110+ return 0; // not specifically allowed, may continue parsing
52111+}
52112+
52113+static int
52114+gr_search_connectbind(const int full_mode, struct sock *sk,
52115+ struct sockaddr_in *addr, const int type)
52116+{
52117+ char iface[IFNAMSIZ] = {0};
52118+ struct acl_subject_label *curr;
52119+ struct acl_ip_label *ip;
52120+ struct inet_sock *isk;
52121+ struct net_device *dev;
52122+ struct in_device *idev;
52123+ unsigned long i;
52124+ int ret;
52125+ int mode = full_mode & (GR_BIND | GR_CONNECT);
52126+ __u32 ip_addr = 0;
52127+ __u32 our_addr;
52128+ __u32 our_netmask;
52129+ char *p;
52130+ __u16 ip_port = 0;
52131+ const struct cred *cred = current_cred();
52132+
52133+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
52134+ return 0;
52135+
52136+ curr = current->acl;
52137+ isk = inet_sk(sk);
52138+
52139+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
52140+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
52141+ addr->sin_addr.s_addr = curr->inaddr_any_override;
52142+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
52143+ struct sockaddr_in saddr;
52144+ int err;
52145+
52146+ saddr.sin_family = AF_INET;
52147+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
52148+ saddr.sin_port = isk->inet_sport;
52149+
52150+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52151+ if (err)
52152+ return err;
52153+
52154+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52155+ if (err)
52156+ return err;
52157+ }
52158+
52159+ if (!curr->ips)
52160+ return 0;
52161+
52162+ ip_addr = addr->sin_addr.s_addr;
52163+ ip_port = ntohs(addr->sin_port);
52164+
52165+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52166+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52167+ current->role->roletype, cred->uid,
52168+ cred->gid, current->exec_file ?
52169+ gr_to_filename(current->exec_file->f_path.dentry,
52170+ current->exec_file->f_path.mnt) :
52171+ curr->filename, curr->filename,
52172+ &ip_addr, ip_port, type,
52173+ sk->sk_protocol, mode, &current->signal->saved_ip);
52174+ return 0;
52175+ }
52176+
52177+ for (i = 0; i < curr->ip_num; i++) {
52178+ ip = *(curr->ips + i);
52179+ if (ip->iface != NULL) {
52180+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
52181+ p = strchr(iface, ':');
52182+ if (p != NULL)
52183+ *p = '\0';
52184+ dev = dev_get_by_name(sock_net(sk), iface);
52185+ if (dev == NULL)
52186+ continue;
52187+ idev = in_dev_get(dev);
52188+ if (idev == NULL) {
52189+ dev_put(dev);
52190+ continue;
52191+ }
52192+ rcu_read_lock();
52193+ for_ifa(idev) {
52194+ if (!strcmp(ip->iface, ifa->ifa_label)) {
52195+ our_addr = ifa->ifa_address;
52196+ our_netmask = 0xffffffff;
52197+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52198+ if (ret == 1) {
52199+ rcu_read_unlock();
52200+ in_dev_put(idev);
52201+ dev_put(dev);
52202+ return 0;
52203+ } else if (ret == 2) {
52204+ rcu_read_unlock();
52205+ in_dev_put(idev);
52206+ dev_put(dev);
52207+ goto denied;
52208+ }
52209+ }
52210+ } endfor_ifa(idev);
52211+ rcu_read_unlock();
52212+ in_dev_put(idev);
52213+ dev_put(dev);
52214+ } else {
52215+ our_addr = ip->addr;
52216+ our_netmask = ip->netmask;
52217+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52218+ if (ret == 1)
52219+ return 0;
52220+ else if (ret == 2)
52221+ goto denied;
52222+ }
52223+ }
52224+
52225+denied:
52226+ if (mode == GR_BIND)
52227+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52228+ else if (mode == GR_CONNECT)
52229+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52230+
52231+ return -EACCES;
52232+}
52233+
52234+int
52235+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
52236+{
52237+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
52238+}
52239+
52240+int
52241+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
52242+{
52243+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
52244+}
52245+
52246+int gr_search_listen(struct socket *sock)
52247+{
52248+ struct sock *sk = sock->sk;
52249+ struct sockaddr_in addr;
52250+
52251+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
52252+ addr.sin_port = inet_sk(sk)->inet_sport;
52253+
52254+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52255+}
52256+
52257+int gr_search_accept(struct socket *sock)
52258+{
52259+ struct sock *sk = sock->sk;
52260+ struct sockaddr_in addr;
52261+
52262+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
52263+ addr.sin_port = inet_sk(sk)->inet_sport;
52264+
52265+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52266+}
52267+
52268+int
52269+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
52270+{
52271+ if (addr)
52272+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
52273+ else {
52274+ struct sockaddr_in sin;
52275+ const struct inet_sock *inet = inet_sk(sk);
52276+
52277+ sin.sin_addr.s_addr = inet->inet_daddr;
52278+ sin.sin_port = inet->inet_dport;
52279+
52280+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52281+ }
52282+}
52283+
52284+int
52285+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
52286+{
52287+ struct sockaddr_in sin;
52288+
52289+ if (unlikely(skb->len < sizeof (struct udphdr)))
52290+ return 0; // skip this packet
52291+
52292+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52293+ sin.sin_port = udp_hdr(skb)->source;
52294+
52295+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52296+}
52297diff -urNp linux-3.1.1/grsecurity/gracl_learn.c linux-3.1.1/grsecurity/gracl_learn.c
52298--- linux-3.1.1/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52299+++ linux-3.1.1/grsecurity/gracl_learn.c 2011-11-16 18:40:31.000000000 -0500
52300@@ -0,0 +1,207 @@
52301+#include <linux/kernel.h>
52302+#include <linux/mm.h>
52303+#include <linux/sched.h>
52304+#include <linux/poll.h>
52305+#include <linux/string.h>
52306+#include <linux/file.h>
52307+#include <linux/types.h>
52308+#include <linux/vmalloc.h>
52309+#include <linux/grinternal.h>
52310+
52311+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52312+ size_t count, loff_t *ppos);
52313+extern int gr_acl_is_enabled(void);
52314+
52315+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52316+static int gr_learn_attached;
52317+
52318+/* use a 512k buffer */
52319+#define LEARN_BUFFER_SIZE (512 * 1024)
52320+
52321+static DEFINE_SPINLOCK(gr_learn_lock);
52322+static DEFINE_MUTEX(gr_learn_user_mutex);
52323+
52324+/* we need to maintain two buffers, so that the kernel context of grlearn
52325+   uses a mutex around the userspace copying, and the other kernel contexts
52326+ use a spinlock when copying into the buffer, since they cannot sleep
52327+*/
52328+static char *learn_buffer;
52329+static char *learn_buffer_user;
52330+static int learn_buffer_len;
52331+static int learn_buffer_user_len;
52332+
52333+static ssize_t
52334+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52335+{
52336+ DECLARE_WAITQUEUE(wait, current);
52337+ ssize_t retval = 0;
52338+
52339+ add_wait_queue(&learn_wait, &wait);
52340+ set_current_state(TASK_INTERRUPTIBLE);
52341+ do {
52342+ mutex_lock(&gr_learn_user_mutex);
52343+ spin_lock(&gr_learn_lock);
52344+ if (learn_buffer_len)
52345+ break;
52346+ spin_unlock(&gr_learn_lock);
52347+ mutex_unlock(&gr_learn_user_mutex);
52348+ if (file->f_flags & O_NONBLOCK) {
52349+ retval = -EAGAIN;
52350+ goto out;
52351+ }
52352+ if (signal_pending(current)) {
52353+ retval = -ERESTARTSYS;
52354+ goto out;
52355+ }
52356+
52357+ schedule();
52358+ } while (1);
52359+
52360+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52361+ learn_buffer_user_len = learn_buffer_len;
52362+ retval = learn_buffer_len;
52363+ learn_buffer_len = 0;
52364+
52365+ spin_unlock(&gr_learn_lock);
52366+
52367+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52368+ retval = -EFAULT;
52369+
52370+ mutex_unlock(&gr_learn_user_mutex);
52371+out:
52372+ set_current_state(TASK_RUNNING);
52373+ remove_wait_queue(&learn_wait, &wait);
52374+ return retval;
52375+}
52376+
52377+static unsigned int
52378+poll_learn(struct file * file, poll_table * wait)
52379+{
52380+ poll_wait(file, &learn_wait, wait);
52381+
52382+ if (learn_buffer_len)
52383+ return (POLLIN | POLLRDNORM);
52384+
52385+ return 0;
52386+}
52387+
52388+void
52389+gr_clear_learn_entries(void)
52390+{
52391+ char *tmp;
52392+
52393+ mutex_lock(&gr_learn_user_mutex);
52394+ spin_lock(&gr_learn_lock);
52395+ tmp = learn_buffer;
52396+ learn_buffer = NULL;
52397+ spin_unlock(&gr_learn_lock);
52398+ if (tmp)
52399+ vfree(tmp);
52400+ if (learn_buffer_user != NULL) {
52401+ vfree(learn_buffer_user);
52402+ learn_buffer_user = NULL;
52403+ }
52404+ learn_buffer_len = 0;
52405+ mutex_unlock(&gr_learn_user_mutex);
52406+
52407+ return;
52408+}
52409+
52410+void
52411+gr_add_learn_entry(const char *fmt, ...)
52412+{
52413+ va_list args;
52414+ unsigned int len;
52415+
52416+ if (!gr_learn_attached)
52417+ return;
52418+
52419+ spin_lock(&gr_learn_lock);
52420+
52421+ /* leave a gap at the end so we know when it's "full" but don't have to
52422+ compute the exact length of the string we're trying to append
52423+ */
52424+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52425+ spin_unlock(&gr_learn_lock);
52426+ wake_up_interruptible(&learn_wait);
52427+ return;
52428+ }
52429+ if (learn_buffer == NULL) {
52430+ spin_unlock(&gr_learn_lock);
52431+ return;
52432+ }
52433+
52434+ va_start(args, fmt);
52435+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52436+ va_end(args);
52437+
52438+ learn_buffer_len += len + 1;
52439+
52440+ spin_unlock(&gr_learn_lock);
52441+ wake_up_interruptible(&learn_wait);
52442+
52443+ return;
52444+}
52445+
52446+static int
52447+open_learn(struct inode *inode, struct file *file)
52448+{
52449+ if (file->f_mode & FMODE_READ && gr_learn_attached)
52450+ return -EBUSY;
52451+ if (file->f_mode & FMODE_READ) {
52452+ int retval = 0;
52453+ mutex_lock(&gr_learn_user_mutex);
52454+ if (learn_buffer == NULL)
52455+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52456+ if (learn_buffer_user == NULL)
52457+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52458+ if (learn_buffer == NULL) {
52459+ retval = -ENOMEM;
52460+ goto out_error;
52461+ }
52462+ if (learn_buffer_user == NULL) {
52463+ retval = -ENOMEM;
52464+ goto out_error;
52465+ }
52466+ learn_buffer_len = 0;
52467+ learn_buffer_user_len = 0;
52468+ gr_learn_attached = 1;
52469+out_error:
52470+ mutex_unlock(&gr_learn_user_mutex);
52471+ return retval;
52472+ }
52473+ return 0;
52474+}
52475+
52476+static int
52477+close_learn(struct inode *inode, struct file *file)
52478+{
52479+ if (file->f_mode & FMODE_READ) {
52480+ char *tmp = NULL;
52481+ mutex_lock(&gr_learn_user_mutex);
52482+ spin_lock(&gr_learn_lock);
52483+ tmp = learn_buffer;
52484+ learn_buffer = NULL;
52485+ spin_unlock(&gr_learn_lock);
52486+ if (tmp)
52487+ vfree(tmp);
52488+ if (learn_buffer_user != NULL) {
52489+ vfree(learn_buffer_user);
52490+ learn_buffer_user = NULL;
52491+ }
52492+ learn_buffer_len = 0;
52493+ learn_buffer_user_len = 0;
52494+ gr_learn_attached = 0;
52495+ mutex_unlock(&gr_learn_user_mutex);
52496+ }
52497+
52498+ return 0;
52499+}
52500+
52501+const struct file_operations grsec_fops = {
52502+ .read = read_learn,
52503+ .write = write_grsec_handler,
52504+ .open = open_learn,
52505+ .release = close_learn,
52506+ .poll = poll_learn,
52507+};
52508diff -urNp linux-3.1.1/grsecurity/gracl_res.c linux-3.1.1/grsecurity/gracl_res.c
52509--- linux-3.1.1/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52510+++ linux-3.1.1/grsecurity/gracl_res.c 2011-11-16 18:40:31.000000000 -0500
52511@@ -0,0 +1,68 @@
52512+#include <linux/kernel.h>
52513+#include <linux/sched.h>
52514+#include <linux/gracl.h>
52515+#include <linux/grinternal.h>
52516+
52517+static const char *restab_log[] = {
52518+ [RLIMIT_CPU] = "RLIMIT_CPU",
52519+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52520+ [RLIMIT_DATA] = "RLIMIT_DATA",
52521+ [RLIMIT_STACK] = "RLIMIT_STACK",
52522+ [RLIMIT_CORE] = "RLIMIT_CORE",
52523+ [RLIMIT_RSS] = "RLIMIT_RSS",
52524+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
52525+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52526+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52527+ [RLIMIT_AS] = "RLIMIT_AS",
52528+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52529+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52530+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52531+ [RLIMIT_NICE] = "RLIMIT_NICE",
52532+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52533+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52534+ [GR_CRASH_RES] = "RLIMIT_CRASH"
52535+};
52536+
52537+void
52538+gr_log_resource(const struct task_struct *task,
52539+ const int res, const unsigned long wanted, const int gt)
52540+{
52541+ const struct cred *cred;
52542+ unsigned long rlim;
52543+
52544+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
52545+ return;
52546+
52547+ // not yet supported resource
52548+ if (unlikely(!restab_log[res]))
52549+ return;
52550+
52551+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52552+ rlim = task_rlimit_max(task, res);
52553+ else
52554+ rlim = task_rlimit(task, res);
52555+
52556+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52557+ return;
52558+
52559+ rcu_read_lock();
52560+ cred = __task_cred(task);
52561+
52562+ if (res == RLIMIT_NPROC &&
52563+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52564+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52565+ goto out_rcu_unlock;
52566+ else if (res == RLIMIT_MEMLOCK &&
52567+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52568+ goto out_rcu_unlock;
52569+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52570+ goto out_rcu_unlock;
52571+ rcu_read_unlock();
52572+
52573+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52574+
52575+ return;
52576+out_rcu_unlock:
52577+ rcu_read_unlock();
52578+ return;
52579+}
52580diff -urNp linux-3.1.1/grsecurity/gracl_segv.c linux-3.1.1/grsecurity/gracl_segv.c
52581--- linux-3.1.1/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52582+++ linux-3.1.1/grsecurity/gracl_segv.c 2011-11-16 18:40:31.000000000 -0500
52583@@ -0,0 +1,299 @@
52584+#include <linux/kernel.h>
52585+#include <linux/mm.h>
52586+#include <asm/uaccess.h>
52587+#include <asm/errno.h>
52588+#include <asm/mman.h>
52589+#include <net/sock.h>
52590+#include <linux/file.h>
52591+#include <linux/fs.h>
52592+#include <linux/net.h>
52593+#include <linux/in.h>
52594+#include <linux/slab.h>
52595+#include <linux/types.h>
52596+#include <linux/sched.h>
52597+#include <linux/timer.h>
52598+#include <linux/gracl.h>
52599+#include <linux/grsecurity.h>
52600+#include <linux/grinternal.h>
52601+
52602+static struct crash_uid *uid_set;
52603+static unsigned short uid_used;
52604+static DEFINE_SPINLOCK(gr_uid_lock);
52605+extern rwlock_t gr_inode_lock;
52606+extern struct acl_subject_label *
52607+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52608+ struct acl_role_label *role);
52609+
52610+#ifdef CONFIG_BTRFS_FS
52611+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
52612+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
52613+#endif
52614+
52615+static inline dev_t __get_dev(const struct dentry *dentry)
52616+{
52617+#ifdef CONFIG_BTRFS_FS
52618+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
52619+ return get_btrfs_dev_from_inode(dentry->d_inode);
52620+ else
52621+#endif
52622+ return dentry->d_inode->i_sb->s_dev;
52623+}
52624+
52625+int
52626+gr_init_uidset(void)
52627+{
52628+ uid_set =
52629+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52630+ uid_used = 0;
52631+
52632+ return uid_set ? 1 : 0;
52633+}
52634+
52635+void
52636+gr_free_uidset(void)
52637+{
52638+ if (uid_set)
52639+ kfree(uid_set);
52640+
52641+ return;
52642+}
52643+
52644+int
52645+gr_find_uid(const uid_t uid)
52646+{
52647+ struct crash_uid *tmp = uid_set;
52648+ uid_t buid;
52649+ int low = 0, high = uid_used - 1, mid;
52650+
52651+ while (high >= low) {
52652+ mid = (low + high) >> 1;
52653+ buid = tmp[mid].uid;
52654+ if (buid == uid)
52655+ return mid;
52656+ if (buid > uid)
52657+ high = mid - 1;
52658+ if (buid < uid)
52659+ low = mid + 1;
52660+ }
52661+
52662+ return -1;
52663+}
52664+
52665+static __inline__ void
52666+gr_insertsort(void)
52667+{
52668+ unsigned short i, j;
52669+ struct crash_uid index;
52670+
52671+ for (i = 1; i < uid_used; i++) {
52672+ index = uid_set[i];
52673+ j = i;
52674+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52675+ uid_set[j] = uid_set[j - 1];
52676+ j--;
52677+ }
52678+ uid_set[j] = index;
52679+ }
52680+
52681+ return;
52682+}
52683+
52684+static __inline__ void
52685+gr_insert_uid(const uid_t uid, const unsigned long expires)
52686+{
52687+ int loc;
52688+
52689+ if (uid_used == GR_UIDTABLE_MAX)
52690+ return;
52691+
52692+ loc = gr_find_uid(uid);
52693+
52694+ if (loc >= 0) {
52695+ uid_set[loc].expires = expires;
52696+ return;
52697+ }
52698+
52699+ uid_set[uid_used].uid = uid;
52700+ uid_set[uid_used].expires = expires;
52701+ uid_used++;
52702+
52703+ gr_insertsort();
52704+
52705+ return;
52706+}
52707+
52708+void
52709+gr_remove_uid(const unsigned short loc)
52710+{
52711+ unsigned short i;
52712+
52713+ for (i = loc + 1; i < uid_used; i++)
52714+ uid_set[i - 1] = uid_set[i];
52715+
52716+ uid_used--;
52717+
52718+ return;
52719+}
52720+
52721+int
52722+gr_check_crash_uid(const uid_t uid)
52723+{
52724+ int loc;
52725+ int ret = 0;
52726+
52727+ if (unlikely(!gr_acl_is_enabled()))
52728+ return 0;
52729+
52730+ spin_lock(&gr_uid_lock);
52731+ loc = gr_find_uid(uid);
52732+
52733+ if (loc < 0)
52734+ goto out_unlock;
52735+
52736+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52737+ gr_remove_uid(loc);
52738+ else
52739+ ret = 1;
52740+
52741+out_unlock:
52742+ spin_unlock(&gr_uid_lock);
52743+ return ret;
52744+}
52745+
52746+static __inline__ int
52747+proc_is_setxid(const struct cred *cred)
52748+{
52749+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52750+ cred->uid != cred->fsuid)
52751+ return 1;
52752+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52753+ cred->gid != cred->fsgid)
52754+ return 1;
52755+
52756+ return 0;
52757+}
52758+
52759+extern int gr_fake_force_sig(int sig, struct task_struct *t);
52760+
52761+void
52762+gr_handle_crash(struct task_struct *task, const int sig)
52763+{
52764+ struct acl_subject_label *curr;
52765+ struct task_struct *tsk, *tsk2;
52766+ const struct cred *cred;
52767+ const struct cred *cred2;
52768+
52769+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52770+ return;
52771+
52772+ if (unlikely(!gr_acl_is_enabled()))
52773+ return;
52774+
52775+ curr = task->acl;
52776+
52777+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52778+ return;
52779+
52780+ if (time_before_eq(curr->expires, get_seconds())) {
52781+ curr->expires = 0;
52782+ curr->crashes = 0;
52783+ }
52784+
52785+ curr->crashes++;
52786+
52787+ if (!curr->expires)
52788+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52789+
52790+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52791+ time_after(curr->expires, get_seconds())) {
52792+ rcu_read_lock();
52793+ cred = __task_cred(task);
52794+ if (cred->uid && proc_is_setxid(cred)) {
52795+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52796+ spin_lock(&gr_uid_lock);
52797+ gr_insert_uid(cred->uid, curr->expires);
52798+ spin_unlock(&gr_uid_lock);
52799+ curr->expires = 0;
52800+ curr->crashes = 0;
52801+ read_lock(&tasklist_lock);
52802+ do_each_thread(tsk2, tsk) {
52803+ cred2 = __task_cred(tsk);
52804+ if (tsk != task && cred2->uid == cred->uid)
52805+ gr_fake_force_sig(SIGKILL, tsk);
52806+ } while_each_thread(tsk2, tsk);
52807+ read_unlock(&tasklist_lock);
52808+ } else {
52809+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52810+ read_lock(&tasklist_lock);
52811+ read_lock(&grsec_exec_file_lock);
52812+ do_each_thread(tsk2, tsk) {
52813+ if (likely(tsk != task)) {
52814+ // if this thread has the same subject as the one that triggered
52815+ // RES_CRASH and it's the same binary, kill it
52816+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
52817+ gr_fake_force_sig(SIGKILL, tsk);
52818+ }
52819+ } while_each_thread(tsk2, tsk);
52820+ read_unlock(&grsec_exec_file_lock);
52821+ read_unlock(&tasklist_lock);
52822+ }
52823+ rcu_read_unlock();
52824+ }
52825+
52826+ return;
52827+}
52828+
52829+int
52830+gr_check_crash_exec(const struct file *filp)
52831+{
52832+ struct acl_subject_label *curr;
52833+
52834+ if (unlikely(!gr_acl_is_enabled()))
52835+ return 0;
52836+
52837+ read_lock(&gr_inode_lock);
52838+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52839+ __get_dev(filp->f_path.dentry),
52840+ current->role);
52841+ read_unlock(&gr_inode_lock);
52842+
52843+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52844+ (!curr->crashes && !curr->expires))
52845+ return 0;
52846+
52847+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52848+ time_after(curr->expires, get_seconds()))
52849+ return 1;
52850+ else if (time_before_eq(curr->expires, get_seconds())) {
52851+ curr->crashes = 0;
52852+ curr->expires = 0;
52853+ }
52854+
52855+ return 0;
52856+}
52857+
52858+void
52859+gr_handle_alertkill(struct task_struct *task)
52860+{
52861+ struct acl_subject_label *curracl;
52862+ __u32 curr_ip;
52863+ struct task_struct *p, *p2;
52864+
52865+ if (unlikely(!gr_acl_is_enabled()))
52866+ return;
52867+
52868+ curracl = task->acl;
52869+ curr_ip = task->signal->curr_ip;
52870+
52871+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52872+ read_lock(&tasklist_lock);
52873+ do_each_thread(p2, p) {
52874+ if (p->signal->curr_ip == curr_ip)
52875+ gr_fake_force_sig(SIGKILL, p);
52876+ } while_each_thread(p2, p);
52877+ read_unlock(&tasklist_lock);
52878+ } else if (curracl->mode & GR_KILLPROC)
52879+ gr_fake_force_sig(SIGKILL, task);
52880+
52881+ return;
52882+}
52883diff -urNp linux-3.1.1/grsecurity/gracl_shm.c linux-3.1.1/grsecurity/gracl_shm.c
52884--- linux-3.1.1/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52885+++ linux-3.1.1/grsecurity/gracl_shm.c 2011-11-16 18:40:31.000000000 -0500
52886@@ -0,0 +1,40 @@
52887+#include <linux/kernel.h>
52888+#include <linux/mm.h>
52889+#include <linux/sched.h>
52890+#include <linux/file.h>
52891+#include <linux/ipc.h>
52892+#include <linux/gracl.h>
52893+#include <linux/grsecurity.h>
52894+#include <linux/grinternal.h>
52895+
52896+int
52897+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52898+ const time_t shm_createtime, const uid_t cuid, const int shmid)
52899+{
52900+ struct task_struct *task;
52901+
52902+ if (!gr_acl_is_enabled())
52903+ return 1;
52904+
52905+ rcu_read_lock();
52906+ read_lock(&tasklist_lock);
52907+
52908+ task = find_task_by_vpid(shm_cprid);
52909+
52910+ if (unlikely(!task))
52911+ task = find_task_by_vpid(shm_lapid);
52912+
52913+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52914+ (task->pid == shm_lapid)) &&
52915+ (task->acl->mode & GR_PROTSHM) &&
52916+ (task->acl != current->acl))) {
52917+ read_unlock(&tasklist_lock);
52918+ rcu_read_unlock();
52919+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52920+ return 0;
52921+ }
52922+ read_unlock(&tasklist_lock);
52923+ rcu_read_unlock();
52924+
52925+ return 1;
52926+}
52927diff -urNp linux-3.1.1/grsecurity/grsec_chdir.c linux-3.1.1/grsecurity/grsec_chdir.c
52928--- linux-3.1.1/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52929+++ linux-3.1.1/grsecurity/grsec_chdir.c 2011-11-16 18:40:31.000000000 -0500
52930@@ -0,0 +1,19 @@
52931+#include <linux/kernel.h>
52932+#include <linux/sched.h>
52933+#include <linux/fs.h>
52934+#include <linux/file.h>
52935+#include <linux/grsecurity.h>
52936+#include <linux/grinternal.h>
52937+
52938+void
52939+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52940+{
52941+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52942+ if ((grsec_enable_chdir && grsec_enable_group &&
52943+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52944+ !grsec_enable_group)) {
52945+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52946+ }
52947+#endif
52948+ return;
52949+}
52950diff -urNp linux-3.1.1/grsecurity/grsec_chroot.c linux-3.1.1/grsecurity/grsec_chroot.c
52951--- linux-3.1.1/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52952+++ linux-3.1.1/grsecurity/grsec_chroot.c 2011-11-16 18:40:31.000000000 -0500
52953@@ -0,0 +1,351 @@
52954+#include <linux/kernel.h>
52955+#include <linux/module.h>
52956+#include <linux/sched.h>
52957+#include <linux/file.h>
52958+#include <linux/fs.h>
52959+#include <linux/mount.h>
52960+#include <linux/types.h>
52961+#include <linux/pid_namespace.h>
52962+#include <linux/grsecurity.h>
52963+#include <linux/grinternal.h>
52964+
52965+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52966+{
52967+#ifdef CONFIG_GRKERNSEC
52968+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52969+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52970+ task->gr_is_chrooted = 1;
52971+ else
52972+ task->gr_is_chrooted = 0;
52973+
52974+ task->gr_chroot_dentry = path->dentry;
52975+#endif
52976+ return;
52977+}
52978+
52979+void gr_clear_chroot_entries(struct task_struct *task)
52980+{
52981+#ifdef CONFIG_GRKERNSEC
52982+ task->gr_is_chrooted = 0;
52983+ task->gr_chroot_dentry = NULL;
52984+#endif
52985+ return;
52986+}
52987+
52988+int
52989+gr_handle_chroot_unix(const pid_t pid)
52990+{
52991+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52992+ struct task_struct *p;
52993+
52994+ if (unlikely(!grsec_enable_chroot_unix))
52995+ return 1;
52996+
52997+ if (likely(!proc_is_chrooted(current)))
52998+ return 1;
52999+
53000+ rcu_read_lock();
53001+ read_lock(&tasklist_lock);
53002+ p = find_task_by_vpid_unrestricted(pid);
53003+ if (unlikely(p && !have_same_root(current, p))) {
53004+ read_unlock(&tasklist_lock);
53005+ rcu_read_unlock();
53006+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
53007+ return 0;
53008+ }
53009+ read_unlock(&tasklist_lock);
53010+ rcu_read_unlock();
53011+#endif
53012+ return 1;
53013+}
53014+
53015+int
53016+gr_handle_chroot_nice(void)
53017+{
53018+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53019+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
53020+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
53021+ return -EPERM;
53022+ }
53023+#endif
53024+ return 0;
53025+}
53026+
53027+int
53028+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
53029+{
53030+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53031+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
53032+ && proc_is_chrooted(current)) {
53033+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
53034+ return -EACCES;
53035+ }
53036+#endif
53037+ return 0;
53038+}
53039+
53040+int
53041+gr_handle_chroot_rawio(const struct inode *inode)
53042+{
53043+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53044+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53045+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
53046+ return 1;
53047+#endif
53048+ return 0;
53049+}
53050+
53051+int
53052+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
53053+{
53054+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53055+ struct task_struct *p;
53056+ int ret = 0;
53057+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
53058+ return ret;
53059+
53060+ read_lock(&tasklist_lock);
53061+ do_each_pid_task(pid, type, p) {
53062+ if (!have_same_root(current, p)) {
53063+ ret = 1;
53064+ goto out;
53065+ }
53066+ } while_each_pid_task(pid, type, p);
53067+out:
53068+ read_unlock(&tasklist_lock);
53069+ return ret;
53070+#endif
53071+ return 0;
53072+}
53073+
53074+int
53075+gr_pid_is_chrooted(struct task_struct *p)
53076+{
53077+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53078+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
53079+ return 0;
53080+
53081+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
53082+ !have_same_root(current, p)) {
53083+ return 1;
53084+ }
53085+#endif
53086+ return 0;
53087+}
53088+
53089+EXPORT_SYMBOL(gr_pid_is_chrooted);
53090+
53091+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
53092+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
53093+{
53094+ struct path path, currentroot;
53095+ int ret = 0;
53096+
53097+ path.dentry = (struct dentry *)u_dentry;
53098+ path.mnt = (struct vfsmount *)u_mnt;
53099+ get_fs_root(current->fs, &currentroot);
53100+ if (path_is_under(&path, &currentroot))
53101+ ret = 1;
53102+ path_put(&currentroot);
53103+
53104+ return ret;
53105+}
53106+#endif
53107+
53108+int
53109+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
53110+{
53111+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53112+ if (!grsec_enable_chroot_fchdir)
53113+ return 1;
53114+
53115+ if (!proc_is_chrooted(current))
53116+ return 1;
53117+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
53118+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
53119+ return 0;
53120+ }
53121+#endif
53122+ return 1;
53123+}
53124+
53125+int
53126+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53127+ const time_t shm_createtime)
53128+{
53129+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53130+ struct task_struct *p;
53131+ time_t starttime;
53132+
53133+ if (unlikely(!grsec_enable_chroot_shmat))
53134+ return 1;
53135+
53136+ if (likely(!proc_is_chrooted(current)))
53137+ return 1;
53138+
53139+ rcu_read_lock();
53140+ read_lock(&tasklist_lock);
53141+
53142+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
53143+ starttime = p->start_time.tv_sec;
53144+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
53145+ if (have_same_root(current, p)) {
53146+ goto allow;
53147+ } else {
53148+ read_unlock(&tasklist_lock);
53149+ rcu_read_unlock();
53150+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53151+ return 0;
53152+ }
53153+ }
53154+ /* creator exited, pid reuse, fall through to next check */
53155+ }
53156+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
53157+ if (unlikely(!have_same_root(current, p))) {
53158+ read_unlock(&tasklist_lock);
53159+ rcu_read_unlock();
53160+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53161+ return 0;
53162+ }
53163+ }
53164+
53165+allow:
53166+ read_unlock(&tasklist_lock);
53167+ rcu_read_unlock();
53168+#endif
53169+ return 1;
53170+}
53171+
53172+void
53173+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
53174+{
53175+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53176+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
53177+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
53178+#endif
53179+ return;
53180+}
53181+
53182+int
53183+gr_handle_chroot_mknod(const struct dentry *dentry,
53184+ const struct vfsmount *mnt, const int mode)
53185+{
53186+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53187+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
53188+ proc_is_chrooted(current)) {
53189+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
53190+ return -EPERM;
53191+ }
53192+#endif
53193+ return 0;
53194+}
53195+
53196+int
53197+gr_handle_chroot_mount(const struct dentry *dentry,
53198+ const struct vfsmount *mnt, const char *dev_name)
53199+{
53200+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53201+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
53202+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
53203+ return -EPERM;
53204+ }
53205+#endif
53206+ return 0;
53207+}
53208+
53209+int
53210+gr_handle_chroot_pivot(void)
53211+{
53212+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53213+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
53214+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
53215+ return -EPERM;
53216+ }
53217+#endif
53218+ return 0;
53219+}
53220+
53221+int
53222+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
53223+{
53224+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53225+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
53226+ !gr_is_outside_chroot(dentry, mnt)) {
53227+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
53228+ return -EPERM;
53229+ }
53230+#endif
53231+ return 0;
53232+}
53233+
53234+extern const char *captab_log[];
53235+extern int captab_log_entries;
53236+
53237+int
53238+gr_chroot_is_capable(const int cap)
53239+{
53240+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53241+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
53242+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53243+ if (cap_raised(chroot_caps, cap)) {
53244+ const struct cred *creds = current_cred();
53245+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
53246+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
53247+ }
53248+ return 0;
53249+ }
53250+ }
53251+#endif
53252+ return 1;
53253+}
53254+
53255+int
53256+gr_chroot_is_capable_nolog(const int cap)
53257+{
53258+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53259+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
53260+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53261+ if (cap_raised(chroot_caps, cap)) {
53262+ return 0;
53263+ }
53264+ }
53265+#endif
53266+ return 1;
53267+}
53268+
53269+int
53270+gr_handle_chroot_sysctl(const int op)
53271+{
53272+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53273+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
53274+ proc_is_chrooted(current))
53275+ return -EACCES;
53276+#endif
53277+ return 0;
53278+}
53279+
53280+void
53281+gr_handle_chroot_chdir(struct path *path)
53282+{
53283+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53284+ if (grsec_enable_chroot_chdir)
53285+ set_fs_pwd(current->fs, path);
53286+#endif
53287+ return;
53288+}
53289+
53290+int
53291+gr_handle_chroot_chmod(const struct dentry *dentry,
53292+ const struct vfsmount *mnt, const int mode)
53293+{
53294+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53295+ /* allow chmod +s on directories, but not files */
53296+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53297+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53298+ proc_is_chrooted(current)) {
53299+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53300+ return -EPERM;
53301+ }
53302+#endif
53303+ return 0;
53304+}
53305diff -urNp linux-3.1.1/grsecurity/grsec_disabled.c linux-3.1.1/grsecurity/grsec_disabled.c
53306--- linux-3.1.1/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53307+++ linux-3.1.1/grsecurity/grsec_disabled.c 2011-11-17 00:16:25.000000000 -0500
53308@@ -0,0 +1,439 @@
53309+#include <linux/kernel.h>
53310+#include <linux/module.h>
53311+#include <linux/sched.h>
53312+#include <linux/file.h>
53313+#include <linux/fs.h>
53314+#include <linux/kdev_t.h>
53315+#include <linux/net.h>
53316+#include <linux/in.h>
53317+#include <linux/ip.h>
53318+#include <linux/skbuff.h>
53319+#include <linux/sysctl.h>
53320+
53321+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53322+void
53323+pax_set_initial_flags(struct linux_binprm *bprm)
53324+{
53325+ return;
53326+}
53327+#endif
53328+
53329+#ifdef CONFIG_SYSCTL
53330+__u32
53331+gr_handle_sysctl(const struct ctl_table * table, const int op)
53332+{
53333+ return 0;
53334+}
53335+#endif
53336+
53337+#ifdef CONFIG_TASKSTATS
53338+int gr_is_taskstats_denied(int pid)
53339+{
53340+ return 0;
53341+}
53342+#endif
53343+
53344+int
53345+gr_acl_is_enabled(void)
53346+{
53347+ return 0;
53348+}
53349+
53350+void
53351+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53352+{
53353+ return;
53354+}
53355+
53356+int
53357+gr_handle_rawio(const struct inode *inode)
53358+{
53359+ return 0;
53360+}
53361+
53362+void
53363+gr_acl_handle_psacct(struct task_struct *task, const long code)
53364+{
53365+ return;
53366+}
53367+
53368+int
53369+gr_handle_ptrace(struct task_struct *task, const long request)
53370+{
53371+ return 0;
53372+}
53373+
53374+int
53375+gr_handle_proc_ptrace(struct task_struct *task)
53376+{
53377+ return 0;
53378+}
53379+
53380+void
53381+gr_learn_resource(const struct task_struct *task,
53382+ const int res, const unsigned long wanted, const int gt)
53383+{
53384+ return;
53385+}
53386+
53387+int
53388+gr_set_acls(const int type)
53389+{
53390+ return 0;
53391+}
53392+
53393+int
53394+gr_check_hidden_task(const struct task_struct *tsk)
53395+{
53396+ return 0;
53397+}
53398+
53399+int
53400+gr_check_protected_task(const struct task_struct *task)
53401+{
53402+ return 0;
53403+}
53404+
53405+int
53406+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53407+{
53408+ return 0;
53409+}
53410+
53411+void
53412+gr_copy_label(struct task_struct *tsk)
53413+{
53414+ return;
53415+}
53416+
53417+void
53418+gr_set_pax_flags(struct task_struct *task)
53419+{
53420+ return;
53421+}
53422+
53423+int
53424+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53425+ const int unsafe_share)
53426+{
53427+ return 0;
53428+}
53429+
53430+void
53431+gr_handle_delete(const ino_t ino, const dev_t dev)
53432+{
53433+ return;
53434+}
53435+
53436+void
53437+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53438+{
53439+ return;
53440+}
53441+
53442+void
53443+gr_handle_crash(struct task_struct *task, const int sig)
53444+{
53445+ return;
53446+}
53447+
53448+int
53449+gr_check_crash_exec(const struct file *filp)
53450+{
53451+ return 0;
53452+}
53453+
53454+int
53455+gr_check_crash_uid(const uid_t uid)
53456+{
53457+ return 0;
53458+}
53459+
53460+void
53461+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53462+ struct dentry *old_dentry,
53463+ struct dentry *new_dentry,
53464+ struct vfsmount *mnt, const __u8 replace)
53465+{
53466+ return;
53467+}
53468+
53469+int
53470+gr_search_socket(const int family, const int type, const int protocol)
53471+{
53472+ return 1;
53473+}
53474+
53475+int
53476+gr_search_connectbind(const int mode, const struct socket *sock,
53477+ const struct sockaddr_in *addr)
53478+{
53479+ return 0;
53480+}
53481+
53482+void
53483+gr_handle_alertkill(struct task_struct *task)
53484+{
53485+ return;
53486+}
53487+
53488+__u32
53489+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53490+{
53491+ return 1;
53492+}
53493+
53494+__u32
53495+gr_acl_handle_hidden_file(const struct dentry * dentry,
53496+ const struct vfsmount * mnt)
53497+{
53498+ return 1;
53499+}
53500+
53501+__u32
53502+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53503+ int acc_mode)
53504+{
53505+ return 1;
53506+}
53507+
53508+__u32
53509+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53510+{
53511+ return 1;
53512+}
53513+
53514+__u32
53515+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53516+{
53517+ return 1;
53518+}
53519+
53520+int
53521+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53522+ unsigned int *vm_flags)
53523+{
53524+ return 1;
53525+}
53526+
53527+__u32
53528+gr_acl_handle_truncate(const struct dentry * dentry,
53529+ const struct vfsmount * mnt)
53530+{
53531+ return 1;
53532+}
53533+
53534+__u32
53535+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53536+{
53537+ return 1;
53538+}
53539+
53540+__u32
53541+gr_acl_handle_access(const struct dentry * dentry,
53542+ const struct vfsmount * mnt, const int fmode)
53543+{
53544+ return 1;
53545+}
53546+
53547+__u32
53548+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53549+ mode_t mode)
53550+{
53551+ return 1;
53552+}
53553+
53554+__u32
53555+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53556+ mode_t mode)
53557+{
53558+ return 1;
53559+}
53560+
53561+__u32
53562+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53563+{
53564+ return 1;
53565+}
53566+
53567+__u32
53568+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53569+{
53570+ return 1;
53571+}
53572+
53573+void
53574+grsecurity_init(void)
53575+{
53576+ return;
53577+}
53578+
53579+__u32
53580+gr_acl_handle_mknod(const struct dentry * new_dentry,
53581+ const struct dentry * parent_dentry,
53582+ const struct vfsmount * parent_mnt,
53583+ const int mode)
53584+{
53585+ return 1;
53586+}
53587+
53588+__u32
53589+gr_acl_handle_mkdir(const struct dentry * new_dentry,
53590+ const struct dentry * parent_dentry,
53591+ const struct vfsmount * parent_mnt)
53592+{
53593+ return 1;
53594+}
53595+
53596+__u32
53597+gr_acl_handle_symlink(const struct dentry * new_dentry,
53598+ const struct dentry * parent_dentry,
53599+ const struct vfsmount * parent_mnt, const char *from)
53600+{
53601+ return 1;
53602+}
53603+
53604+__u32
53605+gr_acl_handle_link(const struct dentry * new_dentry,
53606+ const struct dentry * parent_dentry,
53607+ const struct vfsmount * parent_mnt,
53608+ const struct dentry * old_dentry,
53609+ const struct vfsmount * old_mnt, const char *to)
53610+{
53611+ return 1;
53612+}
53613+
53614+int
53615+gr_acl_handle_rename(const struct dentry *new_dentry,
53616+ const struct dentry *parent_dentry,
53617+ const struct vfsmount *parent_mnt,
53618+ const struct dentry *old_dentry,
53619+ const struct inode *old_parent_inode,
53620+ const struct vfsmount *old_mnt, const char *newname)
53621+{
53622+ return 0;
53623+}
53624+
53625+int
53626+gr_acl_handle_filldir(const struct file *file, const char *name,
53627+ const int namelen, const ino_t ino)
53628+{
53629+ return 1;
53630+}
53631+
53632+int
53633+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53634+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53635+{
53636+ return 1;
53637+}
53638+
53639+int
53640+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53641+{
53642+ return 0;
53643+}
53644+
53645+int
53646+gr_search_accept(const struct socket *sock)
53647+{
53648+ return 0;
53649+}
53650+
53651+int
53652+gr_search_listen(const struct socket *sock)
53653+{
53654+ return 0;
53655+}
53656+
53657+int
53658+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53659+{
53660+ return 0;
53661+}
53662+
53663+__u32
53664+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53665+{
53666+ return 1;
53667+}
53668+
53669+__u32
53670+gr_acl_handle_creat(const struct dentry * dentry,
53671+ const struct dentry * p_dentry,
53672+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
53673+ const int imode)
53674+{
53675+ return 1;
53676+}
53677+
53678+void
53679+gr_acl_handle_exit(void)
53680+{
53681+ return;
53682+}
53683+
53684+int
53685+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53686+{
53687+ return 1;
53688+}
53689+
53690+void
53691+gr_set_role_label(const uid_t uid, const gid_t gid)
53692+{
53693+ return;
53694+}
53695+
53696+int
53697+gr_acl_handle_procpidmem(const struct task_struct *task)
53698+{
53699+ return 0;
53700+}
53701+
53702+int
53703+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53704+{
53705+ return 0;
53706+}
53707+
53708+int
53709+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53710+{
53711+ return 0;
53712+}
53713+
53714+void
53715+gr_set_kernel_label(struct task_struct *task)
53716+{
53717+ return;
53718+}
53719+
53720+int
53721+gr_check_user_change(int real, int effective, int fs)
53722+{
53723+ return 0;
53724+}
53725+
53726+int
53727+gr_check_group_change(int real, int effective, int fs)
53728+{
53729+ return 0;
53730+}
53731+
53732+int gr_acl_enable_at_secure(void)
53733+{
53734+ return 0;
53735+}
53736+
53737+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53738+{
53739+ return dentry->d_inode->i_sb->s_dev;
53740+}
53741+
53742+EXPORT_SYMBOL(gr_learn_resource);
53743+EXPORT_SYMBOL(gr_set_kernel_label);
53744+#ifdef CONFIG_SECURITY
53745+EXPORT_SYMBOL(gr_check_user_change);
53746+EXPORT_SYMBOL(gr_check_group_change);
53747+#endif
53748diff -urNp linux-3.1.1/grsecurity/grsec_exec.c linux-3.1.1/grsecurity/grsec_exec.c
53749--- linux-3.1.1/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53750+++ linux-3.1.1/grsecurity/grsec_exec.c 2011-11-16 18:40:31.000000000 -0500
53751@@ -0,0 +1,146 @@
53752+#include <linux/kernel.h>
53753+#include <linux/sched.h>
53754+#include <linux/file.h>
53755+#include <linux/binfmts.h>
53756+#include <linux/fs.h>
53757+#include <linux/types.h>
53758+#include <linux/grdefs.h>
53759+#include <linux/grsecurity.h>
53760+#include <linux/grinternal.h>
53761+#include <linux/capability.h>
53762+#include <linux/module.h>
53763+
53764+#include <asm/uaccess.h>
53765+
53766+#ifdef CONFIG_GRKERNSEC_EXECLOG
53767+static char gr_exec_arg_buf[132];
53768+static DEFINE_MUTEX(gr_exec_arg_mutex);
53769+#endif
53770+
53771+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
53772+
53773+void
53774+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
53775+{
53776+#ifdef CONFIG_GRKERNSEC_EXECLOG
53777+ char *grarg = gr_exec_arg_buf;
53778+ unsigned int i, x, execlen = 0;
53779+ char c;
53780+
53781+ if (!((grsec_enable_execlog && grsec_enable_group &&
53782+ in_group_p(grsec_audit_gid))
53783+ || (grsec_enable_execlog && !grsec_enable_group)))
53784+ return;
53785+
53786+ mutex_lock(&gr_exec_arg_mutex);
53787+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53788+
53789+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53790+ const char __user *p;
53791+ unsigned int len;
53792+
53793+ p = get_user_arg_ptr(argv, i);
53794+ if (IS_ERR(p))
53795+ goto log;
53796+
53797+ len = strnlen_user(p, 128 - execlen);
53798+ if (len > 128 - execlen)
53799+ len = 128 - execlen;
53800+ else if (len > 0)
53801+ len--;
53802+ if (copy_from_user(grarg + execlen, p, len))
53803+ goto log;
53804+
53805+ /* rewrite unprintable characters */
53806+ for (x = 0; x < len; x++) {
53807+ c = *(grarg + execlen + x);
53808+ if (c < 32 || c > 126)
53809+ *(grarg + execlen + x) = ' ';
53810+ }
53811+
53812+ execlen += len;
53813+ *(grarg + execlen) = ' ';
53814+ *(grarg + execlen + 1) = '\0';
53815+ execlen++;
53816+ }
53817+
53818+ log:
53819+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53820+ bprm->file->f_path.mnt, grarg);
53821+ mutex_unlock(&gr_exec_arg_mutex);
53822+#endif
53823+ return;
53824+}
53825+
53826+#ifdef CONFIG_GRKERNSEC
53827+extern int gr_acl_is_capable(const int cap);
53828+extern int gr_acl_is_capable_nolog(const int cap);
53829+extern int gr_chroot_is_capable(const int cap);
53830+extern int gr_chroot_is_capable_nolog(const int cap);
53831+#endif
53832+
53833+const char *captab_log[] = {
53834+ "CAP_CHOWN",
53835+ "CAP_DAC_OVERRIDE",
53836+ "CAP_DAC_READ_SEARCH",
53837+ "CAP_FOWNER",
53838+ "CAP_FSETID",
53839+ "CAP_KILL",
53840+ "CAP_SETGID",
53841+ "CAP_SETUID",
53842+ "CAP_SETPCAP",
53843+ "CAP_LINUX_IMMUTABLE",
53844+ "CAP_NET_BIND_SERVICE",
53845+ "CAP_NET_BROADCAST",
53846+ "CAP_NET_ADMIN",
53847+ "CAP_NET_RAW",
53848+ "CAP_IPC_LOCK",
53849+ "CAP_IPC_OWNER",
53850+ "CAP_SYS_MODULE",
53851+ "CAP_SYS_RAWIO",
53852+ "CAP_SYS_CHROOT",
53853+ "CAP_SYS_PTRACE",
53854+ "CAP_SYS_PACCT",
53855+ "CAP_SYS_ADMIN",
53856+ "CAP_SYS_BOOT",
53857+ "CAP_SYS_NICE",
53858+ "CAP_SYS_RESOURCE",
53859+ "CAP_SYS_TIME",
53860+ "CAP_SYS_TTY_CONFIG",
53861+ "CAP_MKNOD",
53862+ "CAP_LEASE",
53863+ "CAP_AUDIT_WRITE",
53864+ "CAP_AUDIT_CONTROL",
53865+ "CAP_SETFCAP",
53866+ "CAP_MAC_OVERRIDE",
53867+ "CAP_MAC_ADMIN",
53868+ "CAP_SYSLOG",
53869+ "CAP_WAKE_ALARM"
53870+};
53871+
53872+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
53873+
53874+int gr_is_capable(const int cap)
53875+{
53876+#ifdef CONFIG_GRKERNSEC
53877+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
53878+ return 1;
53879+ return 0;
53880+#else
53881+ return 1;
53882+#endif
53883+}
53884+
53885+int gr_is_capable_nolog(const int cap)
53886+{
53887+#ifdef CONFIG_GRKERNSEC
53888+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
53889+ return 1;
53890+ return 0;
53891+#else
53892+ return 1;
53893+#endif
53894+}
53895+
53896+EXPORT_SYMBOL(gr_is_capable);
53897+EXPORT_SYMBOL(gr_is_capable_nolog);
53898diff -urNp linux-3.1.1/grsecurity/grsec_fifo.c linux-3.1.1/grsecurity/grsec_fifo.c
53899--- linux-3.1.1/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53900+++ linux-3.1.1/grsecurity/grsec_fifo.c 2011-11-16 18:40:31.000000000 -0500
53901@@ -0,0 +1,24 @@
53902+#include <linux/kernel.h>
53903+#include <linux/sched.h>
53904+#include <linux/fs.h>
53905+#include <linux/file.h>
53906+#include <linux/grinternal.h>
53907+
53908+int
53909+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53910+ const struct dentry *dir, const int flag, const int acc_mode)
53911+{
53912+#ifdef CONFIG_GRKERNSEC_FIFO
53913+ const struct cred *cred = current_cred();
53914+
53915+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53916+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53917+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53918+ (cred->fsuid != dentry->d_inode->i_uid)) {
53919+ if (!inode_permission(dentry->d_inode, acc_mode))
53920+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53921+ return -EACCES;
53922+ }
53923+#endif
53924+ return 0;
53925+}
53926diff -urNp linux-3.1.1/grsecurity/grsec_fork.c linux-3.1.1/grsecurity/grsec_fork.c
53927--- linux-3.1.1/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53928+++ linux-3.1.1/grsecurity/grsec_fork.c 2011-11-16 18:40:31.000000000 -0500
53929@@ -0,0 +1,23 @@
53930+#include <linux/kernel.h>
53931+#include <linux/sched.h>
53932+#include <linux/grsecurity.h>
53933+#include <linux/grinternal.h>
53934+#include <linux/errno.h>
53935+
53936+void
53937+gr_log_forkfail(const int retval)
53938+{
53939+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53940+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53941+ switch (retval) {
53942+ case -EAGAIN:
53943+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53944+ break;
53945+ case -ENOMEM:
53946+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53947+ break;
53948+ }
53949+ }
53950+#endif
53951+ return;
53952+}
53953diff -urNp linux-3.1.1/grsecurity/grsec_init.c linux-3.1.1/grsecurity/grsec_init.c
53954--- linux-3.1.1/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53955+++ linux-3.1.1/grsecurity/grsec_init.c 2011-11-16 18:40:31.000000000 -0500
53956@@ -0,0 +1,269 @@
53957+#include <linux/kernel.h>
53958+#include <linux/sched.h>
53959+#include <linux/mm.h>
53960+#include <linux/gracl.h>
53961+#include <linux/slab.h>
53962+#include <linux/vmalloc.h>
53963+#include <linux/percpu.h>
53964+#include <linux/module.h>
53965+
53966+int grsec_enable_brute;
53967+int grsec_enable_link;
53968+int grsec_enable_dmesg;
53969+int grsec_enable_harden_ptrace;
53970+int grsec_enable_fifo;
53971+int grsec_enable_execlog;
53972+int grsec_enable_signal;
53973+int grsec_enable_forkfail;
53974+int grsec_enable_audit_ptrace;
53975+int grsec_enable_time;
53976+int grsec_enable_audit_textrel;
53977+int grsec_enable_group;
53978+int grsec_audit_gid;
53979+int grsec_enable_chdir;
53980+int grsec_enable_mount;
53981+int grsec_enable_rofs;
53982+int grsec_enable_chroot_findtask;
53983+int grsec_enable_chroot_mount;
53984+int grsec_enable_chroot_shmat;
53985+int grsec_enable_chroot_fchdir;
53986+int grsec_enable_chroot_double;
53987+int grsec_enable_chroot_pivot;
53988+int grsec_enable_chroot_chdir;
53989+int grsec_enable_chroot_chmod;
53990+int grsec_enable_chroot_mknod;
53991+int grsec_enable_chroot_nice;
53992+int grsec_enable_chroot_execlog;
53993+int grsec_enable_chroot_caps;
53994+int grsec_enable_chroot_sysctl;
53995+int grsec_enable_chroot_unix;
53996+int grsec_enable_tpe;
53997+int grsec_tpe_gid;
53998+int grsec_enable_blackhole;
53999+#ifdef CONFIG_IPV6_MODULE
54000+EXPORT_SYMBOL(grsec_enable_blackhole);
54001+#endif
54002+int grsec_lastack_retries;
54003+int grsec_enable_tpe_all;
54004+int grsec_enable_tpe_invert;
54005+int grsec_enable_socket_all;
54006+int grsec_socket_all_gid;
54007+int grsec_enable_socket_client;
54008+int grsec_socket_client_gid;
54009+int grsec_enable_socket_server;
54010+int grsec_socket_server_gid;
54011+int grsec_resource_logging;
54012+int grsec_disable_privio;
54013+int grsec_enable_log_rwxmaps;
54014+int grsec_lock;
54015+
54016+DEFINE_SPINLOCK(grsec_alert_lock);
54017+unsigned long grsec_alert_wtime = 0;
54018+unsigned long grsec_alert_fyet = 0;
54019+
54020+DEFINE_SPINLOCK(grsec_audit_lock);
54021+
54022+DEFINE_RWLOCK(grsec_exec_file_lock);
54023+
54024+char *gr_shared_page[4];
54025+
54026+char *gr_alert_log_fmt;
54027+char *gr_audit_log_fmt;
54028+char *gr_alert_log_buf;
54029+char *gr_audit_log_buf;
54030+
54031+extern struct gr_arg *gr_usermode;
54032+extern unsigned char *gr_system_salt;
54033+extern unsigned char *gr_system_sum;
54034+
54035+void __init
54036+grsecurity_init(void)
54037+{
54038+ int j;
54039+ /* create the per-cpu shared pages */
54040+
54041+#ifdef CONFIG_X86
54042+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
54043+#endif
54044+
54045+ for (j = 0; j < 4; j++) {
54046+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
54047+ if (gr_shared_page[j] == NULL) {
54048+ panic("Unable to allocate grsecurity shared page");
54049+ return;
54050+ }
54051+ }
54052+
54053+ /* allocate log buffers */
54054+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
54055+ if (!gr_alert_log_fmt) {
54056+ panic("Unable to allocate grsecurity alert log format buffer");
54057+ return;
54058+ }
54059+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
54060+ if (!gr_audit_log_fmt) {
54061+ panic("Unable to allocate grsecurity audit log format buffer");
54062+ return;
54063+ }
54064+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54065+ if (!gr_alert_log_buf) {
54066+ panic("Unable to allocate grsecurity alert log buffer");
54067+ return;
54068+ }
54069+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54070+ if (!gr_audit_log_buf) {
54071+ panic("Unable to allocate grsecurity audit log buffer");
54072+ return;
54073+ }
54074+
54075+ /* allocate memory for authentication structure */
54076+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
54077+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
54078+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
54079+
54080+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
54081+ panic("Unable to allocate grsecurity authentication structure");
54082+ return;
54083+ }
54084+
54085+
54086+#ifdef CONFIG_GRKERNSEC_IO
54087+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
54088+ grsec_disable_privio = 1;
54089+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54090+ grsec_disable_privio = 1;
54091+#else
54092+ grsec_disable_privio = 0;
54093+#endif
54094+#endif
54095+
54096+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54097+ /* for backward compatibility, tpe_invert always defaults to on if
54098+ enabled in the kernel
54099+ */
54100+ grsec_enable_tpe_invert = 1;
54101+#endif
54102+
54103+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54104+#ifndef CONFIG_GRKERNSEC_SYSCTL
54105+ grsec_lock = 1;
54106+#endif
54107+
54108+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54109+ grsec_enable_audit_textrel = 1;
54110+#endif
54111+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54112+ grsec_enable_log_rwxmaps = 1;
54113+#endif
54114+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
54115+ grsec_enable_group = 1;
54116+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
54117+#endif
54118+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54119+ grsec_enable_chdir = 1;
54120+#endif
54121+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54122+ grsec_enable_harden_ptrace = 1;
54123+#endif
54124+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54125+ grsec_enable_mount = 1;
54126+#endif
54127+#ifdef CONFIG_GRKERNSEC_LINK
54128+ grsec_enable_link = 1;
54129+#endif
54130+#ifdef CONFIG_GRKERNSEC_BRUTE
54131+ grsec_enable_brute = 1;
54132+#endif
54133+#ifdef CONFIG_GRKERNSEC_DMESG
54134+ grsec_enable_dmesg = 1;
54135+#endif
54136+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54137+ grsec_enable_blackhole = 1;
54138+ grsec_lastack_retries = 4;
54139+#endif
54140+#ifdef CONFIG_GRKERNSEC_FIFO
54141+ grsec_enable_fifo = 1;
54142+#endif
54143+#ifdef CONFIG_GRKERNSEC_EXECLOG
54144+ grsec_enable_execlog = 1;
54145+#endif
54146+#ifdef CONFIG_GRKERNSEC_SIGNAL
54147+ grsec_enable_signal = 1;
54148+#endif
54149+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54150+ grsec_enable_forkfail = 1;
54151+#endif
54152+#ifdef CONFIG_GRKERNSEC_TIME
54153+ grsec_enable_time = 1;
54154+#endif
54155+#ifdef CONFIG_GRKERNSEC_RESLOG
54156+ grsec_resource_logging = 1;
54157+#endif
54158+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54159+ grsec_enable_chroot_findtask = 1;
54160+#endif
54161+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54162+ grsec_enable_chroot_unix = 1;
54163+#endif
54164+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54165+ grsec_enable_chroot_mount = 1;
54166+#endif
54167+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54168+ grsec_enable_chroot_fchdir = 1;
54169+#endif
54170+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54171+ grsec_enable_chroot_shmat = 1;
54172+#endif
54173+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54174+ grsec_enable_audit_ptrace = 1;
54175+#endif
54176+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54177+ grsec_enable_chroot_double = 1;
54178+#endif
54179+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54180+ grsec_enable_chroot_pivot = 1;
54181+#endif
54182+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54183+ grsec_enable_chroot_chdir = 1;
54184+#endif
54185+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54186+ grsec_enable_chroot_chmod = 1;
54187+#endif
54188+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54189+ grsec_enable_chroot_mknod = 1;
54190+#endif
54191+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54192+ grsec_enable_chroot_nice = 1;
54193+#endif
54194+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54195+ grsec_enable_chroot_execlog = 1;
54196+#endif
54197+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54198+ grsec_enable_chroot_caps = 1;
54199+#endif
54200+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54201+ grsec_enable_chroot_sysctl = 1;
54202+#endif
54203+#ifdef CONFIG_GRKERNSEC_TPE
54204+ grsec_enable_tpe = 1;
54205+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
54206+#ifdef CONFIG_GRKERNSEC_TPE_ALL
54207+ grsec_enable_tpe_all = 1;
54208+#endif
54209+#endif
54210+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54211+ grsec_enable_socket_all = 1;
54212+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
54213+#endif
54214+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54215+ grsec_enable_socket_client = 1;
54216+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
54217+#endif
54218+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54219+ grsec_enable_socket_server = 1;
54220+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
54221+#endif
54222+#endif
54223+
54224+ return;
54225+}
54226diff -urNp linux-3.1.1/grsecurity/grsec_link.c linux-3.1.1/grsecurity/grsec_link.c
54227--- linux-3.1.1/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
54228+++ linux-3.1.1/grsecurity/grsec_link.c 2011-11-16 18:40:31.000000000 -0500
54229@@ -0,0 +1,43 @@
54230+#include <linux/kernel.h>
54231+#include <linux/sched.h>
54232+#include <linux/fs.h>
54233+#include <linux/file.h>
54234+#include <linux/grinternal.h>
54235+
54236+int
54237+gr_handle_follow_link(const struct inode *parent,
54238+ const struct inode *inode,
54239+ const struct dentry *dentry, const struct vfsmount *mnt)
54240+{
54241+#ifdef CONFIG_GRKERNSEC_LINK
54242+ const struct cred *cred = current_cred();
54243+
54244+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
54245+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
54246+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
54247+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
54248+ return -EACCES;
54249+ }
54250+#endif
54251+ return 0;
54252+}
54253+
54254+int
54255+gr_handle_hardlink(const struct dentry *dentry,
54256+ const struct vfsmount *mnt,
54257+ struct inode *inode, const int mode, const char *to)
54258+{
54259+#ifdef CONFIG_GRKERNSEC_LINK
54260+ const struct cred *cred = current_cred();
54261+
54262+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
54263+ (!S_ISREG(mode) || (mode & S_ISUID) ||
54264+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
54265+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
54266+ !capable(CAP_FOWNER) && cred->uid) {
54267+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
54268+ return -EPERM;
54269+ }
54270+#endif
54271+ return 0;
54272+}
54273diff -urNp linux-3.1.1/grsecurity/grsec_log.c linux-3.1.1/grsecurity/grsec_log.c
54274--- linux-3.1.1/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
54275+++ linux-3.1.1/grsecurity/grsec_log.c 2011-11-16 18:40:31.000000000 -0500
54276@@ -0,0 +1,322 @@
54277+#include <linux/kernel.h>
54278+#include <linux/sched.h>
54279+#include <linux/file.h>
54280+#include <linux/tty.h>
54281+#include <linux/fs.h>
54282+#include <linux/grinternal.h>
54283+
54284+#ifdef CONFIG_TREE_PREEMPT_RCU
54285+#define DISABLE_PREEMPT() preempt_disable()
54286+#define ENABLE_PREEMPT() preempt_enable()
54287+#else
54288+#define DISABLE_PREEMPT()
54289+#define ENABLE_PREEMPT()
54290+#endif
54291+
54292+#define BEGIN_LOCKS(x) \
54293+ DISABLE_PREEMPT(); \
54294+ rcu_read_lock(); \
54295+ read_lock(&tasklist_lock); \
54296+ read_lock(&grsec_exec_file_lock); \
54297+ if (x != GR_DO_AUDIT) \
54298+ spin_lock(&grsec_alert_lock); \
54299+ else \
54300+ spin_lock(&grsec_audit_lock)
54301+
54302+#define END_LOCKS(x) \
54303+ if (x != GR_DO_AUDIT) \
54304+ spin_unlock(&grsec_alert_lock); \
54305+ else \
54306+ spin_unlock(&grsec_audit_lock); \
54307+ read_unlock(&grsec_exec_file_lock); \
54308+ read_unlock(&tasklist_lock); \
54309+ rcu_read_unlock(); \
54310+ ENABLE_PREEMPT(); \
54311+ if (x == GR_DONT_AUDIT) \
54312+ gr_handle_alertkill(current)
54313+
54314+enum {
54315+ FLOODING,
54316+ NO_FLOODING
54317+};
54318+
54319+extern char *gr_alert_log_fmt;
54320+extern char *gr_audit_log_fmt;
54321+extern char *gr_alert_log_buf;
54322+extern char *gr_audit_log_buf;
54323+
54324+static int gr_log_start(int audit)
54325+{
54326+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54327+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54328+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54329+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
54330+ unsigned long curr_secs = get_seconds();
54331+
54332+ if (audit == GR_DO_AUDIT)
54333+ goto set_fmt;
54334+
54335+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
54336+ grsec_alert_wtime = curr_secs;
54337+ grsec_alert_fyet = 0;
54338+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
54339+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54340+ grsec_alert_fyet++;
54341+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54342+ grsec_alert_wtime = curr_secs;
54343+ grsec_alert_fyet++;
54344+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54345+ return FLOODING;
54346+ }
54347+ else return FLOODING;
54348+
54349+set_fmt:
54350+#endif
54351+ memset(buf, 0, PAGE_SIZE);
54352+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
54353+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54354+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54355+ } else if (current->signal->curr_ip) {
54356+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54357+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54358+ } else if (gr_acl_is_enabled()) {
54359+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54360+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54361+ } else {
54362+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
54363+ strcpy(buf, fmt);
54364+ }
54365+
54366+ return NO_FLOODING;
54367+}
54368+
54369+static void gr_log_middle(int audit, const char *msg, va_list ap)
54370+ __attribute__ ((format (printf, 2, 0)));
54371+
54372+static void gr_log_middle(int audit, const char *msg, va_list ap)
54373+{
54374+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54375+ unsigned int len = strlen(buf);
54376+
54377+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54378+
54379+ return;
54380+}
54381+
54382+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54383+ __attribute__ ((format (printf, 2, 3)));
54384+
54385+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54386+{
54387+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54388+ unsigned int len = strlen(buf);
54389+ va_list ap;
54390+
54391+ va_start(ap, msg);
54392+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54393+ va_end(ap);
54394+
54395+ return;
54396+}
54397+
54398+static void gr_log_end(int audit, int append_default)
54399+{
54400+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54401+
54402+ if (append_default) {
54403+ unsigned int len = strlen(buf);
54404+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54405+ }
54406+
54407+ printk("%s\n", buf);
54408+
54409+ return;
54410+}
54411+
54412+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54413+{
54414+ int logtype;
54415+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54416+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54417+ void *voidptr = NULL;
54418+ int num1 = 0, num2 = 0;
54419+ unsigned long ulong1 = 0, ulong2 = 0;
54420+ struct dentry *dentry = NULL;
54421+ struct vfsmount *mnt = NULL;
54422+ struct file *file = NULL;
54423+ struct task_struct *task = NULL;
54424+ const struct cred *cred, *pcred;
54425+ va_list ap;
54426+
54427+ BEGIN_LOCKS(audit);
54428+ logtype = gr_log_start(audit);
54429+ if (logtype == FLOODING) {
54430+ END_LOCKS(audit);
54431+ return;
54432+ }
54433+ va_start(ap, argtypes);
54434+ switch (argtypes) {
54435+ case GR_TTYSNIFF:
54436+ task = va_arg(ap, struct task_struct *);
54437+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54438+ break;
54439+ case GR_SYSCTL_HIDDEN:
54440+ str1 = va_arg(ap, char *);
54441+ gr_log_middle_varargs(audit, msg, result, str1);
54442+ break;
54443+ case GR_RBAC:
54444+ dentry = va_arg(ap, struct dentry *);
54445+ mnt = va_arg(ap, struct vfsmount *);
54446+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54447+ break;
54448+ case GR_RBAC_STR:
54449+ dentry = va_arg(ap, struct dentry *);
54450+ mnt = va_arg(ap, struct vfsmount *);
54451+ str1 = va_arg(ap, char *);
54452+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54453+ break;
54454+ case GR_STR_RBAC:
54455+ str1 = va_arg(ap, char *);
54456+ dentry = va_arg(ap, struct dentry *);
54457+ mnt = va_arg(ap, struct vfsmount *);
54458+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54459+ break;
54460+ case GR_RBAC_MODE2:
54461+ dentry = va_arg(ap, struct dentry *);
54462+ mnt = va_arg(ap, struct vfsmount *);
54463+ str1 = va_arg(ap, char *);
54464+ str2 = va_arg(ap, char *);
54465+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54466+ break;
54467+ case GR_RBAC_MODE3:
54468+ dentry = va_arg(ap, struct dentry *);
54469+ mnt = va_arg(ap, struct vfsmount *);
54470+ str1 = va_arg(ap, char *);
54471+ str2 = va_arg(ap, char *);
54472+ str3 = va_arg(ap, char *);
54473+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54474+ break;
54475+ case GR_FILENAME:
54476+ dentry = va_arg(ap, struct dentry *);
54477+ mnt = va_arg(ap, struct vfsmount *);
54478+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54479+ break;
54480+ case GR_STR_FILENAME:
54481+ str1 = va_arg(ap, char *);
54482+ dentry = va_arg(ap, struct dentry *);
54483+ mnt = va_arg(ap, struct vfsmount *);
54484+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54485+ break;
54486+ case GR_FILENAME_STR:
54487+ dentry = va_arg(ap, struct dentry *);
54488+ mnt = va_arg(ap, struct vfsmount *);
54489+ str1 = va_arg(ap, char *);
54490+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54491+ break;
54492+ case GR_FILENAME_TWO_INT:
54493+ dentry = va_arg(ap, struct dentry *);
54494+ mnt = va_arg(ap, struct vfsmount *);
54495+ num1 = va_arg(ap, int);
54496+ num2 = va_arg(ap, int);
54497+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54498+ break;
54499+ case GR_FILENAME_TWO_INT_STR:
54500+ dentry = va_arg(ap, struct dentry *);
54501+ mnt = va_arg(ap, struct vfsmount *);
54502+ num1 = va_arg(ap, int);
54503+ num2 = va_arg(ap, int);
54504+ str1 = va_arg(ap, char *);
54505+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54506+ break;
54507+ case GR_TEXTREL:
54508+ file = va_arg(ap, struct file *);
54509+ ulong1 = va_arg(ap, unsigned long);
54510+ ulong2 = va_arg(ap, unsigned long);
54511+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54512+ break;
54513+ case GR_PTRACE:
54514+ task = va_arg(ap, struct task_struct *);
54515+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54516+ break;
54517+ case GR_RESOURCE:
54518+ task = va_arg(ap, struct task_struct *);
54519+ cred = __task_cred(task);
54520+ pcred = __task_cred(task->real_parent);
54521+ ulong1 = va_arg(ap, unsigned long);
54522+ str1 = va_arg(ap, char *);
54523+ ulong2 = va_arg(ap, unsigned long);
54524+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54525+ break;
54526+ case GR_CAP:
54527+ task = va_arg(ap, struct task_struct *);
54528+ cred = __task_cred(task);
54529+ pcred = __task_cred(task->real_parent);
54530+ str1 = va_arg(ap, char *);
54531+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54532+ break;
54533+ case GR_SIG:
54534+ str1 = va_arg(ap, char *);
54535+ voidptr = va_arg(ap, void *);
54536+ gr_log_middle_varargs(audit, msg, str1, voidptr);
54537+ break;
54538+ case GR_SIG2:
54539+ task = va_arg(ap, struct task_struct *);
54540+ cred = __task_cred(task);
54541+ pcred = __task_cred(task->real_parent);
54542+ num1 = va_arg(ap, int);
54543+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54544+ break;
54545+ case GR_CRASH1:
54546+ task = va_arg(ap, struct task_struct *);
54547+ cred = __task_cred(task);
54548+ pcred = __task_cred(task->real_parent);
54549+ ulong1 = va_arg(ap, unsigned long);
54550+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54551+ break;
54552+ case GR_CRASH2:
54553+ task = va_arg(ap, struct task_struct *);
54554+ cred = __task_cred(task);
54555+ pcred = __task_cred(task->real_parent);
54556+ ulong1 = va_arg(ap, unsigned long);
54557+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54558+ break;
54559+ case GR_RWXMAP:
54560+ file = va_arg(ap, struct file *);
54561+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54562+ break;
54563+ case GR_PSACCT:
54564+ {
54565+ unsigned int wday, cday;
54566+ __u8 whr, chr;
54567+ __u8 wmin, cmin;
54568+ __u8 wsec, csec;
54569+ char cur_tty[64] = { 0 };
54570+ char parent_tty[64] = { 0 };
54571+
54572+ task = va_arg(ap, struct task_struct *);
54573+ wday = va_arg(ap, unsigned int);
54574+ cday = va_arg(ap, unsigned int);
54575+ whr = va_arg(ap, int);
54576+ chr = va_arg(ap, int);
54577+ wmin = va_arg(ap, int);
54578+ cmin = va_arg(ap, int);
54579+ wsec = va_arg(ap, int);
54580+ csec = va_arg(ap, int);
54581+ ulong1 = va_arg(ap, unsigned long);
54582+ cred = __task_cred(task);
54583+ pcred = __task_cred(task->real_parent);
54584+
54585+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54586+ }
54587+ break;
54588+ default:
54589+ gr_log_middle(audit, msg, ap);
54590+ }
54591+ va_end(ap);
54592+ // these don't need DEFAULTSECARGS printed on the end
54593+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
54594+ gr_log_end(audit, 0);
54595+ else
54596+ gr_log_end(audit, 1);
54597+ END_LOCKS(audit);
54598+}
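
The switch above lets a single variadic entry point accept many differently-typed
argument lists: each GR_* tag tells the function how many arguments to pull off
the va_list, and in what order, before handing them to the printf-style middle
logger. A minimal userspace sketch of the same dispatch pattern (all names below
are illustrative, not taken from the patch); the invariant is that the tag and
the format string must agree on the argument types, since va_arg() cannot check
them:

#include <stdarg.h>
#include <stdio.h>

/* Illustrative argument-type tags, similar in spirit to the GR_* argtypes
 * used above; these names are not from the patch. */
enum log_argtypes { LOG_ONE_STR, LOG_STR_INT, LOG_TWO_INT };

/* printf-style "middle" logger the dispatcher forwards to */
static void log_middle(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

/* Dispatch on the tag, pull the typed arguments off the va_list in the
 * agreed order, and forward them to the middle logger. */
static void log_varargs(enum log_argtypes argtypes, const char *fmt, ...)
{
	va_list ap;
	const char *s;
	int a, b;

	va_start(ap, fmt);
	switch (argtypes) {
	case LOG_ONE_STR:
		s = va_arg(ap, const char *);
		log_middle(fmt, s);
		break;
	case LOG_STR_INT:
		s = va_arg(ap, const char *);
		a = va_arg(ap, int);
		log_middle(fmt, s, a);
		break;
	case LOG_TWO_INT:
		a = va_arg(ap, int);
		b = va_arg(ap, int);
		log_middle(fmt, a, b);
		break;
	}
	va_end(ap);
}

int main(void)
{
	log_varargs(LOG_STR_INT, "exec of %s returned %d\n", "/bin/true", 0);
	return 0;
}
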
54599diff -urNp linux-3.1.1/grsecurity/grsec_mem.c linux-3.1.1/grsecurity/grsec_mem.c
54600--- linux-3.1.1/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54601+++ linux-3.1.1/grsecurity/grsec_mem.c 2011-11-16 18:40:31.000000000 -0500
54602@@ -0,0 +1,33 @@
54603+#include <linux/kernel.h>
54604+#include <linux/sched.h>
54605+#include <linux/mm.h>
54606+#include <linux/mman.h>
54607+#include <linux/grinternal.h>
54608+
54609+void
54610+gr_handle_ioperm(void)
54611+{
54612+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54613+ return;
54614+}
54615+
54616+void
54617+gr_handle_iopl(void)
54618+{
54619+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54620+ return;
54621+}
54622+
54623+void
54624+gr_handle_mem_readwrite(u64 from, u64 to)
54625+{
54626+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54627+ return;
54628+}
54629+
54630+void
54631+gr_handle_vm86(void)
54632+{
54633+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54634+ return;
54635+}
54636diff -urNp linux-3.1.1/grsecurity/grsec_mount.c linux-3.1.1/grsecurity/grsec_mount.c
54637--- linux-3.1.1/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54638+++ linux-3.1.1/grsecurity/grsec_mount.c 2011-11-16 18:40:31.000000000 -0500
54639@@ -0,0 +1,62 @@
54640+#include <linux/kernel.h>
54641+#include <linux/sched.h>
54642+#include <linux/mount.h>
54643+#include <linux/grsecurity.h>
54644+#include <linux/grinternal.h>
54645+
54646+void
54647+gr_log_remount(const char *devname, const int retval)
54648+{
54649+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54650+ if (grsec_enable_mount && (retval >= 0))
54651+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54652+#endif
54653+ return;
54654+}
54655+
54656+void
54657+gr_log_unmount(const char *devname, const int retval)
54658+{
54659+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54660+ if (grsec_enable_mount && (retval >= 0))
54661+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54662+#endif
54663+ return;
54664+}
54665+
54666+void
54667+gr_log_mount(const char *from, const char *to, const int retval)
54668+{
54669+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54670+ if (grsec_enable_mount && (retval >= 0))
54671+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54672+#endif
54673+ return;
54674+}
54675+
54676+int
54677+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54678+{
54679+#ifdef CONFIG_GRKERNSEC_ROFS
54680+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54681+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54682+ return -EPERM;
54683+ } else
54684+ return 0;
54685+#endif
54686+ return 0;
54687+}
54688+
54689+int
54690+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54691+{
54692+#ifdef CONFIG_GRKERNSEC_ROFS
54693+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54694+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54695+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54696+ return -EPERM;
54697+ } else
54698+ return 0;
54699+#endif
54700+ return 0;
54701+}
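
gr_handle_rofs_mount() and gr_handle_rofs_blockwrite() are the two halves of the
runtime read-only policy: once the feature is switched on, new mounts must be
read-only and block devices cannot be opened for writing. A rough userspace
model of those two predicates, with invented flag names, assuming the same
semantics:

#include <stdio.h>

#define FLAG_RDONLY	0x1	/* stand-in for MNT_READONLY */
#define MODE_WRITE	0x2	/* stand-in for MAY_WRITE */

static int rofs_enabled = 1;	/* stand-in for grsec_enable_rofs */

/* new mounts must be read-only while the protection is active */
static int rofs_allow_mount(int mnt_flags)
{
	return (!rofs_enabled || (mnt_flags & FLAG_RDONLY)) ? 0 : -1;
}

/* opening a block device for write is refused */
static int rofs_allow_blockdev_open(int acc_mode)
{
	return (!rofs_enabled || !(acc_mode & MODE_WRITE)) ? 0 : -1;
}

int main(void)
{
	printf("rw mount: %d, ro mount: %d, blockdev write: %d\n",
	       rofs_allow_mount(0), rofs_allow_mount(FLAG_RDONLY),
	       rofs_allow_blockdev_open(MODE_WRITE));
	return 0;
}
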
54702diff -urNp linux-3.1.1/grsecurity/grsec_pax.c linux-3.1.1/grsecurity/grsec_pax.c
54703--- linux-3.1.1/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54704+++ linux-3.1.1/grsecurity/grsec_pax.c 2011-11-16 18:40:31.000000000 -0500
54705@@ -0,0 +1,36 @@
54706+#include <linux/kernel.h>
54707+#include <linux/sched.h>
54708+#include <linux/mm.h>
54709+#include <linux/file.h>
54710+#include <linux/grinternal.h>
54711+#include <linux/grsecurity.h>
54712+
54713+void
54714+gr_log_textrel(struct vm_area_struct * vma)
54715+{
54716+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54717+ if (grsec_enable_audit_textrel)
54718+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54719+#endif
54720+ return;
54721+}
54722+
54723+void
54724+gr_log_rwxmmap(struct file *file)
54725+{
54726+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54727+ if (grsec_enable_log_rwxmaps)
54728+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54729+#endif
54730+ return;
54731+}
54732+
54733+void
54734+gr_log_rwxmprotect(struct file *file)
54735+{
54736+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54737+ if (grsec_enable_log_rwxmaps)
54738+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54739+#endif
54740+ return;
54741+}
54742diff -urNp linux-3.1.1/grsecurity/grsec_ptrace.c linux-3.1.1/grsecurity/grsec_ptrace.c
54743--- linux-3.1.1/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54744+++ linux-3.1.1/grsecurity/grsec_ptrace.c 2011-11-16 18:40:31.000000000 -0500
54745@@ -0,0 +1,14 @@
54746+#include <linux/kernel.h>
54747+#include <linux/sched.h>
54748+#include <linux/grinternal.h>
54749+#include <linux/grsecurity.h>
54750+
54751+void
54752+gr_audit_ptrace(struct task_struct *task)
54753+{
54754+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54755+ if (grsec_enable_audit_ptrace)
54756+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54757+#endif
54758+ return;
54759+}
54760diff -urNp linux-3.1.1/grsecurity/grsec_sig.c linux-3.1.1/grsecurity/grsec_sig.c
54761--- linux-3.1.1/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54762+++ linux-3.1.1/grsecurity/grsec_sig.c 2011-11-16 18:40:31.000000000 -0500
54763@@ -0,0 +1,206 @@
54764+#include <linux/kernel.h>
54765+#include <linux/sched.h>
54766+#include <linux/delay.h>
54767+#include <linux/grsecurity.h>
54768+#include <linux/grinternal.h>
54769+#include <linux/hardirq.h>
54770+
54771+char *signames[] = {
54772+ [SIGSEGV] = "Segmentation fault",
54773+ [SIGILL] = "Illegal instruction",
54774+ [SIGABRT] = "Abort",
54775+ [SIGBUS] = "Invalid alignment/Bus error"
54776+};
54777+
54778+void
54779+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54780+{
54781+#ifdef CONFIG_GRKERNSEC_SIGNAL
54782+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54783+ (sig == SIGABRT) || (sig == SIGBUS))) {
54784+ if (t->pid == current->pid) {
54785+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54786+ } else {
54787+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54788+ }
54789+ }
54790+#endif
54791+ return;
54792+}
54793+
54794+int
54795+gr_handle_signal(const struct task_struct *p, const int sig)
54796+{
54797+#ifdef CONFIG_GRKERNSEC
54798+ if (current->pid > 1 && gr_check_protected_task(p)) {
54799+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54800+ return -EPERM;
54801+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54802+ return -EPERM;
54803+ }
54804+#endif
54805+ return 0;
54806+}
54807+
54808+#ifdef CONFIG_GRKERNSEC
54809+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54810+
54811+int gr_fake_force_sig(int sig, struct task_struct *t)
54812+{
54813+ unsigned long int flags;
54814+ int ret, blocked, ignored;
54815+ struct k_sigaction *action;
54816+
54817+ spin_lock_irqsave(&t->sighand->siglock, flags);
54818+ action = &t->sighand->action[sig-1];
54819+ ignored = action->sa.sa_handler == SIG_IGN;
54820+ blocked = sigismember(&t->blocked, sig);
54821+ if (blocked || ignored) {
54822+ action->sa.sa_handler = SIG_DFL;
54823+ if (blocked) {
54824+ sigdelset(&t->blocked, sig);
54825+ recalc_sigpending_and_wake(t);
54826+ }
54827+ }
54828+ if (action->sa.sa_handler == SIG_DFL)
54829+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54830+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54831+
54832+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54833+
54834+ return ret;
54835+}
54836+#endif
54837+
54838+#ifdef CONFIG_GRKERNSEC_BRUTE
54839+#define GR_USER_BAN_TIME (15 * 60)
54840+
54841+static int __get_dumpable(unsigned long mm_flags)
54842+{
54843+ int ret;
54844+
54845+ ret = mm_flags & MMF_DUMPABLE_MASK;
54846+ return (ret >= 2) ? 2 : ret;
54847+}
54848+#endif
54849+
54850+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54851+{
54852+#ifdef CONFIG_GRKERNSEC_BRUTE
54853+ uid_t uid = 0;
54854+
54855+ if (!grsec_enable_brute)
54856+ return;
54857+
54858+ rcu_read_lock();
54859+ read_lock(&tasklist_lock);
54860+ read_lock(&grsec_exec_file_lock);
54861+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54862+ p->real_parent->brute = 1;
54863+ else {
54864+ const struct cred *cred = __task_cred(p), *cred2;
54865+ struct task_struct *tsk, *tsk2;
54866+
54867+ if (!__get_dumpable(mm_flags) && cred->uid) {
54868+ struct user_struct *user;
54869+
54870+ uid = cred->uid;
54871+
54872+ /* this is put upon execution past expiration */
54873+ user = find_user(uid);
54874+ if (user == NULL)
54875+ goto unlock;
54876+ user->banned = 1;
54877+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54878+ if (user->ban_expires == ~0UL)
54879+ user->ban_expires--;
54880+
54881+ do_each_thread(tsk2, tsk) {
54882+ cred2 = __task_cred(tsk);
54883+ if (tsk != p && cred2->uid == uid)
54884+ gr_fake_force_sig(SIGKILL, tsk);
54885+ } while_each_thread(tsk2, tsk);
54886+ }
54887+ }
54888+unlock:
54889+ read_unlock(&grsec_exec_file_lock);
54890+ read_unlock(&tasklist_lock);
54891+ rcu_read_unlock();
54892+
54893+ if (uid)
54894+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54895+
54896+#endif
54897+ return;
54898+}
54899+
54900+void gr_handle_brute_check(void)
54901+{
54902+#ifdef CONFIG_GRKERNSEC_BRUTE
54903+ if (current->brute)
54904+ msleep(30 * 1000);
54905+#endif
54906+ return;
54907+}
54908+
54909+void gr_handle_kernel_exploit(void)
54910+{
54911+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54912+ const struct cred *cred;
54913+ struct task_struct *tsk, *tsk2;
54914+ struct user_struct *user;
54915+ uid_t uid;
54916+
54917+ if (in_irq() || in_serving_softirq() || in_nmi())
54918+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54919+
54920+ uid = current_uid();
54921+
54922+ if (uid == 0)
54923+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
54924+ else {
54925+ /* kill all the processes of this user, hold a reference
54926+ to their creds struct, and prevent them from creating
54927+ another process until system reset
54928+ */
54929+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54930+ /* we intentionally leak this ref */
54931+ user = get_uid(current->cred->user);
54932+ if (user) {
54933+ user->banned = 1;
54934+ user->ban_expires = ~0UL;
54935+ }
54936+
54937+ read_lock(&tasklist_lock);
54938+ do_each_thread(tsk2, tsk) {
54939+ cred = __task_cred(tsk);
54940+ if (cred->uid == uid)
54941+ gr_fake_force_sig(SIGKILL, tsk);
54942+ } while_each_thread(tsk2, tsk);
54943+ read_unlock(&tasklist_lock);
54944+ }
54945+#endif
54946+}
54947+
54948+int __gr_process_user_ban(struct user_struct *user)
54949+{
54950+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54951+ if (unlikely(user->banned)) {
54952+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54953+ user->banned = 0;
54954+ user->ban_expires = 0;
54955+ free_uid(user);
54956+ } else
54957+ return -EPERM;
54958+ }
54959+#endif
54960+ return 0;
54961+}
54962+
54963+int gr_process_user_ban(void)
54964+{
54965+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54966+ return __gr_process_user_ban(current->cred->user);
54967+#endif
54968+ return 0;
54969+}
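
__gr_process_user_ban() treats ban_expires == ~0UL as a permanent ban and any
other value as a timestamp after which the ban clears itself. A small userspace
sketch of that expiry logic; note that the kernel version uses time_after_eq()
so the comparison is safe across wraparound, which the plain comparison below
ignores:

#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for the per-user ban state the patch keeps in
 * struct user_struct; the field names only loosely mirror it. */
struct ban_state {
	int banned;
	unsigned long ban_expires;	/* ~0UL = permanent ban */
};

/* Same shape as __gr_process_user_ban(): a temporary ban clears itself
 * once the expiry time passes; a permanent ban never does. */
static int process_ban(struct ban_state *u)
{
	unsigned long now = (unsigned long)time(NULL);

	if (u->banned) {
		if (u->ban_expires != ~0UL && now >= u->ban_expires) {
			u->banned = 0;
			u->ban_expires = 0;
		} else {
			return -1;	/* still banned: refuse the operation */
		}
	}
	return 0;
}

int main(void)
{
	/* 15 minute ban, as with GR_USER_BAN_TIME above */
	struct ban_state u = {
		.banned = 1,
		.ban_expires = (unsigned long)time(NULL) + 15 * 60,
	};

	printf("allowed now? %s\n", process_ban(&u) ? "no" : "yes");
	return 0;
}
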
54970diff -urNp linux-3.1.1/grsecurity/grsec_sock.c linux-3.1.1/grsecurity/grsec_sock.c
54971--- linux-3.1.1/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54972+++ linux-3.1.1/grsecurity/grsec_sock.c 2011-11-16 18:40:31.000000000 -0500
54973@@ -0,0 +1,244 @@
54974+#include <linux/kernel.h>
54975+#include <linux/module.h>
54976+#include <linux/sched.h>
54977+#include <linux/file.h>
54978+#include <linux/net.h>
54979+#include <linux/in.h>
54980+#include <linux/ip.h>
54981+#include <net/sock.h>
54982+#include <net/inet_sock.h>
54983+#include <linux/grsecurity.h>
54984+#include <linux/grinternal.h>
54985+#include <linux/gracl.h>
54986+
54987+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54988+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54989+
54990+EXPORT_SYMBOL(gr_search_udp_recvmsg);
54991+EXPORT_SYMBOL(gr_search_udp_sendmsg);
54992+
54993+#ifdef CONFIG_UNIX_MODULE
54994+EXPORT_SYMBOL(gr_acl_handle_unix);
54995+EXPORT_SYMBOL(gr_acl_handle_mknod);
54996+EXPORT_SYMBOL(gr_handle_chroot_unix);
54997+EXPORT_SYMBOL(gr_handle_create);
54998+#endif
54999+
55000+#ifdef CONFIG_GRKERNSEC
55001+#define gr_conn_table_size 32749
55002+struct conn_table_entry {
55003+ struct conn_table_entry *next;
55004+ struct signal_struct *sig;
55005+};
55006+
55007+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
55008+DEFINE_SPINLOCK(gr_conn_table_lock);
55009+
55010+extern const char * gr_socktype_to_name(unsigned char type);
55011+extern const char * gr_proto_to_name(unsigned char proto);
55012+extern const char * gr_sockfamily_to_name(unsigned char family);
55013+
55014+static __inline__ int
55015+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
55016+{
55017+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
55018+}
55019+
55020+static __inline__ int
55021+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
55022+ __u16 sport, __u16 dport)
55023+{
55024+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
55025+ sig->gr_sport == sport && sig->gr_dport == dport))
55026+ return 1;
55027+ else
55028+ return 0;
55029+}
55030+
55031+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
55032+{
55033+ struct conn_table_entry **match;
55034+ unsigned int index;
55035+
55036+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55037+ sig->gr_sport, sig->gr_dport,
55038+ gr_conn_table_size);
55039+
55040+ newent->sig = sig;
55041+
55042+ match = &gr_conn_table[index];
55043+ newent->next = *match;
55044+ *match = newent;
55045+
55046+ return;
55047+}
55048+
55049+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
55050+{
55051+ struct conn_table_entry *match, *last = NULL;
55052+ unsigned int index;
55053+
55054+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55055+ sig->gr_sport, sig->gr_dport,
55056+ gr_conn_table_size);
55057+
55058+ match = gr_conn_table[index];
55059+ while (match && !conn_match(match->sig,
55060+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
55061+ sig->gr_dport)) {
55062+ last = match;
55063+ match = match->next;
55064+ }
55065+
55066+ if (match) {
55067+ if (last)
55068+ last->next = match->next;
55069+ else
55070+ gr_conn_table[index] = NULL;
55071+ kfree(match);
55072+ }
55073+
55074+ return;
55075+}
55076+
55077+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
55078+ __u16 sport, __u16 dport)
55079+{
55080+ struct conn_table_entry *match;
55081+ unsigned int index;
55082+
55083+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
55084+
55085+ match = gr_conn_table[index];
55086+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
55087+ match = match->next;
55088+
55089+ if (match)
55090+ return match->sig;
55091+ else
55092+ return NULL;
55093+}
55094+
55095+#endif
55096+
55097+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
55098+{
55099+#ifdef CONFIG_GRKERNSEC
55100+ struct signal_struct *sig = task->signal;
55101+ struct conn_table_entry *newent;
55102+
55103+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
55104+ if (newent == NULL)
55105+ return;
55106+ /* no bh lock needed since we are called with bh disabled */
55107+ spin_lock(&gr_conn_table_lock);
55108+ gr_del_task_from_ip_table_nolock(sig);
55109+ sig->gr_saddr = inet->inet_rcv_saddr;
55110+ sig->gr_daddr = inet->inet_daddr;
55111+ sig->gr_sport = inet->inet_sport;
55112+ sig->gr_dport = inet->inet_dport;
55113+ gr_add_to_task_ip_table_nolock(sig, newent);
55114+ spin_unlock(&gr_conn_table_lock);
55115+#endif
55116+ return;
55117+}
55118+
55119+void gr_del_task_from_ip_table(struct task_struct *task)
55120+{
55121+#ifdef CONFIG_GRKERNSEC
55122+ spin_lock_bh(&gr_conn_table_lock);
55123+ gr_del_task_from_ip_table_nolock(task->signal);
55124+ spin_unlock_bh(&gr_conn_table_lock);
55125+#endif
55126+ return;
55127+}
55128+
55129+void
55130+gr_attach_curr_ip(const struct sock *sk)
55131+{
55132+#ifdef CONFIG_GRKERNSEC
55133+ struct signal_struct *p, *set;
55134+ const struct inet_sock *inet = inet_sk(sk);
55135+
55136+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
55137+ return;
55138+
55139+ set = current->signal;
55140+
55141+ spin_lock_bh(&gr_conn_table_lock);
55142+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
55143+ inet->inet_dport, inet->inet_sport);
55144+ if (unlikely(p != NULL)) {
55145+ set->curr_ip = p->curr_ip;
55146+ set->used_accept = 1;
55147+ gr_del_task_from_ip_table_nolock(p);
55148+ spin_unlock_bh(&gr_conn_table_lock);
55149+ return;
55150+ }
55151+ spin_unlock_bh(&gr_conn_table_lock);
55152+
55153+ set->curr_ip = inet->inet_daddr;
55154+ set->used_accept = 1;
55155+#endif
55156+ return;
55157+}
55158+
55159+int
55160+gr_handle_sock_all(const int family, const int type, const int protocol)
55161+{
55162+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55163+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
55164+ (family != AF_UNIX)) {
55165+ if (family == AF_INET)
55166+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
55167+ else
55168+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
55169+ return -EACCES;
55170+ }
55171+#endif
55172+ return 0;
55173+}
55174+
55175+int
55176+gr_handle_sock_server(const struct sockaddr *sck)
55177+{
55178+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55179+ if (grsec_enable_socket_server &&
55180+ in_group_p(grsec_socket_server_gid) &&
55181+ sck && (sck->sa_family != AF_UNIX) &&
55182+ (sck->sa_family != AF_LOCAL)) {
55183+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55184+ return -EACCES;
55185+ }
55186+#endif
55187+ return 0;
55188+}
55189+
55190+int
55191+gr_handle_sock_server_other(const struct sock *sck)
55192+{
55193+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55194+ if (grsec_enable_socket_server &&
55195+ in_group_p(grsec_socket_server_gid) &&
55196+ sck && (sck->sk_family != AF_UNIX) &&
55197+ (sck->sk_family != AF_LOCAL)) {
55198+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55199+ return -EACCES;
55200+ }
55201+#endif
55202+ return 0;
55203+}
55204+
55205+int
55206+gr_handle_sock_client(const struct sockaddr *sck)
55207+{
55208+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55209+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
55210+ sck && (sck->sa_family != AF_UNIX) &&
55211+ (sck->sa_family != AF_LOCAL)) {
55212+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
55213+ return -EACCES;
55214+ }
55215+#endif
55216+ return 0;
55217+}
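
The connection table above is a fixed-size, chained hash table keyed on the TCP
4-tuple, protected by gr_conn_table_lock and sized with a prime so the modulo
spreads entries evenly. A self-contained sketch of the same structure (without
locking), using invented names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 32749	/* prime, as in the patch */

struct conn {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	struct conn *next;
};

static struct conn *table[TABLE_SIZE];

/* Same mixing as conn_hash() above: fold the ports into the address sum. */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void conn_add(struct conn *c)
{
	unsigned int i = conn_hash(c->saddr, c->daddr, c->sport, c->dport);

	c->next = table[i];	/* push onto the head of the chain */
	table[i] = c;
}

static struct conn *conn_lookup(uint32_t saddr, uint32_t daddr,
				uint16_t sport, uint16_t dport)
{
	struct conn *c = table[conn_hash(saddr, daddr, sport, dport)];

	for (; c; c = c->next)
		if (c->saddr == saddr && c->daddr == daddr &&
		    c->sport == sport && c->dport == dport)
			return c;
	return NULL;
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->saddr = 0x0100007f;	/* 127.0.0.1, network byte order */
	c->daddr = 0x0200007f;
	c->sport = 12345;
	c->dport = 80;
	conn_add(c);
	printf("found: %s\n",
	       conn_lookup(0x0100007f, 0x0200007f, 12345, 80) ? "yes" : "no");
	free(c);
	return 0;
}
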
55218diff -urNp linux-3.1.1/grsecurity/grsec_sysctl.c linux-3.1.1/grsecurity/grsec_sysctl.c
55219--- linux-3.1.1/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
55220+++ linux-3.1.1/grsecurity/grsec_sysctl.c 2011-11-16 18:40:31.000000000 -0500
55221@@ -0,0 +1,433 @@
55222+#include <linux/kernel.h>
55223+#include <linux/sched.h>
55224+#include <linux/sysctl.h>
55225+#include <linux/grsecurity.h>
55226+#include <linux/grinternal.h>
55227+
55228+int
55229+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
55230+{
55231+#ifdef CONFIG_GRKERNSEC_SYSCTL
55232+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
55233+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
55234+ return -EACCES;
55235+ }
55236+#endif
55237+ return 0;
55238+}
55239+
55240+#ifdef CONFIG_GRKERNSEC_ROFS
55241+static int __maybe_unused one = 1;
55242+#endif
55243+
55244+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
55245+struct ctl_table grsecurity_table[] = {
55246+#ifdef CONFIG_GRKERNSEC_SYSCTL
55247+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
55248+#ifdef CONFIG_GRKERNSEC_IO
55249+ {
55250+ .procname = "disable_priv_io",
55251+ .data = &grsec_disable_privio,
55252+ .maxlen = sizeof(int),
55253+ .mode = 0600,
55254+ .proc_handler = &proc_dointvec,
55255+ },
55256+#endif
55257+#endif
55258+#ifdef CONFIG_GRKERNSEC_LINK
55259+ {
55260+ .procname = "linking_restrictions",
55261+ .data = &grsec_enable_link,
55262+ .maxlen = sizeof(int),
55263+ .mode = 0600,
55264+ .proc_handler = &proc_dointvec,
55265+ },
55266+#endif
55267+#ifdef CONFIG_GRKERNSEC_BRUTE
55268+ {
55269+ .procname = "deter_bruteforce",
55270+ .data = &grsec_enable_brute,
55271+ .maxlen = sizeof(int),
55272+ .mode = 0600,
55273+ .proc_handler = &proc_dointvec,
55274+ },
55275+#endif
55276+#ifdef CONFIG_GRKERNSEC_FIFO
55277+ {
55278+ .procname = "fifo_restrictions",
55279+ .data = &grsec_enable_fifo,
55280+ .maxlen = sizeof(int),
55281+ .mode = 0600,
55282+ .proc_handler = &proc_dointvec,
55283+ },
55284+#endif
55285+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55286+ {
55287+ .procname = "ip_blackhole",
55288+ .data = &grsec_enable_blackhole,
55289+ .maxlen = sizeof(int),
55290+ .mode = 0600,
55291+ .proc_handler = &proc_dointvec,
55292+ },
55293+ {
55294+ .procname = "lastack_retries",
55295+ .data = &grsec_lastack_retries,
55296+ .maxlen = sizeof(int),
55297+ .mode = 0600,
55298+ .proc_handler = &proc_dointvec,
55299+ },
55300+#endif
55301+#ifdef CONFIG_GRKERNSEC_EXECLOG
55302+ {
55303+ .procname = "exec_logging",
55304+ .data = &grsec_enable_execlog,
55305+ .maxlen = sizeof(int),
55306+ .mode = 0600,
55307+ .proc_handler = &proc_dointvec,
55308+ },
55309+#endif
55310+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55311+ {
55312+ .procname = "rwxmap_logging",
55313+ .data = &grsec_enable_log_rwxmaps,
55314+ .maxlen = sizeof(int),
55315+ .mode = 0600,
55316+ .proc_handler = &proc_dointvec,
55317+ },
55318+#endif
55319+#ifdef CONFIG_GRKERNSEC_SIGNAL
55320+ {
55321+ .procname = "signal_logging",
55322+ .data = &grsec_enable_signal,
55323+ .maxlen = sizeof(int),
55324+ .mode = 0600,
55325+ .proc_handler = &proc_dointvec,
55326+ },
55327+#endif
55328+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55329+ {
55330+ .procname = "forkfail_logging",
55331+ .data = &grsec_enable_forkfail,
55332+ .maxlen = sizeof(int),
55333+ .mode = 0600,
55334+ .proc_handler = &proc_dointvec,
55335+ },
55336+#endif
55337+#ifdef CONFIG_GRKERNSEC_TIME
55338+ {
55339+ .procname = "timechange_logging",
55340+ .data = &grsec_enable_time,
55341+ .maxlen = sizeof(int),
55342+ .mode = 0600,
55343+ .proc_handler = &proc_dointvec,
55344+ },
55345+#endif
55346+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55347+ {
55348+ .procname = "chroot_deny_shmat",
55349+ .data = &grsec_enable_chroot_shmat,
55350+ .maxlen = sizeof(int),
55351+ .mode = 0600,
55352+ .proc_handler = &proc_dointvec,
55353+ },
55354+#endif
55355+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55356+ {
55357+ .procname = "chroot_deny_unix",
55358+ .data = &grsec_enable_chroot_unix,
55359+ .maxlen = sizeof(int),
55360+ .mode = 0600,
55361+ .proc_handler = &proc_dointvec,
55362+ },
55363+#endif
55364+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55365+ {
55366+ .procname = "chroot_deny_mount",
55367+ .data = &grsec_enable_chroot_mount,
55368+ .maxlen = sizeof(int),
55369+ .mode = 0600,
55370+ .proc_handler = &proc_dointvec,
55371+ },
55372+#endif
55373+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55374+ {
55375+ .procname = "chroot_deny_fchdir",
55376+ .data = &grsec_enable_chroot_fchdir,
55377+ .maxlen = sizeof(int),
55378+ .mode = 0600,
55379+ .proc_handler = &proc_dointvec,
55380+ },
55381+#endif
55382+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55383+ {
55384+ .procname = "chroot_deny_chroot",
55385+ .data = &grsec_enable_chroot_double,
55386+ .maxlen = sizeof(int),
55387+ .mode = 0600,
55388+ .proc_handler = &proc_dointvec,
55389+ },
55390+#endif
55391+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55392+ {
55393+ .procname = "chroot_deny_pivot",
55394+ .data = &grsec_enable_chroot_pivot,
55395+ .maxlen = sizeof(int),
55396+ .mode = 0600,
55397+ .proc_handler = &proc_dointvec,
55398+ },
55399+#endif
55400+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55401+ {
55402+ .procname = "chroot_enforce_chdir",
55403+ .data = &grsec_enable_chroot_chdir,
55404+ .maxlen = sizeof(int),
55405+ .mode = 0600,
55406+ .proc_handler = &proc_dointvec,
55407+ },
55408+#endif
55409+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55410+ {
55411+ .procname = "chroot_deny_chmod",
55412+ .data = &grsec_enable_chroot_chmod,
55413+ .maxlen = sizeof(int),
55414+ .mode = 0600,
55415+ .proc_handler = &proc_dointvec,
55416+ },
55417+#endif
55418+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55419+ {
55420+ .procname = "chroot_deny_mknod",
55421+ .data = &grsec_enable_chroot_mknod,
55422+ .maxlen = sizeof(int),
55423+ .mode = 0600,
55424+ .proc_handler = &proc_dointvec,
55425+ },
55426+#endif
55427+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55428+ {
55429+ .procname = "chroot_restrict_nice",
55430+ .data = &grsec_enable_chroot_nice,
55431+ .maxlen = sizeof(int),
55432+ .mode = 0600,
55433+ .proc_handler = &proc_dointvec,
55434+ },
55435+#endif
55436+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55437+ {
55438+ .procname = "chroot_execlog",
55439+ .data = &grsec_enable_chroot_execlog,
55440+ .maxlen = sizeof(int),
55441+ .mode = 0600,
55442+ .proc_handler = &proc_dointvec,
55443+ },
55444+#endif
55445+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55446+ {
55447+ .procname = "chroot_caps",
55448+ .data = &grsec_enable_chroot_caps,
55449+ .maxlen = sizeof(int),
55450+ .mode = 0600,
55451+ .proc_handler = &proc_dointvec,
55452+ },
55453+#endif
55454+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55455+ {
55456+ .procname = "chroot_deny_sysctl",
55457+ .data = &grsec_enable_chroot_sysctl,
55458+ .maxlen = sizeof(int),
55459+ .mode = 0600,
55460+ .proc_handler = &proc_dointvec,
55461+ },
55462+#endif
55463+#ifdef CONFIG_GRKERNSEC_TPE
55464+ {
55465+ .procname = "tpe",
55466+ .data = &grsec_enable_tpe,
55467+ .maxlen = sizeof(int),
55468+ .mode = 0600,
55469+ .proc_handler = &proc_dointvec,
55470+ },
55471+ {
55472+ .procname = "tpe_gid",
55473+ .data = &grsec_tpe_gid,
55474+ .maxlen = sizeof(int),
55475+ .mode = 0600,
55476+ .proc_handler = &proc_dointvec,
55477+ },
55478+#endif
55479+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55480+ {
55481+ .procname = "tpe_invert",
55482+ .data = &grsec_enable_tpe_invert,
55483+ .maxlen = sizeof(int),
55484+ .mode = 0600,
55485+ .proc_handler = &proc_dointvec,
55486+ },
55487+#endif
55488+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55489+ {
55490+ .procname = "tpe_restrict_all",
55491+ .data = &grsec_enable_tpe_all,
55492+ .maxlen = sizeof(int),
55493+ .mode = 0600,
55494+ .proc_handler = &proc_dointvec,
55495+ },
55496+#endif
55497+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55498+ {
55499+ .procname = "socket_all",
55500+ .data = &grsec_enable_socket_all,
55501+ .maxlen = sizeof(int),
55502+ .mode = 0600,
55503+ .proc_handler = &proc_dointvec,
55504+ },
55505+ {
55506+ .procname = "socket_all_gid",
55507+ .data = &grsec_socket_all_gid,
55508+ .maxlen = sizeof(int),
55509+ .mode = 0600,
55510+ .proc_handler = &proc_dointvec,
55511+ },
55512+#endif
55513+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55514+ {
55515+ .procname = "socket_client",
55516+ .data = &grsec_enable_socket_client,
55517+ .maxlen = sizeof(int),
55518+ .mode = 0600,
55519+ .proc_handler = &proc_dointvec,
55520+ },
55521+ {
55522+ .procname = "socket_client_gid",
55523+ .data = &grsec_socket_client_gid,
55524+ .maxlen = sizeof(int),
55525+ .mode = 0600,
55526+ .proc_handler = &proc_dointvec,
55527+ },
55528+#endif
55529+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55530+ {
55531+ .procname = "socket_server",
55532+ .data = &grsec_enable_socket_server,
55533+ .maxlen = sizeof(int),
55534+ .mode = 0600,
55535+ .proc_handler = &proc_dointvec,
55536+ },
55537+ {
55538+ .procname = "socket_server_gid",
55539+ .data = &grsec_socket_server_gid,
55540+ .maxlen = sizeof(int),
55541+ .mode = 0600,
55542+ .proc_handler = &proc_dointvec,
55543+ },
55544+#endif
55545+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55546+ {
55547+ .procname = "audit_group",
55548+ .data = &grsec_enable_group,
55549+ .maxlen = sizeof(int),
55550+ .mode = 0600,
55551+ .proc_handler = &proc_dointvec,
55552+ },
55553+ {
55554+ .procname = "audit_gid",
55555+ .data = &grsec_audit_gid,
55556+ .maxlen = sizeof(int),
55557+ .mode = 0600,
55558+ .proc_handler = &proc_dointvec,
55559+ },
55560+#endif
55561+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55562+ {
55563+ .procname = "audit_chdir",
55564+ .data = &grsec_enable_chdir,
55565+ .maxlen = sizeof(int),
55566+ .mode = 0600,
55567+ .proc_handler = &proc_dointvec,
55568+ },
55569+#endif
55570+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55571+ {
55572+ .procname = "audit_mount",
55573+ .data = &grsec_enable_mount,
55574+ .maxlen = sizeof(int),
55575+ .mode = 0600,
55576+ .proc_handler = &proc_dointvec,
55577+ },
55578+#endif
55579+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55580+ {
55581+ .procname = "audit_textrel",
55582+ .data = &grsec_enable_audit_textrel,
55583+ .maxlen = sizeof(int),
55584+ .mode = 0600,
55585+ .proc_handler = &proc_dointvec,
55586+ },
55587+#endif
55588+#ifdef CONFIG_GRKERNSEC_DMESG
55589+ {
55590+ .procname = "dmesg",
55591+ .data = &grsec_enable_dmesg,
55592+ .maxlen = sizeof(int),
55593+ .mode = 0600,
55594+ .proc_handler = &proc_dointvec,
55595+ },
55596+#endif
55597+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55598+ {
55599+ .procname = "chroot_findtask",
55600+ .data = &grsec_enable_chroot_findtask,
55601+ .maxlen = sizeof(int),
55602+ .mode = 0600,
55603+ .proc_handler = &proc_dointvec,
55604+ },
55605+#endif
55606+#ifdef CONFIG_GRKERNSEC_RESLOG
55607+ {
55608+ .procname = "resource_logging",
55609+ .data = &grsec_resource_logging,
55610+ .maxlen = sizeof(int),
55611+ .mode = 0600,
55612+ .proc_handler = &proc_dointvec,
55613+ },
55614+#endif
55615+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55616+ {
55617+ .procname = "audit_ptrace",
55618+ .data = &grsec_enable_audit_ptrace,
55619+ .maxlen = sizeof(int),
55620+ .mode = 0600,
55621+ .proc_handler = &proc_dointvec,
55622+ },
55623+#endif
55624+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55625+ {
55626+ .procname = "harden_ptrace",
55627+ .data = &grsec_enable_harden_ptrace,
55628+ .maxlen = sizeof(int),
55629+ .mode = 0600,
55630+ .proc_handler = &proc_dointvec,
55631+ },
55632+#endif
55633+ {
55634+ .procname = "grsec_lock",
55635+ .data = &grsec_lock,
55636+ .maxlen = sizeof(int),
55637+ .mode = 0600,
55638+ .proc_handler = &proc_dointvec,
55639+ },
55640+#endif
55641+#ifdef CONFIG_GRKERNSEC_ROFS
55642+ {
55643+ .procname = "romount_protect",
55644+ .data = &grsec_enable_rofs,
55645+ .maxlen = sizeof(int),
55646+ .mode = 0600,
55647+ .proc_handler = &proc_dointvec_minmax,
55648+ .extra1 = &one,
55649+ .extra2 = &one,
55650+ },
55651+#endif
55652+ { }
55653+};
55654+#endif
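
Each entry above becomes a writable toggle in the grsecurity sysctl directory,
and gr_handle_sysctl_mod() refuses further writes once grsec_lock is set.
Assuming the table is registered under /proc/sys/kernel/grsecurity/ (the usual
location; adjust the prefix if your kernel registers it elsewhere), a small
userspace helper for flipping a toggle and then locking the settings might look
like:

#include <stdio.h>

/* Write an integer to /proc/sys/kernel/grsecurity/<name>; the path prefix
 * is an assumption, see above. Requires root. */
static int grsec_sysctl_set(const char *name, int val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}

int main(void)
{
	/* Example: enable mount auditing, then lock the settings so
	 * gr_handle_sysctl_mod() rejects any later modification. */
	if (grsec_sysctl_set("audit_mount", 1) ||
	    grsec_sysctl_set("grsec_lock", 1))
		perror("grsec sysctl write");
	return 0;
}
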
55655diff -urNp linux-3.1.1/grsecurity/grsec_time.c linux-3.1.1/grsecurity/grsec_time.c
55656--- linux-3.1.1/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55657+++ linux-3.1.1/grsecurity/grsec_time.c 2011-11-16 18:40:31.000000000 -0500
55658@@ -0,0 +1,16 @@
55659+#include <linux/kernel.h>
55660+#include <linux/sched.h>
55661+#include <linux/grinternal.h>
55662+#include <linux/module.h>
55663+
55664+void
55665+gr_log_timechange(void)
55666+{
55667+#ifdef CONFIG_GRKERNSEC_TIME
55668+ if (grsec_enable_time)
55669+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55670+#endif
55671+ return;
55672+}
55673+
55674+EXPORT_SYMBOL(gr_log_timechange);
55675diff -urNp linux-3.1.1/grsecurity/grsec_tpe.c linux-3.1.1/grsecurity/grsec_tpe.c
55676--- linux-3.1.1/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55677+++ linux-3.1.1/grsecurity/grsec_tpe.c 2011-11-16 18:40:31.000000000 -0500
55678@@ -0,0 +1,39 @@
55679+#include <linux/kernel.h>
55680+#include <linux/sched.h>
55681+#include <linux/file.h>
55682+#include <linux/fs.h>
55683+#include <linux/grinternal.h>
55684+
55685+extern int gr_acl_tpe_check(void);
55686+
55687+int
55688+gr_tpe_allow(const struct file *file)
55689+{
55690+#ifdef CONFIG_GRKERNSEC
55691+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55692+ const struct cred *cred = current_cred();
55693+
55694+ if (cred->uid && ((grsec_enable_tpe &&
55695+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55696+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55697+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55698+#else
55699+ in_group_p(grsec_tpe_gid)
55700+#endif
55701+ ) || gr_acl_tpe_check()) &&
55702+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55703+ (inode->i_mode & S_IWOTH))))) {
55704+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55705+ return 0;
55706+ }
55707+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55708+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55709+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55710+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55711+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55712+ return 0;
55713+ }
55714+#endif
55715+#endif
55716+ return 1;
55717+}
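
gr_tpe_allow() denies execution for users covered by TPE when the parent
directory of the binary is untrusted: not owned by root, or owned by root but
group- or world-writable (GRKERNSEC_TPE_ALL tightens this further). A userspace
sketch of just that directory test, with invented names and the caveat that the
kernel checks the parent inode directly rather than re-resolving the path:

#include <libgen.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* "Untrusted" for the basic TPE check: parent directory not owned by
 * root, or owned by root but group/world writable. */
static int tpe_dir_untrusted(const char *path)
{
	char buf[4096];
	struct stat st;

	snprintf(buf, sizeof(buf), "%s", path);
	if (stat(dirname(buf), &st))
		return 1;	/* treat unreadable parents as untrusted */
	if (st.st_uid != 0)
		return 1;
	if (st.st_mode & (S_IWGRP | S_IWOTH))
		return 1;
	return 0;
}

int main(int argc, char **argv)
{
	const char *target = argc > 1 ? argv[1] : "/bin/true";

	printf("%s: parent directory is %s\n", target,
	       tpe_dir_untrusted(target) ? "untrusted" : "trusted");
	return 0;
}
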
55718diff -urNp linux-3.1.1/grsecurity/grsum.c linux-3.1.1/grsecurity/grsum.c
55719--- linux-3.1.1/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55720+++ linux-3.1.1/grsecurity/grsum.c 2011-11-16 18:40:31.000000000 -0500
55721@@ -0,0 +1,61 @@
55722+#include <linux/err.h>
55723+#include <linux/kernel.h>
55724+#include <linux/sched.h>
55725+#include <linux/mm.h>
55726+#include <linux/scatterlist.h>
55727+#include <linux/crypto.h>
55728+#include <linux/gracl.h>
55729+
55730+
55731+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55732+#error "crypto and sha256 must be built into the kernel"
55733+#endif
55734+
55735+int
55736+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55737+{
55738+ char *p;
55739+ struct crypto_hash *tfm;
55740+ struct hash_desc desc;
55741+ struct scatterlist sg;
55742+ unsigned char temp_sum[GR_SHA_LEN];
55743+ volatile int retval = 0;
55744+ volatile int dummy = 0;
55745+ unsigned int i;
55746+
55747+ sg_init_table(&sg, 1);
55748+
55749+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55750+ if (IS_ERR(tfm)) {
55751+ /* should never happen, since sha256 should be built in */
55752+ return 1;
55753+ }
55754+
55755+ desc.tfm = tfm;
55756+ desc.flags = 0;
55757+
55758+ crypto_hash_init(&desc);
55759+
55760+ p = salt;
55761+ sg_set_buf(&sg, p, GR_SALT_LEN);
55762+ crypto_hash_update(&desc, &sg, sg.length);
55763+
55764+ p = entry->pw;
55765+ sg_set_buf(&sg, p, strlen(p));
55766+
55767+ crypto_hash_update(&desc, &sg, sg.length);
55768+
55769+ crypto_hash_final(&desc, temp_sum);
55770+
55771+ memset(entry->pw, 0, GR_PW_LEN);
55772+
55773+ for (i = 0; i < GR_SHA_LEN; i++)
55774+ if (sum[i] != temp_sum[i])
55775+ retval = 1;
55776+ else
55777+ dummy = 1; // waste a cycle
55778+
55779+ crypto_free_hash(tfm);
55780+
55781+ return retval;
55782+}
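
The final loop in chkpw() walks every byte of the two digests and only records
whether any differed, instead of returning at the first mismatch, so the
comparison takes roughly the same time whether the password is almost right or
completely wrong. The same idea written as a standalone helper (illustrative,
not from the patch):

#include <stdio.h>

/* Constant-time comparison: accumulate the XOR of every byte pair, so the
 * running time does not depend on where the first difference occurs. */
static int ct_memcmp(const unsigned char *a, const unsigned char *b,
		     unsigned int n)
{
	unsigned char diff = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];
	return diff != 0;	/* 0 = equal, 1 = mismatch */
}

int main(void)
{
	unsigned char x[32] = { 1, 2, 3 };
	unsigned char y[32] = { 1, 2, 4 };

	printf("digests match: %s\n", ct_memcmp(x, y, sizeof(x)) ? "no" : "yes");
	return 0;
}
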
55783diff -urNp linux-3.1.1/grsecurity/Kconfig linux-3.1.1/grsecurity/Kconfig
55784--- linux-3.1.1/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55785+++ linux-3.1.1/grsecurity/Kconfig 2011-11-16 18:40:31.000000000 -0500
55786@@ -0,0 +1,1037 @@
55787+#
55788+# grsecurity configuration
55789+#
55790+
55791+menu "Grsecurity"
55792+
55793+config GRKERNSEC
55794+ bool "Grsecurity"
55795+ select CRYPTO
55796+ select CRYPTO_SHA256
55797+ help
55798+ If you say Y here, you will be able to configure many features
55799+ that will enhance the security of your system. It is highly
55800+ recommended that you say Y here and read through the help
55801+ for each option so that you fully understand the features and
55802+ can evaluate their usefulness for your machine.
55803+
55804+choice
55805+ prompt "Security Level"
55806+ depends on GRKERNSEC
55807+ default GRKERNSEC_CUSTOM
55808+
55809+config GRKERNSEC_LOW
55810+ bool "Low"
55811+ select GRKERNSEC_LINK
55812+ select GRKERNSEC_FIFO
55813+ select GRKERNSEC_RANDNET
55814+ select GRKERNSEC_DMESG
55815+ select GRKERNSEC_CHROOT
55816+ select GRKERNSEC_CHROOT_CHDIR
55817+
55818+ help
55819+ If you choose this option, several of the grsecurity options will
55820+ be enabled that will give you greater protection against a number
55821+ of attacks, while assuring that none of your software will have any
55822+ conflicts with the additional security measures. If you run a lot
55823+ of unusual software, or you are having problems with the higher
55824+ security levels, you should say Y here. With this option, the
55825+ following features are enabled:
55826+
55827+ - Linking restrictions
55828+ - FIFO restrictions
55829+ - Restricted dmesg
55830+ - Enforced chdir("/") on chroot
55831+ - Runtime module disabling
55832+
55833+config GRKERNSEC_MEDIUM
55834+ bool "Medium"
55835+ select PAX
55836+ select PAX_EI_PAX
55837+ select PAX_PT_PAX_FLAGS
55838+ select PAX_HAVE_ACL_FLAGS
55839+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55840+ select GRKERNSEC_CHROOT
55841+ select GRKERNSEC_CHROOT_SYSCTL
55842+ select GRKERNSEC_LINK
55843+ select GRKERNSEC_FIFO
55844+ select GRKERNSEC_DMESG
55845+ select GRKERNSEC_RANDNET
55846+ select GRKERNSEC_FORKFAIL
55847+ select GRKERNSEC_TIME
55848+ select GRKERNSEC_SIGNAL
55849+ select GRKERNSEC_CHROOT
55850+ select GRKERNSEC_CHROOT_UNIX
55851+ select GRKERNSEC_CHROOT_MOUNT
55852+ select GRKERNSEC_CHROOT_PIVOT
55853+ select GRKERNSEC_CHROOT_DOUBLE
55854+ select GRKERNSEC_CHROOT_CHDIR
55855+ select GRKERNSEC_CHROOT_MKNOD
55856+ select GRKERNSEC_PROC
55857+ select GRKERNSEC_PROC_USERGROUP
55858+ select PAX_RANDUSTACK
55859+ select PAX_ASLR
55860+ select PAX_RANDMMAP
55861+ select PAX_REFCOUNT if (X86 || SPARC64)
55862+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55863+
55864+ help
55865+ If you say Y here, several features in addition to those included
55866+ in the low additional security level will be enabled. These
55867+ features provide even more security to your system, though in rare
55868+ cases they may be incompatible with very old or poorly written
55869+ software. If you enable this option, make sure that your auth
55870+ service (identd) is running as gid 1001. With this option,
55871+ the following features (in addition to those provided in the
55872+ low additional security level) will be enabled:
55873+
55874+ - Failed fork logging
55875+ - Time change logging
55876+ - Signal logging
55877+ - Deny mounts in chroot
55878+ - Deny double chrooting
55879+ - Deny sysctl writes in chroot
55880+ - Deny mknod in chroot
55881+ - Deny access to abstract AF_UNIX sockets out of chroot
55882+ - Deny pivot_root in chroot
55883+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
55884+ - /proc restrictions with special GID set to 10 (usually wheel)
55885+ - Address Space Layout Randomization (ASLR)
55886+ - Prevent exploitation of most refcount overflows
55887+ - Bounds checking of copying between the kernel and userland
55888+
55889+config GRKERNSEC_HIGH
55890+ bool "High"
55891+ select GRKERNSEC_LINK
55892+ select GRKERNSEC_FIFO
55893+ select GRKERNSEC_DMESG
55894+ select GRKERNSEC_FORKFAIL
55895+ select GRKERNSEC_TIME
55896+ select GRKERNSEC_SIGNAL
55897+ select GRKERNSEC_CHROOT
55898+ select GRKERNSEC_CHROOT_SHMAT
55899+ select GRKERNSEC_CHROOT_UNIX
55900+ select GRKERNSEC_CHROOT_MOUNT
55901+ select GRKERNSEC_CHROOT_FCHDIR
55902+ select GRKERNSEC_CHROOT_PIVOT
55903+ select GRKERNSEC_CHROOT_DOUBLE
55904+ select GRKERNSEC_CHROOT_CHDIR
55905+ select GRKERNSEC_CHROOT_MKNOD
55906+ select GRKERNSEC_CHROOT_CAPS
55907+ select GRKERNSEC_CHROOT_SYSCTL
55908+ select GRKERNSEC_CHROOT_FINDTASK
55909+ select GRKERNSEC_SYSFS_RESTRICT
55910+ select GRKERNSEC_PROC
55911+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55912+ select GRKERNSEC_HIDESYM
55913+ select GRKERNSEC_BRUTE
55914+ select GRKERNSEC_PROC_USERGROUP
55915+ select GRKERNSEC_KMEM
55916+ select GRKERNSEC_RESLOG
55917+ select GRKERNSEC_RANDNET
55918+ select GRKERNSEC_PROC_ADD
55919+ select GRKERNSEC_CHROOT_CHMOD
55920+ select GRKERNSEC_CHROOT_NICE
55921+ select GRKERNSEC_AUDIT_MOUNT
55922+ select GRKERNSEC_MODHARDEN if (MODULES)
55923+ select GRKERNSEC_HARDEN_PTRACE
55924+ select GRKERNSEC_VM86 if (X86_32)
55925+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55926+ select PAX
55927+ select PAX_RANDUSTACK
55928+ select PAX_ASLR
55929+ select PAX_RANDMMAP
55930+ select PAX_NOEXEC
55931+ select PAX_MPROTECT
55932+ select PAX_EI_PAX
55933+ select PAX_PT_PAX_FLAGS
55934+ select PAX_HAVE_ACL_FLAGS
55935+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55936+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55937+ select PAX_RANDKSTACK if (X86_TSC && X86)
55938+ select PAX_SEGMEXEC if (X86_32)
55939+ select PAX_PAGEEXEC
55940+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55941+ select PAX_EMUTRAMP if (PARISC)
55942+ select PAX_EMUSIGRT if (PARISC)
55943+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55944+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55945+ select PAX_REFCOUNT if (X86 || SPARC64)
55946+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
55947+ help
55948+ If you say Y here, many of the features of grsecurity will be
55949+ enabled, which will protect you against many kinds of attacks
55950+ against your system. The heightened security comes at a cost
55951+ of an increased chance of incompatibilities with rare software
55952+ on your machine. Since this security level enables PaX, you should
55953+ view <http://pax.grsecurity.net> and read about the PaX
55954+ project. While you are there, download chpax and run it on
55955+ binaries that cause problems with PaX. Also remember that
55956+ since the /proc restrictions are enabled, you must run your
55957+ identd as gid 1001. This security level enables the following
55958+ features in addition to those listed in the low and medium
55959+ security levels:
55960+
55961+ - Additional /proc restrictions
55962+ - Chmod restrictions in chroot
55963+ - No signals, ptrace, or viewing of processes outside of chroot
55964+ - Capability restrictions in chroot
55965+ - Deny fchdir out of chroot
55966+ - Priority restrictions in chroot
55967+ - Segmentation-based implementation of PaX
55968+ - Mprotect restrictions
55969+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55970+ - Kernel stack randomization
55971+ - Mount/unmount/remount logging
55972+ - Kernel symbol hiding
55973+ - Hardening of module auto-loading
55974+ - Ptrace restrictions
55975+ - Restricted vm86 mode
55976+ - Restricted sysfs/debugfs
55977+ - Active kernel exploit response
55978+
55979+config GRKERNSEC_CUSTOM
55980+ bool "Custom"
55981+ help
55982+ If you say Y here, you will be able to configure every grsecurity
55983+ option, which allows you to enable many more features that aren't
55984+ covered in the basic security levels. These additional features
55985+ include TPE, socket restrictions, and the sysctl system for
55986+ grsecurity. It is advised that you read through the help for
55987+ each option to determine its usefulness in your situation.
55988+
55989+endchoice
55990+
55991+menu "Address Space Protection"
55992+depends on GRKERNSEC
55993+
55994+config GRKERNSEC_KMEM
55995+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55996+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55997+ help
55998+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55999+ be written to or read from to modify or leak the contents of the running
56000+ kernel. /dev/port will also not be allowed to be opened. If you have module
56001+ support disabled, enabling this will close up four ways that are
56002+ currently used to insert malicious code into the running kernel.
56003+ Even with all these features enabled, we still highly recommend that
56004+ you use the RBAC system, as it is still possible for an attacker to
56005+ modify the running kernel through privileged I/O granted by ioperm/iopl.
56006+ If you are not using XFree86, you may be able to stop this additional
56007+ case by enabling the 'Disable privileged I/O' option. Though nothing
56008+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56009+ but only to video memory, which is the only writing we allow in this
56010+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
56011+ not be allowed to mprotect it with PROT_WRITE later.
56012+ It is highly recommended that you say Y here if you meet all the
56013+ conditions above.
56014+
56015+config GRKERNSEC_VM86
56016+ bool "Restrict VM86 mode"
56017+ depends on X86_32
56018+
56019+ help
56020+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56021+ make use of a special execution mode on 32bit x86 processors called
56022+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56023+ video cards and will still work with this option enabled. The purpose
56024+ of the option is to prevent exploitation of emulation errors in
56025+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
56026+ Nearly all users should be able to enable this option.
56027+
56028+config GRKERNSEC_IO
56029+ bool "Disable privileged I/O"
56030+ depends on X86
56031+ select RTC_CLASS
56032+ select RTC_INTF_DEV
56033+ select RTC_DRV_CMOS
56034+
56035+ help
56036+ If you say Y here, all ioperm and iopl calls will return an error.
56037+ Ioperm and iopl can be used to modify the running kernel.
56038+ Unfortunately, some programs need this access to operate properly,
56039+ the most notable of which are XFree86 and hwclock. hwclock can be
56040+ remedied by having RTC support in the kernel, so real-time
56041+ clock support is enabled if this option is enabled, to ensure
56042+ that hwclock operates correctly. XFree86 still will not
56043+ operate correctly with this option enabled, so DO NOT CHOOSE Y
56044+ IF YOU USE XFree86. If you use XFree86 and you still want to
56045+ protect your kernel against modification, use the RBAC system.
56046+
56047+config GRKERNSEC_PROC_MEMMAP
56048+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
56049+ default y if (PAX_NOEXEC || PAX_ASLR)
56050+ depends on PAX_NOEXEC || PAX_ASLR
56051+ help
56052+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56053+	  give no information about the addresses of the task's mappings if
56054+ PaX features that rely on random addresses are enabled on the task.
56055+ If you use PaX it is greatly recommended that you say Y here as it
56056+ closes up a hole that makes the full ASLR useless for suid
56057+ binaries.
56058+
56059+config GRKERNSEC_BRUTE
56060+ bool "Deter exploit bruteforcing"
56061+ help
56062+ If you say Y here, attempts to bruteforce exploits against forking
56063+	  daemons such as apache or sshd, as well as against suid/sgid binaries,
56064+ will be deterred. When a child of a forking daemon is killed by PaX
56065+ or crashes due to an illegal instruction or other suspicious signal,
56066+ the parent process will be delayed 30 seconds upon every subsequent
56067+ fork until the administrator is able to assess the situation and
56068+ restart the daemon.
56069+ In the suid/sgid case, the attempt is logged, the user has all their
56070+ processes terminated, and they are prevented from executing any further
56071+ processes for 15 minutes.
56072+ It is recommended that you also enable signal logging in the auditing
56073+ section so that logs are generated when a process triggers a suspicious
56074+ signal.
56075+ If the sysctl option is enabled, a sysctl option with name
56076+ "deter_bruteforce" is created.
56077+
56078+
56079+config GRKERNSEC_MODHARDEN
56080+ bool "Harden module auto-loading"
56081+ depends on MODULES
56082+ help
56083+ If you say Y here, module auto-loading in response to use of some
56084+ feature implemented by an unloaded module will be restricted to
56085+ root users. Enabling this option helps defend against attacks
56086+ by unprivileged users who abuse the auto-loading behavior to
56087+ cause a vulnerable module to load that is then exploited.
56088+
56089+ If this option prevents a legitimate use of auto-loading for a
56090+ non-root user, the administrator can execute modprobe manually
56091+ with the exact name of the module mentioned in the alert log.
56092+ Alternatively, the administrator can add the module to the list
56093+ of modules loaded at boot by modifying init scripts.
56094+
56095+ Modification of init scripts will most likely be needed on
56096+ Ubuntu servers with encrypted home directory support enabled,
56097+ as the first non-root user logging in will cause the ecb(aes),
56098+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56099+
56100+config GRKERNSEC_HIDESYM
56101+ bool "Hide kernel symbols"
56102+ help
56103+ If you say Y here, getting information on loaded modules, and
56104+ displaying all kernel symbols through a syscall will be restricted
56105+ to users with CAP_SYS_MODULE. For software compatibility reasons,
56106+ /proc/kallsyms will be restricted to the root user. The RBAC
56107+ system can hide that entry even from root.
56108+
56109+ This option also prevents leaking of kernel addresses through
56110+ several /proc entries.
56111+
56112+ Note that this option is only effective provided the following
56113+ conditions are met:
56114+ 1) The kernel using grsecurity is not precompiled by some distribution
56115+ 2) You have also enabled GRKERNSEC_DMESG
56116+ 3) You are using the RBAC system and hiding other files such as your
56117+ kernel image and System.map. Alternatively, enabling this option
56118+ causes the permissions on /boot, /lib/modules, and the kernel
56119+ source directory to change at compile time to prevent
56120+ reading by non-root users.
56121+ If the above conditions are met, this option will aid in providing a
56122+ useful protection against local kernel exploitation of overflows
56123+ and arbitrary read/write vulnerabilities.
56124+
56125+config GRKERNSEC_KERN_LOCKOUT
56126+ bool "Active kernel exploit response"
56127+ depends on X86 || ARM || PPC || SPARC
56128+ help
56129+ If you say Y here, when a PaX alert is triggered due to suspicious
56130+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56131+ or an OOPs occurs due to bad memory accesses, instead of just
56132+ terminating the offending process (and potentially allowing
56133+ a subsequent exploit from the same user), we will take one of two
56134+ actions:
56135+ If the user was root, we will panic the system
56136+ If the user was non-root, we will log the attempt, terminate
56137+ all processes owned by the user, then prevent them from creating
56138+ any new processes until the system is restarted
56139+ This deters repeated kernel exploitation/bruteforcing attempts
56140+ and is useful for later forensics.
56141+
56142+endmenu
56143+menu "Role Based Access Control Options"
56144+depends on GRKERNSEC
56145+
56146+config GRKERNSEC_RBAC_DEBUG
56147+ bool
56148+
56149+config GRKERNSEC_NO_RBAC
56150+ bool "Disable RBAC system"
56151+ help
56152+ If you say Y here, the /dev/grsec device will be removed from the kernel,
56153+ preventing the RBAC system from being enabled. You should only say Y
56154+ here if you have no intention of using the RBAC system, so as to prevent
56155+ an attacker with root access from misusing the RBAC system to hide files
56156+ and processes when loadable module support and /dev/[k]mem have been
56157+ locked down.
56158+
56159+config GRKERNSEC_ACL_HIDEKERN
56160+ bool "Hide kernel processes"
56161+ help
56162+ If you say Y here, all kernel threads will be hidden to all
56163+ processes but those whose subject has the "view hidden processes"
56164+ flag.
56165+
56166+config GRKERNSEC_ACL_MAXTRIES
56167+ int "Maximum tries before password lockout"
56168+ default 3
56169+ help
56170+ This option enforces the maximum number of times a user can attempt
56171+ to authorize themselves with the grsecurity RBAC system before being
56172+ denied the ability to attempt authorization again for a specified time.
56173+ The lower the number, the harder it will be to brute-force a password.
56174+
56175+config GRKERNSEC_ACL_TIMEOUT
56176+ int "Time to wait after max password tries, in seconds"
56177+ default 30
56178+ help
56179+ This option specifies the time the user must wait after attempting to
56180+ authorize to the RBAC system with the maximum number of invalid
56181+ passwords. The higher the number, the harder it will be to brute-force
56182+ a password.
56183+
56184+endmenu
56185+menu "Filesystem Protections"
56186+depends on GRKERNSEC
56187+
56188+config GRKERNSEC_PROC
56189+ bool "Proc restrictions"
56190+ help
56191+ If you say Y here, the permissions of the /proc filesystem
56192+ will be altered to enhance system security and privacy. You MUST
56193+ choose either a user only restriction or a user and group restriction.
56194+ Depending upon the option you choose, you can either restrict users to
56195+ see only the processes they themselves run, or choose a group that can
56196+ view all processes and files normally restricted to root if you choose
56197+ the "restrict to user only" option. NOTE: If you're running identd as
56198+ a non-root user, you will have to run it as the group you specify here.
56199+
56200+config GRKERNSEC_PROC_USER
56201+ bool "Restrict /proc to user only"
56202+ depends on GRKERNSEC_PROC
56203+ help
56204+	  If you say Y here, non-root users will only be able to view their own
56205+	  processes, and will be restricted from viewing network-related
56206+	  information and kernel symbol and module information.
56207+
56208+config GRKERNSEC_PROC_USERGROUP
56209+ bool "Allow special group"
56210+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56211+ help
56212+ If you say Y here, you will be able to select a group that will be
56213+ able to view all processes and network-related information. If you've
56214+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56215+ remain hidden. This option is useful if you want to run identd as
56216+ a non-root user.
56217+
56218+config GRKERNSEC_PROC_GID
56219+ int "GID for special group"
56220+ depends on GRKERNSEC_PROC_USERGROUP
56221+ default 1001
56222+
56223+config GRKERNSEC_PROC_ADD
56224+ bool "Additional restrictions"
56225+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56226+ help
56227+ If you say Y here, additional restrictions will be placed on
56228+ /proc that keep normal users from viewing device information and
56229+ slabinfo information that could be useful for exploits.
56230+
56231+config GRKERNSEC_LINK
56232+ bool "Linking restrictions"
56233+ help
56234+ If you say Y here, /tmp race exploits will be prevented, since users
56235+ will no longer be able to follow symlinks owned by other users in
56236+ world-writable +t directories (e.g. /tmp), unless the owner of the
56237+ symlink is the owner of the directory. Users will also not be
56238+ able to hardlink to files they do not own. If the sysctl option is
56239+ enabled, a sysctl option with name "linking_restrictions" is created.
56240+
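To make the rule above concrete, here is a minimal userspace-style sketch of the check GRKERNSEC_LINK describes for symlinks in sticky, world-writable directories; it is an illustration of the documented policy only, not the actual kernel hook, and the function name and stat-based fields are assumptions.

    #include <stdbool.h>
    #include <sys/types.h>
    #include <sys/stat.h>

    /* Illustrative only: mirrors the documented rule for symlinks found in
     * sticky (+t), world-writable directories such as /tmp. */
    static bool may_follow_symlink(const struct stat *dir, const struct stat *link,
                                   uid_t follower_uid)
    {
        int sticky_world_writable = (dir->st_mode & S_ISVTX) &&
                                    (dir->st_mode & S_IWOTH);

        if (!sticky_world_writable)
            return true;                /* restriction does not apply here */
        if (link->st_uid == dir->st_uid)
            return true;                /* symlink owner also owns the directory */
        if (link->st_uid == follower_uid)
            return true;                /* following one's own symlink is fine */
        return false;                   /* deny: the classic /tmp race setup */
    }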
56241+config GRKERNSEC_FIFO
56242+ bool "FIFO restrictions"
56243+ help
56244+ If you say Y here, users will not be able to write to FIFOs they don't
56245+ own in world-writable +t directories (e.g. /tmp), unless the owner of
56246+ the FIFO is also the owner of the directory it's held in. If the sysctl
56247+ option is enabled, a sysctl option with name "fifo_restrictions" is
56248+ created.
56249+
56250+config GRKERNSEC_SYSFS_RESTRICT
56251+ bool "Sysfs/debugfs restriction"
56252+ depends on SYSFS
56253+ help
56254+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56255+ any filesystem normally mounted under it (e.g. debugfs) will only
56256+ be accessible by root. These filesystems generally provide access
56257+ to hardware and debug information that isn't appropriate for unprivileged
56258+ users of the system. Sysfs and debugfs have also become a large source
56259+ of new vulnerabilities, ranging from infoleaks to local compromise.
56260+ There has been very little oversight with an eye toward security involved
56261+ in adding new exporters of information to these filesystems, so their
56262+ use is discouraged.
56263+ This option is equivalent to a chmod 0700 of the mount paths.
56264+
56265+config GRKERNSEC_ROFS
56266+ bool "Runtime read-only mount protection"
56267+ help
56268+ If you say Y here, a sysctl option with name "romount_protect" will
56269+ be created. By setting this option to 1 at runtime, filesystems
56270+ will be protected in the following ways:
56271+ * No new writable mounts will be allowed
56272+ * Existing read-only mounts won't be able to be remounted read/write
56273+ * Write operations will be denied on all block devices
56274+ This option acts independently of grsec_lock: once it is set to 1,
56275+ it cannot be turned off. Therefore, please be mindful of the resulting
56276+ behavior if this option is enabled in an init script on a read-only
56277+ filesystem. This feature is mainly intended for secure embedded systems.
56278+
56279+config GRKERNSEC_CHROOT
56280+ bool "Chroot jail restrictions"
56281+ help
56282+ If you say Y here, you will be able to choose several options that will
56283+ make breaking out of a chrooted jail much more difficult. If you
56284+ encounter no software incompatibilities with the following options, it
56285+ is recommended that you enable each one.
56286+
56287+config GRKERNSEC_CHROOT_MOUNT
56288+ bool "Deny mounts"
56289+ depends on GRKERNSEC_CHROOT
56290+ help
56291+ If you say Y here, processes inside a chroot will not be able to
56292+ mount or remount filesystems. If the sysctl option is enabled, a
56293+ sysctl option with name "chroot_deny_mount" is created.
56294+
56295+config GRKERNSEC_CHROOT_DOUBLE
56296+ bool "Deny double-chroots"
56297+ depends on GRKERNSEC_CHROOT
56298+ help
56299+ If you say Y here, processes inside a chroot will not be able to chroot
56300+ again outside the chroot. This is a widely used method of breaking
56301+ out of a chroot jail and should not be allowed. If the sysctl
56302+ option is enabled, a sysctl option with name
56303+ "chroot_deny_chroot" is created.
56304+
56305+config GRKERNSEC_CHROOT_PIVOT
56306+ bool "Deny pivot_root in chroot"
56307+ depends on GRKERNSEC_CHROOT
56308+ help
56309+ If you say Y here, processes inside a chroot will not be able to use
56310+ a function called pivot_root() that was introduced in Linux 2.3.41. It
56311+ works similarly to chroot in that it changes the root filesystem. This
56312+ function could be misused in a chrooted process to attempt to break out
56313+ of the chroot, and therefore should not be allowed. If the sysctl
56314+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56315+ created.
56316+
56317+config GRKERNSEC_CHROOT_CHDIR
56318+ bool "Enforce chdir(\"/\") on all chroots"
56319+ depends on GRKERNSEC_CHROOT
56320+ help
56321+ If you say Y here, the current working directory of all newly-chrooted
56322+ applications will be set to the root directory of the chroot.
56323+ The man page on chroot(2) states:
56324+ Note that this call does not change the current working
56325+ directory, so that `.' can be outside the tree rooted at
56326+ `/'. In particular, the super-user can escape from a
56327+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56328+
56329+ It is recommended that you say Y here, since it's not known to break
56330+ any software. If the sysctl option is enabled, a sysctl option with
56331+ name "chroot_enforce_chdir" is created.
56332+
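The enforcement described above corresponds to what a careful program already does by hand after chroot(); the following small sketch (the path is a placeholder) shows that pattern.

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* A minimal sketch of the pattern GRKERNSEC_CHROOT_CHDIR enforces in the
     * kernel: never leave the working directory outside the new root. */
    int main(void)
    {
        if (chroot("/var/empty") != 0) {    /* example path only */
            perror("chroot");
            return EXIT_FAILURE;
        }
        if (chdir("/") != 0) {              /* without this, "." can remain outside the jail */
            perror("chdir");
            return EXIT_FAILURE;
        }
        /* ... drop privileges and carry on inside the jail ... */
        return EXIT_SUCCESS;
    }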
56333+config GRKERNSEC_CHROOT_CHMOD
56334+ bool "Deny (f)chmod +s"
56335+ depends on GRKERNSEC_CHROOT
56336+ help
56337+ If you say Y here, processes inside a chroot will not be able to chmod
56338+ or fchmod files to make them have suid or sgid bits. This protects
56339+ against another published method of breaking a chroot. If the sysctl
56340+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56341+ created.
56342+
56343+config GRKERNSEC_CHROOT_FCHDIR
56344+ bool "Deny fchdir out of chroot"
56345+ depends on GRKERNSEC_CHROOT
56346+ help
56347+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56348+ to a file descriptor of the chrooting process that points to a directory
56349+ outside the chroot will be stopped. If the sysctl option
56350+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56351+
56352+config GRKERNSEC_CHROOT_MKNOD
56353+ bool "Deny mknod"
56354+ depends on GRKERNSEC_CHROOT
56355+ help
56356+ If you say Y here, processes inside a chroot will not be allowed to
56357+ mknod. The problem with using mknod inside a chroot is that it
56358+ would allow an attacker to create a device entry that is the same
56359+ as one on the physical root of your system, which could be anything
56360+ from the console device to a device for your hard drive (which
56361+ they could then use to wipe the drive or steal data). It is recommended
56362+ that you say Y here, unless you run into software incompatibilities.
56363+ If the sysctl option is enabled, a sysctl option with name
56364+ "chroot_deny_mknod" is created.
56365+
56366+config GRKERNSEC_CHROOT_SHMAT
56367+ bool "Deny shmat() out of chroot"
56368+ depends on GRKERNSEC_CHROOT
56369+ help
56370+ If you say Y here, processes inside a chroot will not be able to attach
56371+ to shared memory segments that were created outside of the chroot jail.
56372+ It is recommended that you say Y here. If the sysctl option is enabled,
56373+ a sysctl option with name "chroot_deny_shmat" is created.
56374+
56375+config GRKERNSEC_CHROOT_UNIX
56376+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56377+ depends on GRKERNSEC_CHROOT
56378+ help
56379+ If you say Y here, processes inside a chroot will not be able to
56380+ connect to abstract (meaning not belonging to a filesystem) Unix
56381+ domain sockets that were bound outside of a chroot. It is recommended
56382+ that you say Y here. If the sysctl option is enabled, a sysctl option
56383+ with name "chroot_deny_unix" is created.
56384+
56385+config GRKERNSEC_CHROOT_FINDTASK
56386+ bool "Protect outside processes"
56387+ depends on GRKERNSEC_CHROOT
56388+ help
56389+ If you say Y here, processes inside a chroot will not be able to
56390+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56391+ getsid, or view any process outside of the chroot. If the sysctl
56392+ option is enabled, a sysctl option with name "chroot_findtask" is
56393+ created.
56394+
56395+config GRKERNSEC_CHROOT_NICE
56396+ bool "Restrict priority changes"
56397+ depends on GRKERNSEC_CHROOT
56398+ help
56399+ If you say Y here, processes inside a chroot will not be able to raise
56400+ the priority of processes in the chroot, or alter the priority of
56401+ processes outside the chroot. This provides more security than simply
56402+ removing CAP_SYS_NICE from the process' capability set. If the
56403+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56404+ is created.
56405+
56406+config GRKERNSEC_CHROOT_SYSCTL
56407+ bool "Deny sysctl writes"
56408+ depends on GRKERNSEC_CHROOT
56409+ help
56410+ If you say Y here, an attacker in a chroot will not be able to
56411+ write to sysctl entries, either by sysctl(2) or through a /proc
56412+ interface. It is strongly recommended that you say Y here. If the
56413+ sysctl option is enabled, a sysctl option with name
56414+ "chroot_deny_sysctl" is created.
56415+
56416+config GRKERNSEC_CHROOT_CAPS
56417+ bool "Capability restrictions"
56418+ depends on GRKERNSEC_CHROOT
56419+ help
56420+ If you say Y here, the capabilities on all processes within a
56421+ chroot jail will be lowered to stop module insertion, raw i/o,
56422+ system and net admin tasks, rebooting the system, modifying immutable
56423+ files, modifying IPC owned by another, and changing the system time.
56424+ This is left as an option because it can break some apps. Disable this
56425+ if your chrooted apps are having problems performing those kinds of
56426+ tasks. If the sysctl option is enabled, a sysctl option with
56427+ name "chroot_caps" is created.
56428+
56429+endmenu
56430+menu "Kernel Auditing"
56431+depends on GRKERNSEC
56432+
56433+config GRKERNSEC_AUDIT_GROUP
56434+ bool "Single group for auditing"
56435+ help
56436+ If you say Y here, the exec, chdir, and (un)mount logging features
56437+ will only operate on a group you specify. This option is recommended
56438+ if you only want to watch certain users instead of having a large
56439+ amount of logs from the entire system. If the sysctl option is enabled,
56440+ a sysctl option with name "audit_group" is created.
56441+
56442+config GRKERNSEC_AUDIT_GID
56443+ int "GID for auditing"
56444+ depends on GRKERNSEC_AUDIT_GROUP
56445+ default 1007
56446+
56447+config GRKERNSEC_EXECLOG
56448+ bool "Exec logging"
56449+ help
56450+ If you say Y here, all execve() calls will be logged (since the
56451+ other exec*() calls are frontends to execve(), all execution
56452+ will be logged). Useful for shell-servers that like to keep track
56453+ of their users. If the sysctl option is enabled, a sysctl option with
56454+ name "exec_logging" is created.
56455+ WARNING: This option, when enabled, will produce a LOT of logs, especially
56456+ on an active system.
56457+
56458+config GRKERNSEC_RESLOG
56459+ bool "Resource logging"
56460+ help
56461+ If you say Y here, all attempts to overstep resource limits will
56462+ be logged with the resource name, the requested size, and the current
56463+ limit. It is highly recommended that you say Y here. If the sysctl
56464+ option is enabled, a sysctl option with name "resource_logging" is
56465+ created. If the RBAC system is enabled, the sysctl value is ignored.
56466+
56467+config GRKERNSEC_CHROOT_EXECLOG
56468+ bool "Log execs within chroot"
56469+ help
56470+ If you say Y here, all executions inside a chroot jail will be logged
56471+ to syslog. This can cause a large amount of logs if certain
56472+ applications (e.g. djb's daemontools) are installed on the system, and
56473+ is therefore left as an option. If the sysctl option is enabled, a
56474+ sysctl option with name "chroot_execlog" is created.
56475+
56476+config GRKERNSEC_AUDIT_PTRACE
56477+ bool "Ptrace logging"
56478+ help
56479+ If you say Y here, all attempts to attach to a process via ptrace
56480+ will be logged. If the sysctl option is enabled, a sysctl option
56481+ with name "audit_ptrace" is created.
56482+
56483+config GRKERNSEC_AUDIT_CHDIR
56484+ bool "Chdir logging"
56485+ help
56486+ If you say Y here, all chdir() calls will be logged. If the sysctl
56487+ option is enabled, a sysctl option with name "audit_chdir" is created.
56488+
56489+config GRKERNSEC_AUDIT_MOUNT
56490+ bool "(Un)Mount logging"
56491+ help
56492+ If you say Y here, all mounts and unmounts will be logged. If the
56493+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56494+ created.
56495+
56496+config GRKERNSEC_SIGNAL
56497+ bool "Signal logging"
56498+ help
56499+ If you say Y here, certain important signals will be logged, such as
56500+ SIGSEGV, which will as a result inform you when an error in a program
56501+ occurred, which in some cases could mean a possible exploit attempt.
56502+ If the sysctl option is enabled, a sysctl option with name
56503+ "signal_logging" is created.
56504+
56505+config GRKERNSEC_FORKFAIL
56506+ bool "Fork failure logging"
56507+ help
56508+ If you say Y here, all failed fork() attempts will be logged.
56509+ This could suggest a fork bomb, or someone attempting to overstep
56510+ their process limit. If the sysctl option is enabled, a sysctl option
56511+ with name "forkfail_logging" is created.
56512+
56513+config GRKERNSEC_TIME
56514+ bool "Time change logging"
56515+ help
56516+ If you say Y here, any changes of the system clock will be logged.
56517+ If the sysctl option is enabled, a sysctl option with name
56518+ "timechange_logging" is created.
56519+
56520+config GRKERNSEC_PROC_IPADDR
56521+ bool "/proc/<pid>/ipaddr support"
56522+ help
56523+ If you say Y here, a new entry will be added to each /proc/<pid>
56524+ directory that contains the IP address of the person using the task.
56525+ The IP is carried across local TCP and AF_UNIX stream sockets.
56526+ This information can be useful for IDS/IPSes to perform remote response
56527+ to a local attack. The entry is readable by only the owner of the
56528+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56529+ the RBAC system), and thus does not create privacy concerns.
56530+
56531+config GRKERNSEC_RWXMAP_LOG
56532+ bool "Denied RWX mmap/mprotect logging"
56533+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56534+ help
56535+ If you say Y here, calls to mmap() and mprotect() with explicit
56536+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56537+ denied by the PAX_MPROTECT feature. If the sysctl option is
56538+ enabled, a sysctl option with name "rwxmap_logging" is created.
56539+
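For illustration, the following standalone C program performs exactly the kind of request this option logs: an anonymous mapping asking for PROT_WRITE and PROT_EXEC at the same time, which the PAX_MPROTECT feature denies.

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* W|X mapping: denied by PAX_MPROTECT and, with this option, logged. */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            perror("mmap");                 /* expected under PAX_MPROTECT */
        else
            munmap(p, 4096);
        return 0;
    }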
56540+config GRKERNSEC_AUDIT_TEXTREL
56541+ bool "ELF text relocations logging (READ HELP)"
56542+ depends on PAX_MPROTECT
56543+ help
56544+ If you say Y here, text relocations will be logged with the filename
56545+ of the offending library or binary. The purpose of the feature is
56546+ to help Linux distribution developers get rid of libraries and
56547+ binaries that need text relocations which hinder the future progress
56548+ of PaX. Only Linux distribution developers should say Y here, and
56549+ never on a production machine, as this option creates an information
56550+ leak that could aid an attacker in defeating the randomization of
56551+ a single memory region. If the sysctl option is enabled, a sysctl
56552+ option with name "audit_textrel" is created.
56553+
56554+endmenu
56555+
56556+menu "Executable Protections"
56557+depends on GRKERNSEC
56558+
56559+config GRKERNSEC_DMESG
56560+ bool "Dmesg(8) restriction"
56561+ help
56562+ If you say Y here, non-root users will not be able to use dmesg(8)
56563+ to view up to the last 4kb of messages in the kernel's log buffer.
56564+ The kernel's log buffer often contains kernel addresses and other
56565+ identifying information useful to an attacker in fingerprinting a
56566+ system for a targeted exploit.
56567+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56568+ created.
56569+
56570+config GRKERNSEC_HARDEN_PTRACE
56571+ bool "Deter ptrace-based process snooping"
56572+ help
56573+ If you say Y here, TTY sniffers and other malicious monitoring
56574+ programs implemented through ptrace will be defeated. If you
56575+ have been using the RBAC system, this option has already been
56576+ enabled for several years for all users, with the ability to make
56577+ fine-grained exceptions.
56578+
56579+ This option only affects the ability of non-root users to ptrace
56580+ processes that are not a descendant of the ptracing process.
56581+ This means that strace ./binary and gdb ./binary will still work,
56582+ but attaching to arbitrary processes will not. If the sysctl
56583+ option is enabled, a sysctl option with name "harden_ptrace" is
56584+ created.
56585+
56586+config GRKERNSEC_TPE
56587+ bool "Trusted Path Execution (TPE)"
56588+ help
56589+ If you say Y here, you will be able to choose a gid to add to the
56590+ supplementary groups of users you want to mark as "untrusted."
56591+ These users will not be able to execute any files that are not in
56592+ root-owned directories writable only by root. If the sysctl option
56593+ is enabled, a sysctl option with name "tpe" is created.
56594+
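As a rough illustration of the trusted-path rule above, the sketch below checks whether a directory would count as trusted (owned by root and writable only by root); it is a simplified userspace approximation, not the in-kernel check, and the function name is made up.

    #include <stdbool.h>
    #include <sys/types.h>
    #include <sys/stat.h>

    static bool tpe_directory_trusted(const char *dir_path)
    {
        struct stat st;

        if (stat(dir_path, &st) != 0)
            return false;
        if (st.st_uid != 0)
            return false;                   /* not owned by root */
        if (st.st_mode & (S_IWGRP | S_IWOTH))
            return false;                   /* group- or world-writable */
        return true;                        /* root-owned, writable only by root */
    }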
56595+config GRKERNSEC_TPE_ALL
56596+ bool "Partially restrict all non-root users"
56597+ depends on GRKERNSEC_TPE
56598+ help
56599+ If you say Y here, all non-root users will be covered under
56600+ a weaker TPE restriction. This is separate from, and in addition to,
56601+ the main TPE options that you have selected elsewhere. Thus, if a
56602+ "trusted" GID is chosen, this restriction applies to even that GID.
56603+ Under this restriction, all non-root users will only be allowed to
56604+ execute files in directories they own that are not group or
56605+ world-writable, or in directories owned by root and writable only by
56606+ root. If the sysctl option is enabled, a sysctl option with name
56607+ "tpe_restrict_all" is created.
56608+
56609+config GRKERNSEC_TPE_INVERT
56610+ bool "Invert GID option"
56611+ depends on GRKERNSEC_TPE
56612+ help
56613+ If you say Y here, the group you specify in the TPE configuration will
56614+ decide what group TPE restrictions will be *disabled* for. This
56615+ option is useful if you want TPE restrictions to be applied to most
56616+ users on the system. If the sysctl option is enabled, a sysctl option
56617+ with name "tpe_invert" is created. Unlike other sysctl options, this
56618+ entry will default to on for backward-compatibility.
56619+
56620+config GRKERNSEC_TPE_GID
56621+ int "GID for untrusted users"
56622+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56623+ default 1005
56624+ help
56625+ Setting this GID determines what group TPE restrictions will be
56626+ *enabled* for. If the sysctl option is enabled, a sysctl option
56627+ with name "tpe_gid" is created.
56628+
56629+config GRKERNSEC_TPE_GID
56630+ int "GID for trusted users"
56631+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56632+ default 1005
56633+ help
56634+ Setting this GID determines what group TPE restrictions will be
56635+ *disabled* for. If the sysctl option is enabled, a sysctl option
56636+ with name "tpe_gid" is created.
56637+
56638+endmenu
56639+menu "Network Protections"
56640+depends on GRKERNSEC
56641+
56642+config GRKERNSEC_RANDNET
56643+ bool "Larger entropy pools"
56644+ help
56645+ If you say Y here, the entropy pools used for many features of Linux
56646+ and grsecurity will be doubled in size. Since several grsecurity
56647+ features use additional randomness, it is recommended that you say Y
56648+ here. Saying Y here has a similar effect to modifying
56649+ /proc/sys/kernel/random/poolsize.
56650+
56651+config GRKERNSEC_BLACKHOLE
56652+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56653+ depends on NET
56654+ help
56655+ If you say Y here, neither TCP resets nor ICMP
56656+ destination-unreachable packets will be sent in response to packets
56657+ sent to ports for which no associated listening process exists.
56658+ This feature supports both IPv4 and IPv6 and exempts the
56659+ loopback interface from blackholing. Enabling this feature
56660+ makes a host more resilient to DoS attacks and reduces network
56661+ visibility against scanners.
56662+
56663+ The blackhole feature as-implemented is equivalent to the FreeBSD
56664+ blackhole feature, as it prevents RST responses to all packets, not
56665+ just SYNs. Under most application behavior this causes no
56666+ problems, but applications (like haproxy) may not close certain
56667+ connections in a way that cleanly terminates them on the remote
56668+ end, leaving the remote host in LAST_ACK state. Because of this
56669+ side-effect and to prevent intentional LAST_ACK DoSes, this
56670+ feature also adds automatic mitigation against such attacks.
56671+ The mitigation drastically reduces the amount of time a socket
56672+ can spend in LAST_ACK state. If you're using haproxy and not
56673+ all servers it connects to have this option enabled, consider
56674+ disabling this feature on the haproxy host.
56675+
56676+ If the sysctl option is enabled, two sysctl options with names
56677+ "ip_blackhole" and "lastack_retries" will be created.
56678+ While "ip_blackhole" takes the standard zero/non-zero on/off
56679+ toggle, "lastack_retries" uses the same kinds of values as
56680+ "tcp_retries1" and "tcp_retries2". The default value of 4
56681+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56682+ state.
56683+
56684+config GRKERNSEC_SOCKET
56685+ bool "Socket restrictions"
56686+ depends on NET
56687+ help
56688+ If you say Y here, you will be able to choose from several options.
56689+ If you assign a GID on your system and add it to the supplementary
56690+ groups of users you want to restrict socket access to, this patch
56691+ will perform up to three things, based on the option(s) you choose.
56692+
56693+config GRKERNSEC_SOCKET_ALL
56694+ bool "Deny any sockets to group"
56695+ depends on GRKERNSEC_SOCKET
56696+ help
56697+ If you say Y here, you will be able to choose a GID whose users will
56698+ be unable to connect to other hosts from your machine or run server
56699+ applications from your machine. If the sysctl option is enabled, a
56700+ sysctl option with name "socket_all" is created.
56701+
56702+config GRKERNSEC_SOCKET_ALL_GID
56703+ int "GID to deny all sockets for"
56704+ depends on GRKERNSEC_SOCKET_ALL
56705+ default 1004
56706+ help
56707+ Here you can choose the GID to disable socket access for. Remember to
56708+ add the users you want socket access disabled for to the GID
56709+ specified here. If the sysctl option is enabled, a sysctl option
56710+ with name "socket_all_gid" is created.
56711+
56712+config GRKERNSEC_SOCKET_CLIENT
56713+ bool "Deny client sockets to group"
56714+ depends on GRKERNSEC_SOCKET
56715+ help
56716+ If you say Y here, you will be able to choose a GID whose users will
56717+ be unable to connect to other hosts from your machine, but will be
56718+ able to run servers. If this option is enabled, all users in the group
56719+ you specify will have to use passive mode when initiating ftp transfers
56720+ from the shell on your machine. If the sysctl option is enabled, a
56721+ sysctl option with name "socket_client" is created.
56722+
56723+config GRKERNSEC_SOCKET_CLIENT_GID
56724+ int "GID to deny client sockets for"
56725+ depends on GRKERNSEC_SOCKET_CLIENT
56726+ default 1003
56727+ help
56728+ Here you can choose the GID to disable client socket access for.
56729+ Remember to add the users you want client socket access disabled for to
56730+ the GID specified here. If the sysctl option is enabled, a sysctl
56731+ option with name "socket_client_gid" is created.
56732+
56733+config GRKERNSEC_SOCKET_SERVER
56734+ bool "Deny server sockets to group"
56735+ depends on GRKERNSEC_SOCKET
56736+ help
56737+ If you say Y here, you will be able to choose a GID whose users will
56738+ be unable to run server applications from your machine. If the sysctl
56739+ option is enabled, a sysctl option with name "socket_server" is created.
56740+
56741+config GRKERNSEC_SOCKET_SERVER_GID
56742+ int "GID to deny server sockets for"
56743+ depends on GRKERNSEC_SOCKET_SERVER
56744+ default 1002
56745+ help
56746+ Here you can choose the GID to disable server socket access for.
56747+ Remember to add the users you want server socket access disabled for to
56748+ the GID specified here. If the sysctl option is enabled, a sysctl
56749+ option with name "socket_server_gid" is created.
56750+
56751+endmenu
56752+menu "Sysctl support"
56753+depends on GRKERNSEC && SYSCTL
56754+
56755+config GRKERNSEC_SYSCTL
56756+ bool "Sysctl support"
56757+ help
56758+ If you say Y here, you will be able to change the options that
56759+ grsecurity runs with at bootup, without having to recompile your
56760+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56761+ to enable (1) or disable (0) various features. All the sysctl entries
56762+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56763+ All features enabled in the kernel configuration are disabled at boot
56764+ if you do not say Y to the "Turn on features by default" option.
56765+ All options should be set at startup, and the grsec_lock entry should
56766+ be set to a non-zero value after all the options are set.
56767+ *THIS IS EXTREMELY IMPORTANT*
56768+
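A typical boot-time sequence for the sysctl interface described above is to enable the desired entries and then set grsec_lock last; this is normally done with echo(1) from an init script, but the same steps are sketched below in C. The entry names "dmesg" and "harden_ptrace" are just examples taken from options elsewhere in this menu.

    #include <stdio.h>

    static int grsec_sysctl_write(const char *name, const char *value)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fputs(value, f);
        return fclose(f);
    }

    int main(void)
    {
        grsec_sysctl_write("dmesg", "1");
        grsec_sysctl_write("harden_ptrace", "1");
        /* ... any other tunables ... */
        return grsec_sysctl_write("grsec_lock", "1");  /* must come last */
    }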
56769+config GRKERNSEC_SYSCTL_DISTRO
56770+ bool "Extra sysctl support for distro makers (READ HELP)"
56771+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56772+ help
56773+ If you say Y here, additional sysctl options will be created
56774+ for features that affect processes running as root. Therefore,
56775+ it is critical when using this option that the grsec_lock entry be
56776+ enabled after boot. Only distros that ship prebuilt kernel packages
56777+ with this option enabled and that can ensure grsec_lock is enabled
56778+ after boot should use this option.
56779+ *Failure to set grsec_lock after boot makes all grsec features
56780+ this option covers useless*
56781+
56782+ Currently this option creates the following sysctl entries:
56783+ "Disable Privileged I/O": "disable_priv_io"
56784+
56785+config GRKERNSEC_SYSCTL_ON
56786+ bool "Turn on features by default"
56787+ depends on GRKERNSEC_SYSCTL
56788+ help
56789+ If you say Y here, all features enabled in the kernel configuration
56790+ will be enabled at boot time, rather than starting out disabled.
56791+ It is recommended you say Y here unless
56792+ there is some reason you would want all sysctl-tunable features to
56793+ be disabled by default. As mentioned elsewhere, it is important
56794+ to enable the grsec_lock entry once you have finished modifying
56795+ the sysctl entries.
56796+
56797+endmenu
56798+menu "Logging Options"
56799+depends on GRKERNSEC
56800+
56801+config GRKERNSEC_FLOODTIME
56802+ int "Seconds in between log messages (minimum)"
56803+ default 10
56804+ help
56805+ This option allows you to enforce the number of seconds between
56806+ grsecurity log messages. The default should be suitable for most
56807+ people; however, if you choose to change it, choose a value small enough
56808+ to allow informative logs to be produced, but large enough to
56809+ prevent flooding.
56810+
56811+config GRKERNSEC_FLOODBURST
56812+ int "Number of messages in a burst (maximum)"
56813+ default 6
56814+ help
56815+ This option allows you to choose the maximum number of messages allowed
56816+ within the flood time interval you chose in a separate option. The
56817+ default should be suitable for most people; however, if you find that
56818+ many of your logs are being interpreted as flooding, you may want to
56819+ raise this value.
56820+
56821+endmenu
56822+
56823+endmenu
56824diff -urNp linux-3.1.1/grsecurity/Makefile linux-3.1.1/grsecurity/Makefile
56825--- linux-3.1.1/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56826+++ linux-3.1.1/grsecurity/Makefile 2011-11-16 18:40:31.000000000 -0500
56827@@ -0,0 +1,36 @@
56828+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56829+# during 2001-2009 it has been completely redesigned by Brad Spengler
56830+# into an RBAC system
56831+#
56832+# All code in this directory and various hooks inserted throughout the kernel
56833+# are copyright Brad Spengler - Open Source Security, Inc., and released
56834+# under the GPL v2 or higher
56835+
56836+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56837+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56838+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56839+
56840+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56841+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56842+ gracl_learn.o grsec_log.o
56843+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56844+
56845+ifdef CONFIG_NET
56846+obj-y += grsec_sock.o
56847+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56848+endif
56849+
56850+ifndef CONFIG_GRKERNSEC
56851+obj-y += grsec_disabled.o
56852+endif
56853+
56854+ifdef CONFIG_GRKERNSEC_HIDESYM
56855+extra-y := grsec_hidesym.o
56856+$(obj)/grsec_hidesym.o:
56857+ @-chmod -f 500 /boot
56858+ @-chmod -f 500 /lib/modules
56859+ @-chmod -f 500 /lib64/modules
56860+ @-chmod -f 500 /lib32/modules
56861+ @-chmod -f 700 .
56862+ @echo ' grsec: protected kernel image paths'
56863+endif
56864diff -urNp linux-3.1.1/include/acpi/acpi_bus.h linux-3.1.1/include/acpi/acpi_bus.h
56865--- linux-3.1.1/include/acpi/acpi_bus.h 2011-11-11 15:19:27.000000000 -0500
56866+++ linux-3.1.1/include/acpi/acpi_bus.h 2011-11-16 18:39:08.000000000 -0500
56867@@ -107,7 +107,7 @@ struct acpi_device_ops {
56868 acpi_op_bind bind;
56869 acpi_op_unbind unbind;
56870 acpi_op_notify notify;
56871-};
56872+} __no_const;
56873
56874 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56875
56876diff -urNp linux-3.1.1/include/asm-generic/atomic-long.h linux-3.1.1/include/asm-generic/atomic-long.h
56877--- linux-3.1.1/include/asm-generic/atomic-long.h 2011-11-11 15:19:27.000000000 -0500
56878+++ linux-3.1.1/include/asm-generic/atomic-long.h 2011-11-16 18:39:08.000000000 -0500
56879@@ -22,6 +22,12 @@
56880
56881 typedef atomic64_t atomic_long_t;
56882
56883+#ifdef CONFIG_PAX_REFCOUNT
56884+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56885+#else
56886+typedef atomic64_t atomic_long_unchecked_t;
56887+#endif
56888+
56889 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56890
56891 static inline long atomic_long_read(atomic_long_t *l)
56892@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56893 return (long)atomic64_read(v);
56894 }
56895
56896+#ifdef CONFIG_PAX_REFCOUNT
56897+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56898+{
56899+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56900+
56901+ return (long)atomic64_read_unchecked(v);
56902+}
56903+#endif
56904+
56905 static inline void atomic_long_set(atomic_long_t *l, long i)
56906 {
56907 atomic64_t *v = (atomic64_t *)l;
56908@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56909 atomic64_set(v, i);
56910 }
56911
56912+#ifdef CONFIG_PAX_REFCOUNT
56913+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56914+{
56915+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56916+
56917+ atomic64_set_unchecked(v, i);
56918+}
56919+#endif
56920+
56921 static inline void atomic_long_inc(atomic_long_t *l)
56922 {
56923 atomic64_t *v = (atomic64_t *)l;
56924@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56925 atomic64_inc(v);
56926 }
56927
56928+#ifdef CONFIG_PAX_REFCOUNT
56929+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56930+{
56931+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56932+
56933+ atomic64_inc_unchecked(v);
56934+}
56935+#endif
56936+
56937 static inline void atomic_long_dec(atomic_long_t *l)
56938 {
56939 atomic64_t *v = (atomic64_t *)l;
56940@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56941 atomic64_dec(v);
56942 }
56943
56944+#ifdef CONFIG_PAX_REFCOUNT
56945+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56946+{
56947+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56948+
56949+ atomic64_dec_unchecked(v);
56950+}
56951+#endif
56952+
56953 static inline void atomic_long_add(long i, atomic_long_t *l)
56954 {
56955 atomic64_t *v = (atomic64_t *)l;
56956@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56957 atomic64_add(i, v);
56958 }
56959
56960+#ifdef CONFIG_PAX_REFCOUNT
56961+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56962+{
56963+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56964+
56965+ atomic64_add_unchecked(i, v);
56966+}
56967+#endif
56968+
56969 static inline void atomic_long_sub(long i, atomic_long_t *l)
56970 {
56971 atomic64_t *v = (atomic64_t *)l;
56972@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
56973 atomic64_sub(i, v);
56974 }
56975
56976+#ifdef CONFIG_PAX_REFCOUNT
56977+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56978+{
56979+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56980+
56981+ atomic64_sub_unchecked(i, v);
56982+}
56983+#endif
56984+
56985 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56986 {
56987 atomic64_t *v = (atomic64_t *)l;
56988@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
56989 return (long)atomic64_inc_return(v);
56990 }
56991
56992+#ifdef CONFIG_PAX_REFCOUNT
56993+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56994+{
56995+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56996+
56997+ return (long)atomic64_inc_return_unchecked(v);
56998+}
56999+#endif
57000+
57001 static inline long atomic_long_dec_return(atomic_long_t *l)
57002 {
57003 atomic64_t *v = (atomic64_t *)l;
57004@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
57005
57006 typedef atomic_t atomic_long_t;
57007
57008+#ifdef CONFIG_PAX_REFCOUNT
57009+typedef atomic_unchecked_t atomic_long_unchecked_t;
57010+#else
57011+typedef atomic_t atomic_long_unchecked_t;
57012+#endif
57013+
57014 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57015 static inline long atomic_long_read(atomic_long_t *l)
57016 {
57017@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
57018 return (long)atomic_read(v);
57019 }
57020
57021+#ifdef CONFIG_PAX_REFCOUNT
57022+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57023+{
57024+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57025+
57026+ return (long)atomic_read_unchecked(v);
57027+}
57028+#endif
57029+
57030 static inline void atomic_long_set(atomic_long_t *l, long i)
57031 {
57032 atomic_t *v = (atomic_t *)l;
57033@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
57034 atomic_set(v, i);
57035 }
57036
57037+#ifdef CONFIG_PAX_REFCOUNT
57038+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57039+{
57040+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57041+
57042+ atomic_set_unchecked(v, i);
57043+}
57044+#endif
57045+
57046 static inline void atomic_long_inc(atomic_long_t *l)
57047 {
57048 atomic_t *v = (atomic_t *)l;
57049@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
57050 atomic_inc(v);
57051 }
57052
57053+#ifdef CONFIG_PAX_REFCOUNT
57054+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57055+{
57056+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57057+
57058+ atomic_inc_unchecked(v);
57059+}
57060+#endif
57061+
57062 static inline void atomic_long_dec(atomic_long_t *l)
57063 {
57064 atomic_t *v = (atomic_t *)l;
57065@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
57066 atomic_dec(v);
57067 }
57068
57069+#ifdef CONFIG_PAX_REFCOUNT
57070+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57071+{
57072+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57073+
57074+ atomic_dec_unchecked(v);
57075+}
57076+#endif
57077+
57078 static inline void atomic_long_add(long i, atomic_long_t *l)
57079 {
57080 atomic_t *v = (atomic_t *)l;
57081@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
57082 atomic_add(i, v);
57083 }
57084
57085+#ifdef CONFIG_PAX_REFCOUNT
57086+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57087+{
57088+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57089+
57090+ atomic_add_unchecked(i, v);
57091+}
57092+#endif
57093+
57094 static inline void atomic_long_sub(long i, atomic_long_t *l)
57095 {
57096 atomic_t *v = (atomic_t *)l;
57097@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
57098 atomic_sub(i, v);
57099 }
57100
57101+#ifdef CONFIG_PAX_REFCOUNT
57102+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57103+{
57104+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57105+
57106+ atomic_sub_unchecked(i, v);
57107+}
57108+#endif
57109+
57110 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57111 {
57112 atomic_t *v = (atomic_t *)l;
57113@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
57114 return (long)atomic_inc_return(v);
57115 }
57116
57117+#ifdef CONFIG_PAX_REFCOUNT
57118+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57119+{
57120+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57121+
57122+ return (long)atomic_inc_return_unchecked(v);
57123+}
57124+#endif
57125+
57126 static inline long atomic_long_dec_return(atomic_long_t *l)
57127 {
57128 atomic_t *v = (atomic_t *)l;
57129@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
57130
57131 #endif /* BITS_PER_LONG == 64 */
57132
57133+#ifdef CONFIG_PAX_REFCOUNT
57134+static inline void pax_refcount_needs_these_functions(void)
57135+{
57136+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57137+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57138+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57139+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57140+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57141+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57142+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57143+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57144+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57145+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57146+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57147+
57148+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57149+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57150+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57151+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57152+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57153+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57154+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57155+}
57156+#else
57157+#define atomic_read_unchecked(v) atomic_read(v)
57158+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57159+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57160+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57161+#define atomic_inc_unchecked(v) atomic_inc(v)
57162+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57163+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57164+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57165+#define atomic_dec_unchecked(v) atomic_dec(v)
57166+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57167+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57168+
57169+#define atomic_long_read_unchecked(v) atomic_long_read(v)
57170+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57171+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57172+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57173+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57174+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57175+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57176+#endif
57177+
57178 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
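To show how the *_unchecked additions above are meant to be used, here is a schematic kernel-context snippet (not a standalone program, and it assumes a kernel tree with this patch applied): a pure statistics counter whose wraparound is harmless opts out of PAX_REFCOUNT overflow detection by using the unchecked type and helpers.

    #include <linux/atomic.h>

    /* Schematic use only; the counter name is made up for illustration. */
    static atomic_long_unchecked_t dropped_packets;    /* zero-initialized */

    static void note_dropped_packet(void)
    {
        atomic_long_inc_unchecked(&dropped_packets);
    }

    static long read_dropped_packets(void)
    {
        return atomic_long_read_unchecked(&dropped_packets);
    }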
57179diff -urNp linux-3.1.1/include/asm-generic/cache.h linux-3.1.1/include/asm-generic/cache.h
57180--- linux-3.1.1/include/asm-generic/cache.h 2011-11-11 15:19:27.000000000 -0500
57181+++ linux-3.1.1/include/asm-generic/cache.h 2011-11-16 18:39:08.000000000 -0500
57182@@ -6,7 +6,7 @@
57183 * cache lines need to provide their own cache.h.
57184 */
57185
57186-#define L1_CACHE_SHIFT 5
57187-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57188+#define L1_CACHE_SHIFT 5UL
57189+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57190
57191 #endif /* __ASM_GENERIC_CACHE_H */
57192diff -urNp linux-3.1.1/include/asm-generic/int-l64.h linux-3.1.1/include/asm-generic/int-l64.h
57193--- linux-3.1.1/include/asm-generic/int-l64.h 2011-11-11 15:19:27.000000000 -0500
57194+++ linux-3.1.1/include/asm-generic/int-l64.h 2011-11-16 18:39:08.000000000 -0500
57195@@ -46,6 +46,8 @@ typedef unsigned int u32;
57196 typedef signed long s64;
57197 typedef unsigned long u64;
57198
57199+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57200+
57201 #define S8_C(x) x
57202 #define U8_C(x) x ## U
57203 #define S16_C(x) x
57204diff -urNp linux-3.1.1/include/asm-generic/int-ll64.h linux-3.1.1/include/asm-generic/int-ll64.h
57205--- linux-3.1.1/include/asm-generic/int-ll64.h 2011-11-11 15:19:27.000000000 -0500
57206+++ linux-3.1.1/include/asm-generic/int-ll64.h 2011-11-16 18:39:08.000000000 -0500
57207@@ -51,6 +51,8 @@ typedef unsigned int u32;
57208 typedef signed long long s64;
57209 typedef unsigned long long u64;
57210
57211+typedef unsigned long long intoverflow_t;
57212+
57213 #define S8_C(x) x
57214 #define U8_C(x) x ## U
57215 #define S16_C(x) x
57216diff -urNp linux-3.1.1/include/asm-generic/kmap_types.h linux-3.1.1/include/asm-generic/kmap_types.h
57217--- linux-3.1.1/include/asm-generic/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
57218+++ linux-3.1.1/include/asm-generic/kmap_types.h 2011-11-16 18:39:08.000000000 -0500
57219@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57220 KMAP_D(17) KM_NMI,
57221 KMAP_D(18) KM_NMI_PTE,
57222 KMAP_D(19) KM_KDB,
57223+KMAP_D(20) KM_CLEARPAGE,
57224 /*
57225 * Remember to update debug_kmap_atomic() when adding new kmap types!
57226 */
57227-KMAP_D(20) KM_TYPE_NR
57228+KMAP_D(21) KM_TYPE_NR
57229 };
57230
57231 #undef KMAP_D
57232diff -urNp linux-3.1.1/include/asm-generic/pgtable.h linux-3.1.1/include/asm-generic/pgtable.h
57233--- linux-3.1.1/include/asm-generic/pgtable.h 2011-11-11 15:19:27.000000000 -0500
57234+++ linux-3.1.1/include/asm-generic/pgtable.h 2011-11-16 18:39:08.000000000 -0500
57235@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57236 #endif /* __HAVE_ARCH_PMD_WRITE */
57237 #endif
57238
57239+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57240+static inline unsigned long pax_open_kernel(void) { return 0; }
57241+#endif
57242+
57243+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57244+static inline unsigned long pax_close_kernel(void) { return 0; }
57245+#endif
57246+
57247 #endif /* !__ASSEMBLY__ */
57248
57249 #endif /* _ASM_GENERIC_PGTABLE_H */
57250diff -urNp linux-3.1.1/include/asm-generic/pgtable-nopmd.h linux-3.1.1/include/asm-generic/pgtable-nopmd.h
57251--- linux-3.1.1/include/asm-generic/pgtable-nopmd.h 2011-11-11 15:19:27.000000000 -0500
57252+++ linux-3.1.1/include/asm-generic/pgtable-nopmd.h 2011-11-16 18:39:08.000000000 -0500
57253@@ -1,14 +1,19 @@
57254 #ifndef _PGTABLE_NOPMD_H
57255 #define _PGTABLE_NOPMD_H
57256
57257-#ifndef __ASSEMBLY__
57258-
57259 #include <asm-generic/pgtable-nopud.h>
57260
57261-struct mm_struct;
57262-
57263 #define __PAGETABLE_PMD_FOLDED
57264
57265+#define PMD_SHIFT PUD_SHIFT
57266+#define PTRS_PER_PMD 1
57267+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57268+#define PMD_MASK (~(PMD_SIZE-1))
57269+
57270+#ifndef __ASSEMBLY__
57271+
57272+struct mm_struct;
57273+
57274 /*
57275 * Having the pmd type consist of a pud gets the size right, and allows
57276 * us to conceptually access the pud entry that this pmd is folded into
57277@@ -16,11 +21,6 @@ struct mm_struct;
57278 */
57279 typedef struct { pud_t pud; } pmd_t;
57280
57281-#define PMD_SHIFT PUD_SHIFT
57282-#define PTRS_PER_PMD 1
57283-#define PMD_SIZE (1UL << PMD_SHIFT)
57284-#define PMD_MASK (~(PMD_SIZE-1))
57285-
57286 /*
57287 * The "pud_xxx()" functions here are trivial for a folded two-level
57288 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57289diff -urNp linux-3.1.1/include/asm-generic/pgtable-nopud.h linux-3.1.1/include/asm-generic/pgtable-nopud.h
57290--- linux-3.1.1/include/asm-generic/pgtable-nopud.h 2011-11-11 15:19:27.000000000 -0500
57291+++ linux-3.1.1/include/asm-generic/pgtable-nopud.h 2011-11-16 18:39:08.000000000 -0500
57292@@ -1,10 +1,15 @@
57293 #ifndef _PGTABLE_NOPUD_H
57294 #define _PGTABLE_NOPUD_H
57295
57296-#ifndef __ASSEMBLY__
57297-
57298 #define __PAGETABLE_PUD_FOLDED
57299
57300+#define PUD_SHIFT PGDIR_SHIFT
57301+#define PTRS_PER_PUD 1
57302+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57303+#define PUD_MASK (~(PUD_SIZE-1))
57304+
57305+#ifndef __ASSEMBLY__
57306+
57307 /*
57308 * Having the pud type consist of a pgd gets the size right, and allows
57309 * us to conceptually access the pgd entry that this pud is folded into
57310@@ -12,11 +17,6 @@
57311 */
57312 typedef struct { pgd_t pgd; } pud_t;
57313
57314-#define PUD_SHIFT PGDIR_SHIFT
57315-#define PTRS_PER_PUD 1
57316-#define PUD_SIZE (1UL << PUD_SHIFT)
57317-#define PUD_MASK (~(PUD_SIZE-1))
57318-
57319 /*
57320 * The "pgd_xxx()" functions here are trivial for a folded two-level
57321 * setup: the pud is never bad, and a pud always exists (as it's folded
57322diff -urNp linux-3.1.1/include/asm-generic/vmlinux.lds.h linux-3.1.1/include/asm-generic/vmlinux.lds.h
57323--- linux-3.1.1/include/asm-generic/vmlinux.lds.h 2011-11-11 15:19:27.000000000 -0500
57324+++ linux-3.1.1/include/asm-generic/vmlinux.lds.h 2011-11-16 18:39:08.000000000 -0500
57325@@ -217,6 +217,7 @@
57326 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57327 VMLINUX_SYMBOL(__start_rodata) = .; \
57328 *(.rodata) *(.rodata.*) \
57329+ *(.data..read_only) \
57330 *(__vermagic) /* Kernel version magic */ \
57331 . = ALIGN(8); \
57332 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57333@@ -723,17 +724,18 @@
57334 * section in the linker script will go there too. @phdr should have
57335 * a leading colon.
57336 *
57337- * Note that this macros defines __per_cpu_load as an absolute symbol.
57338+ * Note that this macro defines per_cpu_load as an absolute symbol.
57339 * If there is no need to put the percpu section at a predetermined
57340 * address, use PERCPU_SECTION.
57341 */
57342 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57343- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57344- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57345+ per_cpu_load = .; \
57346+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57347 - LOAD_OFFSET) { \
57348+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57349 PERCPU_INPUT(cacheline) \
57350 } phdr \
57351- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57352+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57353
57354 /**
57355 * PERCPU_SECTION - define output section for percpu area, simple version
57356diff -urNp linux-3.1.1/include/drm/drm_crtc_helper.h linux-3.1.1/include/drm/drm_crtc_helper.h
57357--- linux-3.1.1/include/drm/drm_crtc_helper.h 2011-11-11 15:19:27.000000000 -0500
57358+++ linux-3.1.1/include/drm/drm_crtc_helper.h 2011-11-16 18:39:08.000000000 -0500
57359@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57360
57361 /* disable crtc when not in use - more explicit than dpms off */
57362 void (*disable)(struct drm_crtc *crtc);
57363-};
57364+} __no_const;
57365
57366 struct drm_encoder_helper_funcs {
57367 void (*dpms)(struct drm_encoder *encoder, int mode);
57368@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57369 struct drm_connector *connector);
57370 /* disable encoder when not in use - more explicit than dpms off */
57371 void (*disable)(struct drm_encoder *encoder);
57372-};
57373+} __no_const;
57374
57375 struct drm_connector_helper_funcs {
57376 int (*get_modes)(struct drm_connector *connector);
57377diff -urNp linux-3.1.1/include/drm/drmP.h linux-3.1.1/include/drm/drmP.h
57378--- linux-3.1.1/include/drm/drmP.h 2011-11-11 15:19:27.000000000 -0500
57379+++ linux-3.1.1/include/drm/drmP.h 2011-11-16 18:39:08.000000000 -0500
57380@@ -73,6 +73,7 @@
57381 #include <linux/workqueue.h>
57382 #include <linux/poll.h>
57383 #include <asm/pgalloc.h>
57384+#include <asm/local.h>
57385 #include "drm.h"
57386
57387 #include <linux/idr.h>
57388@@ -1035,7 +1036,7 @@ struct drm_device {
57389
57390 /** \name Usage Counters */
57391 /*@{ */
57392- int open_count; /**< Outstanding files open */
57393+ local_t open_count; /**< Outstanding files open */
57394 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57395 atomic_t vma_count; /**< Outstanding vma areas open */
57396 int buf_use; /**< Buffers in use -- cannot alloc */
57397@@ -1046,7 +1047,7 @@ struct drm_device {
57398 /*@{ */
57399 unsigned long counters;
57400 enum drm_stat_type types[15];
57401- atomic_t counts[15];
57402+ atomic_unchecked_t counts[15];
57403 /*@} */
57404
57405 struct list_head filelist;
57406diff -urNp linux-3.1.1/include/drm/ttm/ttm_memory.h linux-3.1.1/include/drm/ttm/ttm_memory.h
57407--- linux-3.1.1/include/drm/ttm/ttm_memory.h 2011-11-11 15:19:27.000000000 -0500
57408+++ linux-3.1.1/include/drm/ttm/ttm_memory.h 2011-11-16 18:39:08.000000000 -0500
57409@@ -47,7 +47,7 @@
57410
57411 struct ttm_mem_shrink {
57412 int (*do_shrink) (struct ttm_mem_shrink *);
57413-};
57414+} __no_const;
57415
57416 /**
57417 * struct ttm_mem_global - Global memory accounting structure.
57418diff -urNp linux-3.1.1/include/linux/a.out.h linux-3.1.1/include/linux/a.out.h
57419--- linux-3.1.1/include/linux/a.out.h 2011-11-11 15:19:27.000000000 -0500
57420+++ linux-3.1.1/include/linux/a.out.h 2011-11-16 18:39:08.000000000 -0500
57421@@ -39,6 +39,14 @@ enum machine_type {
57422 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57423 };
57424
57425+/* Constants for the N_FLAGS field */
57426+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57427+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57428+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57429+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57430+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57431+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57432+
57433 #if !defined (N_MAGIC)
57434 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57435 #endif
57436diff -urNp linux-3.1.1/include/linux/atmdev.h linux-3.1.1/include/linux/atmdev.h
57437--- linux-3.1.1/include/linux/atmdev.h 2011-11-11 15:19:27.000000000 -0500
57438+++ linux-3.1.1/include/linux/atmdev.h 2011-11-16 18:39:08.000000000 -0500
57439@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57440 #endif
57441
57442 struct k_atm_aal_stats {
57443-#define __HANDLE_ITEM(i) atomic_t i
57444+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57445 __AAL_STAT_ITEMS
57446 #undef __HANDLE_ITEM
57447 };
57448diff -urNp linux-3.1.1/include/linux/binfmts.h linux-3.1.1/include/linux/binfmts.h
57449--- linux-3.1.1/include/linux/binfmts.h 2011-11-11 15:19:27.000000000 -0500
57450+++ linux-3.1.1/include/linux/binfmts.h 2011-11-16 18:39:08.000000000 -0500
57451@@ -88,6 +88,7 @@ struct linux_binfmt {
57452 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57453 int (*load_shlib)(struct file *);
57454 int (*core_dump)(struct coredump_params *cprm);
57455+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57456 unsigned long min_coredump; /* minimal dump size */
57457 };
57458
57459diff -urNp linux-3.1.1/include/linux/blkdev.h linux-3.1.1/include/linux/blkdev.h
57460--- linux-3.1.1/include/linux/blkdev.h 2011-11-11 15:19:27.000000000 -0500
57461+++ linux-3.1.1/include/linux/blkdev.h 2011-11-16 18:39:08.000000000 -0500
57462@@ -1321,7 +1321,7 @@ struct block_device_operations {
57463 /* this callback is with swap_lock and sometimes page table lock held */
57464 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57465 struct module *owner;
57466-};
57467+} __do_const;
57468
57469 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57470 unsigned long);
57471diff -urNp linux-3.1.1/include/linux/blktrace_api.h linux-3.1.1/include/linux/blktrace_api.h
57472--- linux-3.1.1/include/linux/blktrace_api.h 2011-11-11 15:19:27.000000000 -0500
57473+++ linux-3.1.1/include/linux/blktrace_api.h 2011-11-16 18:39:08.000000000 -0500
57474@@ -162,7 +162,7 @@ struct blk_trace {
57475 struct dentry *dir;
57476 struct dentry *dropped_file;
57477 struct dentry *msg_file;
57478- atomic_t dropped;
57479+ atomic_unchecked_t dropped;
57480 };
57481
57482 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57483diff -urNp linux-3.1.1/include/linux/byteorder/little_endian.h linux-3.1.1/include/linux/byteorder/little_endian.h
57484--- linux-3.1.1/include/linux/byteorder/little_endian.h 2011-11-11 15:19:27.000000000 -0500
57485+++ linux-3.1.1/include/linux/byteorder/little_endian.h 2011-11-16 18:39:08.000000000 -0500
57486@@ -42,51 +42,51 @@
57487
57488 static inline __le64 __cpu_to_le64p(const __u64 *p)
57489 {
57490- return (__force __le64)*p;
57491+ return (__force const __le64)*p;
57492 }
57493 static inline __u64 __le64_to_cpup(const __le64 *p)
57494 {
57495- return (__force __u64)*p;
57496+ return (__force const __u64)*p;
57497 }
57498 static inline __le32 __cpu_to_le32p(const __u32 *p)
57499 {
57500- return (__force __le32)*p;
57501+ return (__force const __le32)*p;
57502 }
57503 static inline __u32 __le32_to_cpup(const __le32 *p)
57504 {
57505- return (__force __u32)*p;
57506+ return (__force const __u32)*p;
57507 }
57508 static inline __le16 __cpu_to_le16p(const __u16 *p)
57509 {
57510- return (__force __le16)*p;
57511+ return (__force const __le16)*p;
57512 }
57513 static inline __u16 __le16_to_cpup(const __le16 *p)
57514 {
57515- return (__force __u16)*p;
57516+ return (__force const __u16)*p;
57517 }
57518 static inline __be64 __cpu_to_be64p(const __u64 *p)
57519 {
57520- return (__force __be64)__swab64p(p);
57521+ return (__force const __be64)__swab64p(p);
57522 }
57523 static inline __u64 __be64_to_cpup(const __be64 *p)
57524 {
57525- return __swab64p((__u64 *)p);
57526+ return __swab64p((const __u64 *)p);
57527 }
57528 static inline __be32 __cpu_to_be32p(const __u32 *p)
57529 {
57530- return (__force __be32)__swab32p(p);
57531+ return (__force const __be32)__swab32p(p);
57532 }
57533 static inline __u32 __be32_to_cpup(const __be32 *p)
57534 {
57535- return __swab32p((__u32 *)p);
57536+ return __swab32p((const __u32 *)p);
57537 }
57538 static inline __be16 __cpu_to_be16p(const __u16 *p)
57539 {
57540- return (__force __be16)__swab16p(p);
57541+ return (__force const __be16)__swab16p(p);
57542 }
57543 static inline __u16 __be16_to_cpup(const __be16 *p)
57544 {
57545- return __swab16p((__u16 *)p);
57546+ return __swab16p((const __u16 *)p);
57547 }
57548 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57549 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57550diff -urNp linux-3.1.1/include/linux/cache.h linux-3.1.1/include/linux/cache.h
57551--- linux-3.1.1/include/linux/cache.h 2011-11-11 15:19:27.000000000 -0500
57552+++ linux-3.1.1/include/linux/cache.h 2011-11-16 18:39:08.000000000 -0500
57553@@ -16,6 +16,10 @@
57554 #define __read_mostly
57555 #endif
57556
57557+#ifndef __read_only
57558+#define __read_only __read_mostly
57559+#endif
57560+
57561 #ifndef ____cacheline_aligned
57562 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57563 #endif
57564diff -urNp linux-3.1.1/include/linux/capability.h linux-3.1.1/include/linux/capability.h
57565--- linux-3.1.1/include/linux/capability.h 2011-11-11 15:19:27.000000000 -0500
57566+++ linux-3.1.1/include/linux/capability.h 2011-11-16 18:40:31.000000000 -0500
57567@@ -547,6 +547,9 @@ extern bool capable(int cap);
57568 extern bool ns_capable(struct user_namespace *ns, int cap);
57569 extern bool task_ns_capable(struct task_struct *t, int cap);
57570 extern bool nsown_capable(int cap);
57571+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57572+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57573+extern bool capable_nolog(int cap);
57574
57575 /* audit system wants to get cap info from files as well */
57576 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57577diff -urNp linux-3.1.1/include/linux/cleancache.h linux-3.1.1/include/linux/cleancache.h
57578--- linux-3.1.1/include/linux/cleancache.h 2011-11-11 15:19:27.000000000 -0500
57579+++ linux-3.1.1/include/linux/cleancache.h 2011-11-16 18:39:08.000000000 -0500
57580@@ -31,7 +31,7 @@ struct cleancache_ops {
57581 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57582 void (*flush_inode)(int, struct cleancache_filekey);
57583 void (*flush_fs)(int);
57584-};
57585+} __no_const;
57586
57587 extern struct cleancache_ops
57588 cleancache_register_ops(struct cleancache_ops *ops);
57589diff -urNp linux-3.1.1/include/linux/compiler-gcc4.h linux-3.1.1/include/linux/compiler-gcc4.h
57590--- linux-3.1.1/include/linux/compiler-gcc4.h 2011-11-11 15:19:27.000000000 -0500
57591+++ linux-3.1.1/include/linux/compiler-gcc4.h 2011-11-16 18:39:08.000000000 -0500
57592@@ -31,6 +31,12 @@
57593
57594
57595 #if __GNUC_MINOR__ >= 5
57596+
57597+#ifdef CONSTIFY_PLUGIN
57598+#define __no_const __attribute__((no_const))
57599+#define __do_const __attribute__((do_const))
57600+#endif
57601+
57602 /*
57603 * Mark a position in code as unreachable. This can be used to
57604 * suppress control flow warnings after asm blocks that transfer
57605@@ -46,6 +52,11 @@
57606 #define __noclone __attribute__((__noclone__))
57607
57608 #endif
57609+
57610+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57611+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57612+#define __bos0(ptr) __bos((ptr), 0)
57613+#define __bos1(ptr) __bos((ptr), 1)
57614 #endif
57615
57616 #if __GNUC_MINOR__ > 0
57617diff -urNp linux-3.1.1/include/linux/compiler.h linux-3.1.1/include/linux/compiler.h
57618--- linux-3.1.1/include/linux/compiler.h 2011-11-11 15:19:27.000000000 -0500
57619+++ linux-3.1.1/include/linux/compiler.h 2011-11-16 18:39:08.000000000 -0500
57620@@ -5,31 +5,62 @@
57621
57622 #ifdef __CHECKER__
57623 # define __user __attribute__((noderef, address_space(1)))
57624+# define __force_user __force __user
57625 # define __kernel __attribute__((address_space(0)))
57626+# define __force_kernel __force __kernel
57627 # define __safe __attribute__((safe))
57628 # define __force __attribute__((force))
57629 # define __nocast __attribute__((nocast))
57630 # define __iomem __attribute__((noderef, address_space(2)))
57631+# define __force_iomem __force __iomem
57632 # define __acquires(x) __attribute__((context(x,0,1)))
57633 # define __releases(x) __attribute__((context(x,1,0)))
57634 # define __acquire(x) __context__(x,1)
57635 # define __release(x) __context__(x,-1)
57636 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57637 # define __percpu __attribute__((noderef, address_space(3)))
57638+# define __force_percpu __force __percpu
57639 #ifdef CONFIG_SPARSE_RCU_POINTER
57640 # define __rcu __attribute__((noderef, address_space(4)))
57641+# define __force_rcu __force __rcu
57642 #else
57643 # define __rcu
57644+# define __force_rcu
57645 #endif
57646 extern void __chk_user_ptr(const volatile void __user *);
57647 extern void __chk_io_ptr(const volatile void __iomem *);
57648+#elif defined(CHECKER_PLUGIN)
57649+//# define __user
57650+//# define __force_user
57651+//# define __kernel
57652+//# define __force_kernel
57653+# define __safe
57654+# define __force
57655+# define __nocast
57656+# define __iomem
57657+# define __force_iomem
57658+# define __chk_user_ptr(x) (void)0
57659+# define __chk_io_ptr(x) (void)0
57660+# define __builtin_warning(x, y...) (1)
57661+# define __acquires(x)
57662+# define __releases(x)
57663+# define __acquire(x) (void)0
57664+# define __release(x) (void)0
57665+# define __cond_lock(x,c) (c)
57666+# define __percpu
57667+# define __force_percpu
57668+# define __rcu
57669+# define __force_rcu
57670 #else
57671 # define __user
57672+# define __force_user
57673 # define __kernel
57674+# define __force_kernel
57675 # define __safe
57676 # define __force
57677 # define __nocast
57678 # define __iomem
57679+# define __force_iomem
57680 # define __chk_user_ptr(x) (void)0
57681 # define __chk_io_ptr(x) (void)0
57682 # define __builtin_warning(x, y...) (1)
57683@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
57684 # define __release(x) (void)0
57685 # define __cond_lock(x,c) (c)
57686 # define __percpu
57687+# define __force_percpu
57688 # define __rcu
57689+# define __force_rcu
57690 #endif
57691
57692 #ifdef __KERNEL__
57693@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
57694 # define __attribute_const__ /* unimplemented */
57695 #endif
57696
57697+#ifndef __no_const
57698+# define __no_const
57699+#endif
57700+
57701+#ifndef __do_const
57702+# define __do_const
57703+#endif
57704+
57705 /*
57706 * Tell gcc if a function is cold. The compiler will assume any path
57707 * directly leading to the call is unlikely.
57708@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
57709 #define __cold
57710 #endif
57711
57712+#ifndef __alloc_size
57713+#define __alloc_size(...)
57714+#endif
57715+
57716+#ifndef __bos
57717+#define __bos(ptr, arg)
57718+#endif
57719+
57720+#ifndef __bos0
57721+#define __bos0(ptr)
57722+#endif
57723+
57724+#ifndef __bos1
57725+#define __bos1(ptr)
57726+#endif
57727+
57728 /* Simple shorthand for a section definition */
57729 #ifndef __section
57730 # define __section(S) __attribute__ ((__section__(#S)))
57731@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
57732 * use is to mediate communication between process-level code and irq/NMI
57733 * handlers, all running on the same CPU.
57734 */
57735-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57736+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57737+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57738
57739 #endif /* __LINUX_COMPILER_H */
57740diff -urNp linux-3.1.1/include/linux/cpuset.h linux-3.1.1/include/linux/cpuset.h
57741--- linux-3.1.1/include/linux/cpuset.h 2011-11-11 15:19:27.000000000 -0500
57742+++ linux-3.1.1/include/linux/cpuset.h 2011-11-16 18:39:08.000000000 -0500
57743@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
57744 * nodemask.
57745 */
57746 smp_mb();
57747- --ACCESS_ONCE(current->mems_allowed_change_disable);
57748+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57749 }
57750
57751 static inline void set_mems_allowed(nodemask_t nodemask)
57752diff -urNp linux-3.1.1/include/linux/crypto.h linux-3.1.1/include/linux/crypto.h
57753--- linux-3.1.1/include/linux/crypto.h 2011-11-11 15:19:27.000000000 -0500
57754+++ linux-3.1.1/include/linux/crypto.h 2011-11-16 18:39:08.000000000 -0500
57755@@ -361,7 +361,7 @@ struct cipher_tfm {
57756 const u8 *key, unsigned int keylen);
57757 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57758 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57759-};
57760+} __no_const;
57761
57762 struct hash_tfm {
57763 int (*init)(struct hash_desc *desc);
57764@@ -382,13 +382,13 @@ struct compress_tfm {
57765 int (*cot_decompress)(struct crypto_tfm *tfm,
57766 const u8 *src, unsigned int slen,
57767 u8 *dst, unsigned int *dlen);
57768-};
57769+} __no_const;
57770
57771 struct rng_tfm {
57772 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57773 unsigned int dlen);
57774 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57775-};
57776+} __no_const;
57777
57778 #define crt_ablkcipher crt_u.ablkcipher
57779 #define crt_aead crt_u.aead
57780diff -urNp linux-3.1.1/include/linux/decompress/mm.h linux-3.1.1/include/linux/decompress/mm.h
57781--- linux-3.1.1/include/linux/decompress/mm.h 2011-11-11 15:19:27.000000000 -0500
57782+++ linux-3.1.1/include/linux/decompress/mm.h 2011-11-16 18:39:08.000000000 -0500
57783@@ -77,7 +77,7 @@ static void free(void *where)
57784 * warnings when not needed (indeed large_malloc / large_free are not
57785 * needed by inflate */
57786
57787-#define malloc(a) kmalloc(a, GFP_KERNEL)
57788+#define malloc(a) kmalloc((a), GFP_KERNEL)
57789 #define free(a) kfree(a)
57790
57791 #define large_malloc(a) vmalloc(a)
57792diff -urNp linux-3.1.1/include/linux/dma-mapping.h linux-3.1.1/include/linux/dma-mapping.h
57793--- linux-3.1.1/include/linux/dma-mapping.h 2011-11-11 15:19:27.000000000 -0500
57794+++ linux-3.1.1/include/linux/dma-mapping.h 2011-11-16 18:39:08.000000000 -0500
57795@@ -42,7 +42,7 @@ struct dma_map_ops {
57796 int (*dma_supported)(struct device *dev, u64 mask);
57797 int (*set_dma_mask)(struct device *dev, u64 mask);
57798 int is_phys;
57799-};
57800+} __do_const;
57801
57802 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57803
57804diff -urNp linux-3.1.1/include/linux/efi.h linux-3.1.1/include/linux/efi.h
57805--- linux-3.1.1/include/linux/efi.h 2011-11-11 15:19:27.000000000 -0500
57806+++ linux-3.1.1/include/linux/efi.h 2011-11-16 18:39:08.000000000 -0500
57807@@ -446,7 +446,7 @@ struct efivar_operations {
57808 efi_get_variable_t *get_variable;
57809 efi_get_next_variable_t *get_next_variable;
57810 efi_set_variable_t *set_variable;
57811-};
57812+} __no_const;
57813
57814 struct efivars {
57815 /*
57816diff -urNp linux-3.1.1/include/linux/elf.h linux-3.1.1/include/linux/elf.h
57817--- linux-3.1.1/include/linux/elf.h 2011-11-11 15:19:27.000000000 -0500
57818+++ linux-3.1.1/include/linux/elf.h 2011-11-16 18:39:08.000000000 -0500
57819@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57820 #define PT_GNU_EH_FRAME 0x6474e550
57821
57822 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57823+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57824+
57825+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57826+
57827+/* Constants for the e_flags field */
57828+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57829+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57830+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57831+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57832+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57833+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57834
57835 /*
57836 * Extended Numbering
57837@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57838 #define DT_DEBUG 21
57839 #define DT_TEXTREL 22
57840 #define DT_JMPREL 23
57841+#define DT_FLAGS 30
57842+ #define DF_TEXTREL 0x00000004
57843 #define DT_ENCODING 32
57844 #define OLD_DT_LOOS 0x60000000
57845 #define DT_LOOS 0x6000000d
57846@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57847 #define PF_W 0x2
57848 #define PF_X 0x1
57849
57850+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57851+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57852+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57853+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57854+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57855+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57856+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57857+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57858+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57859+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57860+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57861+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57862+
57863 typedef struct elf32_phdr{
57864 Elf32_Word p_type;
57865 Elf32_Off p_offset;
57866@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
57867 #define EI_OSABI 7
57868 #define EI_PAD 8
57869
57870+#define EI_PAX 14
57871+
57872 #define ELFMAG0 0x7f /* EI_MAG */
57873 #define ELFMAG1 'E'
57874 #define ELFMAG2 'L'
57875@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
57876 #define elf_note elf32_note
57877 #define elf_addr_t Elf32_Off
57878 #define Elf_Half Elf32_Half
57879+#define elf_dyn Elf32_Dyn
57880
57881 #else
57882
57883@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
57884 #define elf_note elf64_note
57885 #define elf_addr_t Elf64_Off
57886 #define Elf_Half Elf64_Half
57887+#define elf_dyn Elf64_Dyn
57888
57889 #endif
57890
57891diff -urNp linux-3.1.1/include/linux/firewire.h linux-3.1.1/include/linux/firewire.h
57892--- linux-3.1.1/include/linux/firewire.h 2011-11-11 15:19:27.000000000 -0500
57893+++ linux-3.1.1/include/linux/firewire.h 2011-11-16 18:39:08.000000000 -0500
57894@@ -428,7 +428,7 @@ struct fw_iso_context {
57895 union {
57896 fw_iso_callback_t sc;
57897 fw_iso_mc_callback_t mc;
57898- } callback;
57899+ } __no_const callback;
57900 void *callback_data;
57901 };
57902
57903diff -urNp linux-3.1.1/include/linux/fscache-cache.h linux-3.1.1/include/linux/fscache-cache.h
57904--- linux-3.1.1/include/linux/fscache-cache.h 2011-11-11 15:19:27.000000000 -0500
57905+++ linux-3.1.1/include/linux/fscache-cache.h 2011-11-16 18:39:08.000000000 -0500
57906@@ -102,7 +102,7 @@ struct fscache_operation {
57907 fscache_operation_release_t release;
57908 };
57909
57910-extern atomic_t fscache_op_debug_id;
57911+extern atomic_unchecked_t fscache_op_debug_id;
57912 extern void fscache_op_work_func(struct work_struct *work);
57913
57914 extern void fscache_enqueue_operation(struct fscache_operation *);
57915@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
57916 {
57917 INIT_WORK(&op->work, fscache_op_work_func);
57918 atomic_set(&op->usage, 1);
57919- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57920+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57921 op->processor = processor;
57922 op->release = release;
57923 INIT_LIST_HEAD(&op->pend_link);
57924diff -urNp linux-3.1.1/include/linux/fs.h linux-3.1.1/include/linux/fs.h
57925--- linux-3.1.1/include/linux/fs.h 2011-11-11 15:19:27.000000000 -0500
57926+++ linux-3.1.1/include/linux/fs.h 2011-11-16 23:39:39.000000000 -0500
57927@@ -1588,7 +1588,8 @@ struct file_operations {
57928 int (*setlease)(struct file *, long, struct file_lock **);
57929 long (*fallocate)(struct file *file, int mode, loff_t offset,
57930 loff_t len);
57931-};
57932+} __do_const;
57933+typedef struct file_operations __no_const file_operations_no_const;
57934
57935 struct inode_operations {
57936 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
57937diff -urNp linux-3.1.1/include/linux/fsnotify.h linux-3.1.1/include/linux/fsnotify.h
57938--- linux-3.1.1/include/linux/fsnotify.h 2011-11-11 15:19:27.000000000 -0500
57939+++ linux-3.1.1/include/linux/fsnotify.h 2011-11-16 18:39:08.000000000 -0500
57940@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
57941 */
57942 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
57943 {
57944- return kstrdup(name, GFP_KERNEL);
57945+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
57946 }
57947
57948 /*
57949diff -urNp linux-3.1.1/include/linux/fs_struct.h linux-3.1.1/include/linux/fs_struct.h
57950--- linux-3.1.1/include/linux/fs_struct.h 2011-11-11 15:19:27.000000000 -0500
57951+++ linux-3.1.1/include/linux/fs_struct.h 2011-11-16 18:39:08.000000000 -0500
57952@@ -6,7 +6,7 @@
57953 #include <linux/seqlock.h>
57954
57955 struct fs_struct {
57956- int users;
57957+ atomic_t users;
57958 spinlock_t lock;
57959 seqcount_t seq;
57960 int umask;
57961diff -urNp linux-3.1.1/include/linux/ftrace_event.h linux-3.1.1/include/linux/ftrace_event.h
57962--- linux-3.1.1/include/linux/ftrace_event.h 2011-11-11 15:19:27.000000000 -0500
57963+++ linux-3.1.1/include/linux/ftrace_event.h 2011-11-16 18:39:08.000000000 -0500
57964@@ -97,7 +97,7 @@ struct trace_event_functions {
57965 trace_print_func raw;
57966 trace_print_func hex;
57967 trace_print_func binary;
57968-};
57969+} __no_const;
57970
57971 struct trace_event {
57972 struct hlist_node node;
57973@@ -252,7 +252,7 @@ extern int trace_define_field(struct ftr
57974 extern int trace_add_event_call(struct ftrace_event_call *call);
57975 extern void trace_remove_event_call(struct ftrace_event_call *call);
57976
57977-#define is_signed_type(type) (((type)(-1)) < 0)
57978+#define is_signed_type(type) (((type)(-1)) < (type)1)
57979
57980 int trace_set_clr_event(const char *system, const char *event, int set);
57981
57982diff -urNp linux-3.1.1/include/linux/genhd.h linux-3.1.1/include/linux/genhd.h
57983--- linux-3.1.1/include/linux/genhd.h 2011-11-11 15:19:27.000000000 -0500
57984+++ linux-3.1.1/include/linux/genhd.h 2011-11-16 18:39:08.000000000 -0500
57985@@ -184,7 +184,7 @@ struct gendisk {
57986 struct kobject *slave_dir;
57987
57988 struct timer_rand_state *random;
57989- atomic_t sync_io; /* RAID */
57990+ atomic_unchecked_t sync_io; /* RAID */
57991 struct disk_events *ev;
57992 #ifdef CONFIG_BLK_DEV_INTEGRITY
57993 struct blk_integrity *integrity;
57994diff -urNp linux-3.1.1/include/linux/gracl.h linux-3.1.1/include/linux/gracl.h
57995--- linux-3.1.1/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
57996+++ linux-3.1.1/include/linux/gracl.h 2011-11-16 18:40:31.000000000 -0500
57997@@ -0,0 +1,317 @@
57998+#ifndef GR_ACL_H
57999+#define GR_ACL_H
58000+
58001+#include <linux/grdefs.h>
58002+#include <linux/resource.h>
58003+#include <linux/capability.h>
58004+#include <linux/dcache.h>
58005+#include <asm/resource.h>
58006+
58007+/* Major status information */
58008+
58009+#define GR_VERSION "grsecurity 2.2.2"
58010+#define GRSECURITY_VERSION 0x2202
58011+
58012+enum {
58013+ GR_SHUTDOWN = 0,
58014+ GR_ENABLE = 1,
58015+ GR_SPROLE = 2,
58016+ GR_RELOAD = 3,
58017+ GR_SEGVMOD = 4,
58018+ GR_STATUS = 5,
58019+ GR_UNSPROLE = 6,
58020+ GR_PASSSET = 7,
58021+ GR_SPROLEPAM = 8,
58022+};
58023+
58024+/* Password setup definitions
58025+ * kernel/grhash.c */
58026+enum {
58027+ GR_PW_LEN = 128,
58028+ GR_SALT_LEN = 16,
58029+ GR_SHA_LEN = 32,
58030+};
58031+
58032+enum {
58033+ GR_SPROLE_LEN = 64,
58034+};
58035+
58036+enum {
58037+ GR_NO_GLOB = 0,
58038+ GR_REG_GLOB,
58039+ GR_CREATE_GLOB
58040+};
58041+
58042+#define GR_NLIMITS 32
58043+
58044+/* Begin Data Structures */
58045+
58046+struct sprole_pw {
58047+ unsigned char *rolename;
58048+ unsigned char salt[GR_SALT_LEN];
58049+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58050+};
58051+
58052+struct name_entry {
58053+ __u32 key;
58054+ ino_t inode;
58055+ dev_t device;
58056+ char *name;
58057+ __u16 len;
58058+ __u8 deleted;
58059+ struct name_entry *prev;
58060+ struct name_entry *next;
58061+};
58062+
58063+struct inodev_entry {
58064+ struct name_entry *nentry;
58065+ struct inodev_entry *prev;
58066+ struct inodev_entry *next;
58067+};
58068+
58069+struct acl_role_db {
58070+ struct acl_role_label **r_hash;
58071+ __u32 r_size;
58072+};
58073+
58074+struct inodev_db {
58075+ struct inodev_entry **i_hash;
58076+ __u32 i_size;
58077+};
58078+
58079+struct name_db {
58080+ struct name_entry **n_hash;
58081+ __u32 n_size;
58082+};
58083+
58084+struct crash_uid {
58085+ uid_t uid;
58086+ unsigned long expires;
58087+};
58088+
58089+struct gr_hash_struct {
58090+ void **table;
58091+ void **nametable;
58092+ void *first;
58093+ __u32 table_size;
58094+ __u32 used_size;
58095+ int type;
58096+};
58097+
58098+/* Userspace Grsecurity ACL data structures */
58099+
58100+struct acl_subject_label {
58101+ char *filename;
58102+ ino_t inode;
58103+ dev_t device;
58104+ __u32 mode;
58105+ kernel_cap_t cap_mask;
58106+ kernel_cap_t cap_lower;
58107+ kernel_cap_t cap_invert_audit;
58108+
58109+ struct rlimit res[GR_NLIMITS];
58110+ __u32 resmask;
58111+
58112+ __u8 user_trans_type;
58113+ __u8 group_trans_type;
58114+ uid_t *user_transitions;
58115+ gid_t *group_transitions;
58116+ __u16 user_trans_num;
58117+ __u16 group_trans_num;
58118+
58119+ __u32 sock_families[2];
58120+ __u32 ip_proto[8];
58121+ __u32 ip_type;
58122+ struct acl_ip_label **ips;
58123+ __u32 ip_num;
58124+ __u32 inaddr_any_override;
58125+
58126+ __u32 crashes;
58127+ unsigned long expires;
58128+
58129+ struct acl_subject_label *parent_subject;
58130+ struct gr_hash_struct *hash;
58131+ struct acl_subject_label *prev;
58132+ struct acl_subject_label *next;
58133+
58134+ struct acl_object_label **obj_hash;
58135+ __u32 obj_hash_size;
58136+ __u16 pax_flags;
58137+};
58138+
58139+struct role_allowed_ip {
58140+ __u32 addr;
58141+ __u32 netmask;
58142+
58143+ struct role_allowed_ip *prev;
58144+ struct role_allowed_ip *next;
58145+};
58146+
58147+struct role_transition {
58148+ char *rolename;
58149+
58150+ struct role_transition *prev;
58151+ struct role_transition *next;
58152+};
58153+
58154+struct acl_role_label {
58155+ char *rolename;
58156+ uid_t uidgid;
58157+ __u16 roletype;
58158+
58159+ __u16 auth_attempts;
58160+ unsigned long expires;
58161+
58162+ struct acl_subject_label *root_label;
58163+ struct gr_hash_struct *hash;
58164+
58165+ struct acl_role_label *prev;
58166+ struct acl_role_label *next;
58167+
58168+ struct role_transition *transitions;
58169+ struct role_allowed_ip *allowed_ips;
58170+ uid_t *domain_children;
58171+ __u16 domain_child_num;
58172+
58173+ struct acl_subject_label **subj_hash;
58174+ __u32 subj_hash_size;
58175+};
58176+
58177+struct user_acl_role_db {
58178+ struct acl_role_label **r_table;
58179+ __u32 num_pointers; /* Number of allocations to track */
58180+ __u32 num_roles; /* Number of roles */
58181+ __u32 num_domain_children; /* Number of domain children */
58182+ __u32 num_subjects; /* Number of subjects */
58183+ __u32 num_objects; /* Number of objects */
58184+};
58185+
58186+struct acl_object_label {
58187+ char *filename;
58188+ ino_t inode;
58189+ dev_t device;
58190+ __u32 mode;
58191+
58192+ struct acl_subject_label *nested;
58193+ struct acl_object_label *globbed;
58194+
58195+ /* next two structures not used */
58196+
58197+ struct acl_object_label *prev;
58198+ struct acl_object_label *next;
58199+};
58200+
58201+struct acl_ip_label {
58202+ char *iface;
58203+ __u32 addr;
58204+ __u32 netmask;
58205+ __u16 low, high;
58206+ __u8 mode;
58207+ __u32 type;
58208+ __u32 proto[8];
58209+
58210+ /* next two structures not used */
58211+
58212+ struct acl_ip_label *prev;
58213+ struct acl_ip_label *next;
58214+};
58215+
58216+struct gr_arg {
58217+ struct user_acl_role_db role_db;
58218+ unsigned char pw[GR_PW_LEN];
58219+ unsigned char salt[GR_SALT_LEN];
58220+ unsigned char sum[GR_SHA_LEN];
58221+ unsigned char sp_role[GR_SPROLE_LEN];
58222+ struct sprole_pw *sprole_pws;
58223+ dev_t segv_device;
58224+ ino_t segv_inode;
58225+ uid_t segv_uid;
58226+ __u16 num_sprole_pws;
58227+ __u16 mode;
58228+};
58229+
58230+struct gr_arg_wrapper {
58231+ struct gr_arg *arg;
58232+ __u32 version;
58233+ __u32 size;
58234+};
58235+
58236+struct subject_map {
58237+ struct acl_subject_label *user;
58238+ struct acl_subject_label *kernel;
58239+ struct subject_map *prev;
58240+ struct subject_map *next;
58241+};
58242+
58243+struct acl_subj_map_db {
58244+ struct subject_map **s_hash;
58245+ __u32 s_size;
58246+};
58247+
58248+/* End Data Structures Section */
58249+
58250+/* Hash functions generated by empirical testing by Brad Spengler
58251+ Makes good use of the low bits of the inode. Generally 0-1 times
58252+ in loop for successful match. 0-3 for unsuccessful match.
58253+ Shift/add algorithm with modulus of table size and an XOR*/
58254+
58255+static __inline__ unsigned int
58256+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58257+{
58258+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58259+}
58260+
58261+ static __inline__ unsigned int
58262+shash(const struct acl_subject_label *userp, const unsigned int sz)
58263+{
58264+ return ((const unsigned long)userp % sz);
58265+}
58266+
58267+static __inline__ unsigned int
58268+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58269+{
58270+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58271+}
58272+
58273+static __inline__ unsigned int
58274+nhash(const char *name, const __u16 len, const unsigned int sz)
58275+{
58276+ return full_name_hash((const unsigned char *)name, len) % sz;
58277+}
58278+
58279+#define FOR_EACH_ROLE_START(role) \
58280+ role = role_list; \
58281+ while (role) {
58282+
58283+#define FOR_EACH_ROLE_END(role) \
58284+ role = role->prev; \
58285+ }
58286+
58287+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58288+ subj = NULL; \
58289+ iter = 0; \
58290+ while (iter < role->subj_hash_size) { \
58291+ if (subj == NULL) \
58292+ subj = role->subj_hash[iter]; \
58293+ if (subj == NULL) { \
58294+ iter++; \
58295+ continue; \
58296+ }
58297+
58298+#define FOR_EACH_SUBJECT_END(subj,iter) \
58299+ subj = subj->next; \
58300+ if (subj == NULL) \
58301+ iter++; \
58302+ }
58303+
58304+
58305+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58306+ subj = role->hash->first; \
58307+ while (subj != NULL) {
58308+
58309+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58310+ subj = subj->next; \
58311+ }
58312+
58313+#endif
58314+
58315diff -urNp linux-3.1.1/include/linux/gralloc.h linux-3.1.1/include/linux/gralloc.h
58316--- linux-3.1.1/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58317+++ linux-3.1.1/include/linux/gralloc.h 2011-11-16 18:40:31.000000000 -0500
58318@@ -0,0 +1,9 @@
58319+#ifndef __GRALLOC_H
58320+#define __GRALLOC_H
58321+
58322+void acl_free_all(void);
58323+int acl_alloc_stack_init(unsigned long size);
58324+void *acl_alloc(unsigned long len);
58325+void *acl_alloc_num(unsigned long num, unsigned long len);
58326+
58327+#endif
58328diff -urNp linux-3.1.1/include/linux/grdefs.h linux-3.1.1/include/linux/grdefs.h
58329--- linux-3.1.1/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58330+++ linux-3.1.1/include/linux/grdefs.h 2011-11-16 18:40:31.000000000 -0500
58331@@ -0,0 +1,140 @@
58332+#ifndef GRDEFS_H
58333+#define GRDEFS_H
58334+
58335+/* Begin grsecurity status declarations */
58336+
58337+enum {
58338+ GR_READY = 0x01,
58339+ GR_STATUS_INIT = 0x00 // disabled state
58340+};
58341+
58342+/* Begin ACL declarations */
58343+
58344+/* Role flags */
58345+
58346+enum {
58347+ GR_ROLE_USER = 0x0001,
58348+ GR_ROLE_GROUP = 0x0002,
58349+ GR_ROLE_DEFAULT = 0x0004,
58350+ GR_ROLE_SPECIAL = 0x0008,
58351+ GR_ROLE_AUTH = 0x0010,
58352+ GR_ROLE_NOPW = 0x0020,
58353+ GR_ROLE_GOD = 0x0040,
58354+ GR_ROLE_LEARN = 0x0080,
58355+ GR_ROLE_TPE = 0x0100,
58356+ GR_ROLE_DOMAIN = 0x0200,
58357+ GR_ROLE_PAM = 0x0400,
58358+ GR_ROLE_PERSIST = 0x0800
58359+};
58360+
58361+/* ACL Subject and Object mode flags */
58362+enum {
58363+ GR_DELETED = 0x80000000
58364+};
58365+
58366+/* ACL Object-only mode flags */
58367+enum {
58368+ GR_READ = 0x00000001,
58369+ GR_APPEND = 0x00000002,
58370+ GR_WRITE = 0x00000004,
58371+ GR_EXEC = 0x00000008,
58372+ GR_FIND = 0x00000010,
58373+ GR_INHERIT = 0x00000020,
58374+ GR_SETID = 0x00000040,
58375+ GR_CREATE = 0x00000080,
58376+ GR_DELETE = 0x00000100,
58377+ GR_LINK = 0x00000200,
58378+ GR_AUDIT_READ = 0x00000400,
58379+ GR_AUDIT_APPEND = 0x00000800,
58380+ GR_AUDIT_WRITE = 0x00001000,
58381+ GR_AUDIT_EXEC = 0x00002000,
58382+ GR_AUDIT_FIND = 0x00004000,
58383+ GR_AUDIT_INHERIT= 0x00008000,
58384+ GR_AUDIT_SETID = 0x00010000,
58385+ GR_AUDIT_CREATE = 0x00020000,
58386+ GR_AUDIT_DELETE = 0x00040000,
58387+ GR_AUDIT_LINK = 0x00080000,
58388+ GR_PTRACERD = 0x00100000,
58389+ GR_NOPTRACE = 0x00200000,
58390+ GR_SUPPRESS = 0x00400000,
58391+ GR_NOLEARN = 0x00800000,
58392+ GR_INIT_TRANSFER= 0x01000000
58393+};
58394+
58395+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58396+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58397+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58398+
58399+/* ACL subject-only mode flags */
58400+enum {
58401+ GR_KILL = 0x00000001,
58402+ GR_VIEW = 0x00000002,
58403+ GR_PROTECTED = 0x00000004,
58404+ GR_LEARN = 0x00000008,
58405+ GR_OVERRIDE = 0x00000010,
58406+ /* just a placeholder, this mode is only used in userspace */
58407+ GR_DUMMY = 0x00000020,
58408+ GR_PROTSHM = 0x00000040,
58409+ GR_KILLPROC = 0x00000080,
58410+ GR_KILLIPPROC = 0x00000100,
58411+ /* just a placeholder, this mode is only used in userspace */
58412+ GR_NOTROJAN = 0x00000200,
58413+ GR_PROTPROCFD = 0x00000400,
58414+ GR_PROCACCT = 0x00000800,
58415+ GR_RELAXPTRACE = 0x00001000,
58416+ GR_NESTED = 0x00002000,
58417+ GR_INHERITLEARN = 0x00004000,
58418+ GR_PROCFIND = 0x00008000,
58419+ GR_POVERRIDE = 0x00010000,
58420+ GR_KERNELAUTH = 0x00020000,
58421+ GR_ATSECURE = 0x00040000,
58422+ GR_SHMEXEC = 0x00080000
58423+};
58424+
58425+enum {
58426+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58427+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58428+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58429+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58430+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58431+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58432+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58433+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58434+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58435+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58436+};
58437+
58438+enum {
58439+ GR_ID_USER = 0x01,
58440+ GR_ID_GROUP = 0x02,
58441+};
58442+
58443+enum {
58444+ GR_ID_ALLOW = 0x01,
58445+ GR_ID_DENY = 0x02,
58446+};
58447+
58448+#define GR_CRASH_RES 31
58449+#define GR_UIDTABLE_MAX 500
58450+
58451+/* begin resource learning section */
58452+enum {
58453+ GR_RLIM_CPU_BUMP = 60,
58454+ GR_RLIM_FSIZE_BUMP = 50000,
58455+ GR_RLIM_DATA_BUMP = 10000,
58456+ GR_RLIM_STACK_BUMP = 1000,
58457+ GR_RLIM_CORE_BUMP = 10000,
58458+ GR_RLIM_RSS_BUMP = 500000,
58459+ GR_RLIM_NPROC_BUMP = 1,
58460+ GR_RLIM_NOFILE_BUMP = 5,
58461+ GR_RLIM_MEMLOCK_BUMP = 50000,
58462+ GR_RLIM_AS_BUMP = 500000,
58463+ GR_RLIM_LOCKS_BUMP = 2,
58464+ GR_RLIM_SIGPENDING_BUMP = 5,
58465+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58466+ GR_RLIM_NICE_BUMP = 1,
58467+ GR_RLIM_RTPRIO_BUMP = 1,
58468+ GR_RLIM_RTTIME_BUMP = 1000000
58469+};
58470+
58471+#endif
58472diff -urNp linux-3.1.1/include/linux/grinternal.h linux-3.1.1/include/linux/grinternal.h
58473--- linux-3.1.1/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58474+++ linux-3.1.1/include/linux/grinternal.h 2011-11-16 18:40:31.000000000 -0500
58475@@ -0,0 +1,220 @@
58476+#ifndef __GRINTERNAL_H
58477+#define __GRINTERNAL_H
58478+
58479+#ifdef CONFIG_GRKERNSEC
58480+
58481+#include <linux/fs.h>
58482+#include <linux/mnt_namespace.h>
58483+#include <linux/nsproxy.h>
58484+#include <linux/gracl.h>
58485+#include <linux/grdefs.h>
58486+#include <linux/grmsg.h>
58487+
58488+void gr_add_learn_entry(const char *fmt, ...)
58489+ __attribute__ ((format (printf, 1, 2)));
58490+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58491+ const struct vfsmount *mnt);
58492+__u32 gr_check_create(const struct dentry *new_dentry,
58493+ const struct dentry *parent,
58494+ const struct vfsmount *mnt, const __u32 mode);
58495+int gr_check_protected_task(const struct task_struct *task);
58496+__u32 to_gr_audit(const __u32 reqmode);
58497+int gr_set_acls(const int type);
58498+int gr_apply_subject_to_task(struct task_struct *task);
58499+int gr_acl_is_enabled(void);
58500+char gr_roletype_to_char(void);
58501+
58502+void gr_handle_alertkill(struct task_struct *task);
58503+char *gr_to_filename(const struct dentry *dentry,
58504+ const struct vfsmount *mnt);
58505+char *gr_to_filename1(const struct dentry *dentry,
58506+ const struct vfsmount *mnt);
58507+char *gr_to_filename2(const struct dentry *dentry,
58508+ const struct vfsmount *mnt);
58509+char *gr_to_filename3(const struct dentry *dentry,
58510+ const struct vfsmount *mnt);
58511+
58512+extern int grsec_enable_harden_ptrace;
58513+extern int grsec_enable_link;
58514+extern int grsec_enable_fifo;
58515+extern int grsec_enable_execve;
58516+extern int grsec_enable_shm;
58517+extern int grsec_enable_execlog;
58518+extern int grsec_enable_signal;
58519+extern int grsec_enable_audit_ptrace;
58520+extern int grsec_enable_forkfail;
58521+extern int grsec_enable_time;
58522+extern int grsec_enable_rofs;
58523+extern int grsec_enable_chroot_shmat;
58524+extern int grsec_enable_chroot_mount;
58525+extern int grsec_enable_chroot_double;
58526+extern int grsec_enable_chroot_pivot;
58527+extern int grsec_enable_chroot_chdir;
58528+extern int grsec_enable_chroot_chmod;
58529+extern int grsec_enable_chroot_mknod;
58530+extern int grsec_enable_chroot_fchdir;
58531+extern int grsec_enable_chroot_nice;
58532+extern int grsec_enable_chroot_execlog;
58533+extern int grsec_enable_chroot_caps;
58534+extern int grsec_enable_chroot_sysctl;
58535+extern int grsec_enable_chroot_unix;
58536+extern int grsec_enable_tpe;
58537+extern int grsec_tpe_gid;
58538+extern int grsec_enable_tpe_all;
58539+extern int grsec_enable_tpe_invert;
58540+extern int grsec_enable_socket_all;
58541+extern int grsec_socket_all_gid;
58542+extern int grsec_enable_socket_client;
58543+extern int grsec_socket_client_gid;
58544+extern int grsec_enable_socket_server;
58545+extern int grsec_socket_server_gid;
58546+extern int grsec_audit_gid;
58547+extern int grsec_enable_group;
58548+extern int grsec_enable_audit_textrel;
58549+extern int grsec_enable_log_rwxmaps;
58550+extern int grsec_enable_mount;
58551+extern int grsec_enable_chdir;
58552+extern int grsec_resource_logging;
58553+extern int grsec_enable_blackhole;
58554+extern int grsec_lastack_retries;
58555+extern int grsec_enable_brute;
58556+extern int grsec_lock;
58557+
58558+extern spinlock_t grsec_alert_lock;
58559+extern unsigned long grsec_alert_wtime;
58560+extern unsigned long grsec_alert_fyet;
58561+
58562+extern spinlock_t grsec_audit_lock;
58563+
58564+extern rwlock_t grsec_exec_file_lock;
58565+
58566+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58567+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58568+ (tsk)->exec_file->f_vfsmnt) : "/")
58569+
58570+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58571+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58572+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58573+
58574+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58575+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58576+ (tsk)->exec_file->f_vfsmnt) : "/")
58577+
58578+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58579+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58580+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58581+
58582+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58583+
58584+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58585+
58586+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58587+ (task)->pid, (cred)->uid, \
58588+ (cred)->euid, (cred)->gid, (cred)->egid, \
58589+ gr_parent_task_fullpath(task), \
58590+ (task)->real_parent->comm, (task)->real_parent->pid, \
58591+ (pcred)->uid, (pcred)->euid, \
58592+ (pcred)->gid, (pcred)->egid
58593+
58594+#define GR_CHROOT_CAPS {{ \
58595+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58596+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58597+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58598+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58599+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58600+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58601+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58602+
58603+#define security_learn(normal_msg,args...) \
58604+({ \
58605+ read_lock(&grsec_exec_file_lock); \
58606+ gr_add_learn_entry(normal_msg "\n", ## args); \
58607+ read_unlock(&grsec_exec_file_lock); \
58608+})
58609+
58610+enum {
58611+ GR_DO_AUDIT,
58612+ GR_DONT_AUDIT,
58613+ /* used for non-audit messages that we shouldn't kill the task on */
58614+ GR_DONT_AUDIT_GOOD
58615+};
58616+
58617+enum {
58618+ GR_TTYSNIFF,
58619+ GR_RBAC,
58620+ GR_RBAC_STR,
58621+ GR_STR_RBAC,
58622+ GR_RBAC_MODE2,
58623+ GR_RBAC_MODE3,
58624+ GR_FILENAME,
58625+ GR_SYSCTL_HIDDEN,
58626+ GR_NOARGS,
58627+ GR_ONE_INT,
58628+ GR_ONE_INT_TWO_STR,
58629+ GR_ONE_STR,
58630+ GR_STR_INT,
58631+ GR_TWO_STR_INT,
58632+ GR_TWO_INT,
58633+ GR_TWO_U64,
58634+ GR_THREE_INT,
58635+ GR_FIVE_INT_TWO_STR,
58636+ GR_TWO_STR,
58637+ GR_THREE_STR,
58638+ GR_FOUR_STR,
58639+ GR_STR_FILENAME,
58640+ GR_FILENAME_STR,
58641+ GR_FILENAME_TWO_INT,
58642+ GR_FILENAME_TWO_INT_STR,
58643+ GR_TEXTREL,
58644+ GR_PTRACE,
58645+ GR_RESOURCE,
58646+ GR_CAP,
58647+ GR_SIG,
58648+ GR_SIG2,
58649+ GR_CRASH1,
58650+ GR_CRASH2,
58651+ GR_PSACCT,
58652+ GR_RWXMAP
58653+};
58654+
58655+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58656+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58657+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58658+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58659+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58660+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58661+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58662+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58663+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58664+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58665+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58666+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58667+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58668+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58669+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58670+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58671+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58672+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58673+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58674+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58675+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58676+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58677+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58678+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58679+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58680+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58681+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58682+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58683+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58684+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58685+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58686+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58687+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58688+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58689+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58690+
58691+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58692+
58693+#endif
58694+
58695+#endif
58696diff -urNp linux-3.1.1/include/linux/grmsg.h linux-3.1.1/include/linux/grmsg.h
58697--- linux-3.1.1/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58698+++ linux-3.1.1/include/linux/grmsg.h 2011-11-16 18:40:31.000000000 -0500
58699@@ -0,0 +1,108 @@
58700+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58701+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58702+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58703+#define GR_STOPMOD_MSG "denied modification of module state by "
58704+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58705+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58706+#define GR_IOPERM_MSG "denied use of ioperm() by "
58707+#define GR_IOPL_MSG "denied use of iopl() by "
58708+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58709+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58710+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58711+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58712+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58713+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58714+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58715+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58716+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58717+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58718+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58719+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58720+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58721+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58722+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58723+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58724+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58725+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58726+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58727+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58728+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58729+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58730+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58731+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58732+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58733+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58734+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58735+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58736+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58737+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58738+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58739+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58740+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58741+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58742+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58743+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58744+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58745+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58746+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58747+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58748+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58749+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58750+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58751+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58752+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58753+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58754+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58755+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58756+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58757+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58758+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58759+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58760+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58761+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58762+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58763+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58764+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58765+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58766+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58767+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58768+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58769+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58770+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58771+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58772+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58773+#define GR_NICE_CHROOT_MSG "denied priority change by "
58774+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58775+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58776+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58777+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58778+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58779+#define GR_TIME_MSG "time set by "
58780+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58781+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58782+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58783+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58784+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58785+#define GR_BIND_MSG "denied bind() by "
58786+#define GR_CONNECT_MSG "denied connect() by "
58787+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58788+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58789+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58790+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58791+#define GR_CAP_ACL_MSG "use of %s denied for "
58792+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58793+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58794+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58795+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58796+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58797+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58798+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58799+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58800+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58801+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58802+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58803+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58804+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58805+#define GR_VM86_MSG "denied use of vm86 by "
58806+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58807+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58808diff -urNp linux-3.1.1/include/linux/grsecurity.h linux-3.1.1/include/linux/grsecurity.h
58809--- linux-3.1.1/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58810+++ linux-3.1.1/include/linux/grsecurity.h 2011-11-17 00:16:10.000000000 -0500
58811@@ -0,0 +1,228 @@
58812+#ifndef GR_SECURITY_H
58813+#define GR_SECURITY_H
58814+#include <linux/fs.h>
58815+#include <linux/fs_struct.h>
58816+#include <linux/binfmts.h>
58817+#include <linux/gracl.h>
58818+
58819+/* notify of brain-dead configs */
58820+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58821+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58822+#endif
58823+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58824+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58825+#endif
58826+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58827+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58828+#endif
58829+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58830+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58831+#endif
58832+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58833+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58834+#endif
58835+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58836+#error "CONFIG_PAX enabled, but no PaX options are enabled."
58837+#endif
58838+
58839+#include <linux/compat.h>
58840+
58841+struct user_arg_ptr {
58842+#ifdef CONFIG_COMPAT
58843+ bool is_compat;
58844+#endif
58845+ union {
58846+ const char __user *const __user *native;
58847+#ifdef CONFIG_COMPAT
58848+ compat_uptr_t __user *compat;
58849+#endif
58850+ } ptr;
58851+};
58852+
58853+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58854+void gr_handle_brute_check(void);
58855+void gr_handle_kernel_exploit(void);
58856+int gr_process_user_ban(void);
58857+
58858+char gr_roletype_to_char(void);
58859+
58860+int gr_acl_enable_at_secure(void);
58861+
58862+int gr_check_user_change(int real, int effective, int fs);
58863+int gr_check_group_change(int real, int effective, int fs);
58864+
58865+void gr_del_task_from_ip_table(struct task_struct *p);
58866+
58867+int gr_pid_is_chrooted(struct task_struct *p);
58868+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58869+int gr_handle_chroot_nice(void);
58870+int gr_handle_chroot_sysctl(const int op);
58871+int gr_handle_chroot_setpriority(struct task_struct *p,
58872+ const int niceval);
58873+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58874+int gr_handle_chroot_chroot(const struct dentry *dentry,
58875+ const struct vfsmount *mnt);
58876+void gr_handle_chroot_chdir(struct path *path);
58877+int gr_handle_chroot_chmod(const struct dentry *dentry,
58878+ const struct vfsmount *mnt, const int mode);
58879+int gr_handle_chroot_mknod(const struct dentry *dentry,
58880+ const struct vfsmount *mnt, const int mode);
58881+int gr_handle_chroot_mount(const struct dentry *dentry,
58882+ const struct vfsmount *mnt,
58883+ const char *dev_name);
58884+int gr_handle_chroot_pivot(void);
58885+int gr_handle_chroot_unix(const pid_t pid);
58886+
58887+int gr_handle_rawio(const struct inode *inode);
58888+
58889+void gr_handle_ioperm(void);
58890+void gr_handle_iopl(void);
58891+
58892+int gr_tpe_allow(const struct file *file);
58893+
58894+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58895+void gr_clear_chroot_entries(struct task_struct *task);
58896+
58897+void gr_log_forkfail(const int retval);
58898+void gr_log_timechange(void);
58899+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58900+void gr_log_chdir(const struct dentry *dentry,
58901+ const struct vfsmount *mnt);
58902+void gr_log_chroot_exec(const struct dentry *dentry,
58903+ const struct vfsmount *mnt);
58904+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
58905+void gr_log_remount(const char *devname, const int retval);
58906+void gr_log_unmount(const char *devname, const int retval);
58907+void gr_log_mount(const char *from, const char *to, const int retval);
58908+void gr_log_textrel(struct vm_area_struct *vma);
58909+void gr_log_rwxmmap(struct file *file);
58910+void gr_log_rwxmprotect(struct file *file);
58911+
58912+int gr_handle_follow_link(const struct inode *parent,
58913+ const struct inode *inode,
58914+ const struct dentry *dentry,
58915+ const struct vfsmount *mnt);
58916+int gr_handle_fifo(const struct dentry *dentry,
58917+ const struct vfsmount *mnt,
58918+ const struct dentry *dir, const int flag,
58919+ const int acc_mode);
58920+int gr_handle_hardlink(const struct dentry *dentry,
58921+ const struct vfsmount *mnt,
58922+ struct inode *inode,
58923+ const int mode, const char *to);
58924+
58925+int gr_is_capable(const int cap);
58926+int gr_is_capable_nolog(const int cap);
58927+void gr_learn_resource(const struct task_struct *task, const int limit,
58928+ const unsigned long wanted, const int gt);
58929+void gr_copy_label(struct task_struct *tsk);
58930+void gr_handle_crash(struct task_struct *task, const int sig);
58931+int gr_handle_signal(const struct task_struct *p, const int sig);
58932+int gr_check_crash_uid(const uid_t uid);
58933+int gr_check_protected_task(const struct task_struct *task);
58934+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58935+int gr_acl_handle_mmap(const struct file *file,
58936+ const unsigned long prot);
58937+int gr_acl_handle_mprotect(const struct file *file,
58938+ const unsigned long prot);
58939+int gr_check_hidden_task(const struct task_struct *tsk);
58940+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58941+ const struct vfsmount *mnt);
58942+__u32 gr_acl_handle_utime(const struct dentry *dentry,
58943+ const struct vfsmount *mnt);
58944+__u32 gr_acl_handle_access(const struct dentry *dentry,
58945+ const struct vfsmount *mnt, const int fmode);
58946+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58947+ const struct vfsmount *mnt, mode_t mode);
58948+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58949+ const struct vfsmount *mnt, mode_t mode);
58950+__u32 gr_acl_handle_chown(const struct dentry *dentry,
58951+ const struct vfsmount *mnt);
58952+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58953+ const struct vfsmount *mnt);
58954+int gr_handle_ptrace(struct task_struct *task, const long request);
58955+int gr_handle_proc_ptrace(struct task_struct *task);
58956+__u32 gr_acl_handle_execve(const struct dentry *dentry,
58957+ const struct vfsmount *mnt);
58958+int gr_check_crash_exec(const struct file *filp);
58959+int gr_acl_is_enabled(void);
58960+void gr_set_kernel_label(struct task_struct *task);
58961+void gr_set_role_label(struct task_struct *task, const uid_t uid,
58962+ const gid_t gid);
58963+int gr_set_proc_label(const struct dentry *dentry,
58964+ const struct vfsmount *mnt,
58965+ const int unsafe_share);
58966+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58967+ const struct vfsmount *mnt);
58968+__u32 gr_acl_handle_open(const struct dentry *dentry,
58969+ const struct vfsmount *mnt, int acc_mode);
58970+__u32 gr_acl_handle_creat(const struct dentry *dentry,
58971+ const struct dentry *p_dentry,
58972+ const struct vfsmount *p_mnt,
58973+ int open_flags, int acc_mode, const int imode);
58974+void gr_handle_create(const struct dentry *dentry,
58975+ const struct vfsmount *mnt);
58976+void gr_handle_proc_create(const struct dentry *dentry,
58977+ const struct inode *inode);
58978+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58979+ const struct dentry *parent_dentry,
58980+ const struct vfsmount *parent_mnt,
58981+ const int mode);
58982+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58983+ const struct dentry *parent_dentry,
58984+ const struct vfsmount *parent_mnt);
58985+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58986+ const struct vfsmount *mnt);
58987+void gr_handle_delete(const ino_t ino, const dev_t dev);
58988+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58989+ const struct vfsmount *mnt);
58990+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58991+ const struct dentry *parent_dentry,
58992+ const struct vfsmount *parent_mnt,
58993+ const char *from);
58994+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58995+ const struct dentry *parent_dentry,
58996+ const struct vfsmount *parent_mnt,
58997+ const struct dentry *old_dentry,
58998+ const struct vfsmount *old_mnt, const char *to);
58999+int gr_acl_handle_rename(struct dentry *new_dentry,
59000+ struct dentry *parent_dentry,
59001+ const struct vfsmount *parent_mnt,
59002+ struct dentry *old_dentry,
59003+ struct inode *old_parent_inode,
59004+ struct vfsmount *old_mnt, const char *newname);
59005+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59006+ struct dentry *old_dentry,
59007+ struct dentry *new_dentry,
59008+ struct vfsmount *mnt, const __u8 replace);
59009+__u32 gr_check_link(const struct dentry *new_dentry,
59010+ const struct dentry *parent_dentry,
59011+ const struct vfsmount *parent_mnt,
59012+ const struct dentry *old_dentry,
59013+ const struct vfsmount *old_mnt);
59014+int gr_acl_handle_filldir(const struct file *file, const char *name,
59015+ const unsigned int namelen, const ino_t ino);
59016+
59017+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59018+ const struct vfsmount *mnt);
59019+void gr_acl_handle_exit(void);
59020+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59021+int gr_acl_handle_procpidmem(const struct task_struct *task);
59022+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59023+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59024+void gr_audit_ptrace(struct task_struct *task);
59025+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59026+
59027+#ifdef CONFIG_GRKERNSEC
59028+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59029+void gr_handle_vm86(void);
59030+void gr_handle_mem_readwrite(u64 from, u64 to);
59031+
59032+extern int grsec_enable_dmesg;
59033+extern int grsec_disable_privio;
59034+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59035+extern int grsec_enable_chroot_findtask;
59036+#endif
59037+#endif
59038+
59039+#endif
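
Note: the hunk above only declares the grsecurity hook entry points; the call sites live elsewhere in the patch. As a rough, hypothetical sketch of the calling convention (not taken from the patch), the fragment below assumes the usual pattern that the __u32 ACL handlers return 0 to deny and that the int helpers return non-zero to allow; do_example_open and its arguments are illustrative only.

    /* Hypothetical caller sketch -- not part of the patch. */
    #include <linux/fs.h>
    #include <linux/errno.h>
    #include <linux/grsecurity.h>

    static int do_example_open(struct dentry *dentry, struct vfsmount *mnt,
                               struct file *file, int acc_mode)
    {
            /* assumed convention: 0 from the __u32 ACL handlers means "deny" */
            if (!gr_acl_handle_open(dentry, mnt, acc_mode))
                    return -EACCES;
            /* assumed convention: gr_tpe_allow() returns non-zero to allow */
            if (!gr_tpe_allow(file))
                    return -EACCES;
            return 0;
    }
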
59040diff -urNp linux-3.1.1/include/linux/grsock.h linux-3.1.1/include/linux/grsock.h
59041--- linux-3.1.1/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
59042+++ linux-3.1.1/include/linux/grsock.h 2011-11-16 18:40:31.000000000 -0500
59043@@ -0,0 +1,19 @@
59044+#ifndef __GRSOCK_H
59045+#define __GRSOCK_H
59046+
59047+extern void gr_attach_curr_ip(const struct sock *sk);
59048+extern int gr_handle_sock_all(const int family, const int type,
59049+ const int protocol);
59050+extern int gr_handle_sock_server(const struct sockaddr *sck);
59051+extern int gr_handle_sock_server_other(const struct sock *sck);
59052+extern int gr_handle_sock_client(const struct sockaddr *sck);
59053+extern int gr_search_connect(struct socket * sock,
59054+ struct sockaddr_in * addr);
59055+extern int gr_search_bind(struct socket * sock,
59056+ struct sockaddr_in * addr);
59057+extern int gr_search_listen(struct socket * sock);
59058+extern int gr_search_accept(struct socket * sock);
59059+extern int gr_search_socket(const int domain, const int type,
59060+ const int protocol);
59061+
59062+#endif
59063diff -urNp linux-3.1.1/include/linux/hid.h linux-3.1.1/include/linux/hid.h
59064--- linux-3.1.1/include/linux/hid.h 2011-11-11 15:19:27.000000000 -0500
59065+++ linux-3.1.1/include/linux/hid.h 2011-11-16 18:39:08.000000000 -0500
59066@@ -676,7 +676,7 @@ struct hid_ll_driver {
59067 unsigned int code, int value);
59068
59069 int (*parse)(struct hid_device *hdev);
59070-};
59071+} __no_const;
59072
59073 #define PM_HINT_FULLON 1<<5
59074 #define PM_HINT_NORMAL 1<<1
59075diff -urNp linux-3.1.1/include/linux/highmem.h linux-3.1.1/include/linux/highmem.h
59076--- linux-3.1.1/include/linux/highmem.h 2011-11-11 15:19:27.000000000 -0500
59077+++ linux-3.1.1/include/linux/highmem.h 2011-11-16 18:39:08.000000000 -0500
59078@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
59079 kunmap_atomic(kaddr, KM_USER0);
59080 }
59081
59082+static inline void sanitize_highpage(struct page *page)
59083+{
59084+ void *kaddr;
59085+ unsigned long flags;
59086+
59087+ local_irq_save(flags);
59088+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59089+ clear_page(kaddr);
59090+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59091+ local_irq_restore(flags);
59092+}
59093+
59094 static inline void zero_user_segments(struct page *page,
59095 unsigned start1, unsigned end1,
59096 unsigned start2, unsigned end2)
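
Note: sanitize_highpage(), added above, clears a page through a temporary atomic mapping with interrupts disabled. A minimal caller sketch, assuming a hypothetical free path that wants to scrub page contents before the page is reused (the function name is illustrative):

    #include <linux/highmem.h>

    /* Hypothetical example: scrub a just-freed page. */
    static void example_scrub_on_free(struct page *page)
    {
            sanitize_highpage(page);   /* clears the page via kmap_atomic */
    }
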
59097diff -urNp linux-3.1.1/include/linux/i2c.h linux-3.1.1/include/linux/i2c.h
59098--- linux-3.1.1/include/linux/i2c.h 2011-11-11 15:19:27.000000000 -0500
59099+++ linux-3.1.1/include/linux/i2c.h 2011-11-16 18:39:08.000000000 -0500
59100@@ -346,6 +346,7 @@ struct i2c_algorithm {
59101 /* To determine what the adapter supports */
59102 u32 (*functionality) (struct i2c_adapter *);
59103 };
59104+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59105
59106 /*
59107 * i2c_adapter is the structure used to identify a physical i2c bus along
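
Note: the *_no_const typedefs exist so that the few users who must fill an ops structure at run time keep a writable instance, while statically initialized instances can be constified. A minimal sketch assuming a hypothetical driver that picks its functionality callback at init time; the adapter wiring and example_func are illustrative, not from the patch.

    #include <linux/i2c.h>

    static u32 example_func(struct i2c_adapter *adap)
    {
            return I2C_FUNC_I2C;   /* minimal capability mask */
    }

    /* writable because it is filled in at run time */
    static i2c_algorithm_no_const example_algo;

    static void example_init(struct i2c_adapter *adap)
    {
            example_algo.functionality = example_func;
            adap->algo = &example_algo;
    }
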
59108diff -urNp linux-3.1.1/include/linux/i2o.h linux-3.1.1/include/linux/i2o.h
59109--- linux-3.1.1/include/linux/i2o.h 2011-11-11 15:19:27.000000000 -0500
59110+++ linux-3.1.1/include/linux/i2o.h 2011-11-16 18:39:08.000000000 -0500
59111@@ -564,7 +564,7 @@ struct i2o_controller {
59112 struct i2o_device *exec; /* Executive */
59113 #if BITS_PER_LONG == 64
59114 spinlock_t context_list_lock; /* lock for context_list */
59115- atomic_t context_list_counter; /* needed for unique contexts */
59116+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59117 struct list_head context_list; /* list of context id's
59118 and pointers */
59119 #endif
59120diff -urNp linux-3.1.1/include/linux/init.h linux-3.1.1/include/linux/init.h
59121--- linux-3.1.1/include/linux/init.h 2011-11-11 15:19:27.000000000 -0500
59122+++ linux-3.1.1/include/linux/init.h 2011-11-16 18:39:08.000000000 -0500
59123@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
59124
59125 /* Each module must use one module_init(). */
59126 #define module_init(initfn) \
59127- static inline initcall_t __inittest(void) \
59128+ static inline __used initcall_t __inittest(void) \
59129 { return initfn; } \
59130 int init_module(void) __attribute__((alias(#initfn)));
59131
59132 /* This is only required if you want to be unloadable. */
59133 #define module_exit(exitfn) \
59134- static inline exitcall_t __exittest(void) \
59135+ static inline __used exitcall_t __exittest(void) \
59136 { return exitfn; } \
59137 void cleanup_module(void) __attribute__((alias(#exitfn)));
59138
59139diff -urNp linux-3.1.1/include/linux/init_task.h linux-3.1.1/include/linux/init_task.h
59140--- linux-3.1.1/include/linux/init_task.h 2011-11-11 15:19:27.000000000 -0500
59141+++ linux-3.1.1/include/linux/init_task.h 2011-11-16 18:39:08.000000000 -0500
59142@@ -126,6 +126,12 @@ extern struct cred init_cred;
59143 # define INIT_PERF_EVENTS(tsk)
59144 #endif
59145
59146+#ifdef CONFIG_X86
59147+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59148+#else
59149+#define INIT_TASK_THREAD_INFO
59150+#endif
59151+
59152 /*
59153 * INIT_TASK is used to set up the first task table, touch at
59154 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59155@@ -164,6 +170,7 @@ extern struct cred init_cred;
59156 RCU_INIT_POINTER(.cred, &init_cred), \
59157 .comm = "swapper", \
59158 .thread = INIT_THREAD, \
59159+ INIT_TASK_THREAD_INFO \
59160 .fs = &init_fs, \
59161 .files = &init_files, \
59162 .signal = &init_signals, \
59163diff -urNp linux-3.1.1/include/linux/intel-iommu.h linux-3.1.1/include/linux/intel-iommu.h
59164--- linux-3.1.1/include/linux/intel-iommu.h 2011-11-11 15:19:27.000000000 -0500
59165+++ linux-3.1.1/include/linux/intel-iommu.h 2011-11-16 18:39:08.000000000 -0500
59166@@ -296,7 +296,7 @@ struct iommu_flush {
59167 u8 fm, u64 type);
59168 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59169 unsigned int size_order, u64 type);
59170-};
59171+} __no_const;
59172
59173 enum {
59174 SR_DMAR_FECTL_REG,
59175diff -urNp linux-3.1.1/include/linux/interrupt.h linux-3.1.1/include/linux/interrupt.h
59176--- linux-3.1.1/include/linux/interrupt.h 2011-11-11 15:19:27.000000000 -0500
59177+++ linux-3.1.1/include/linux/interrupt.h 2011-11-16 18:39:08.000000000 -0500
59178@@ -425,7 +425,7 @@ enum
59179 /* map softirq index to softirq name. update 'softirq_to_name' in
59180 * kernel/softirq.c when adding a new softirq.
59181 */
59182-extern char *softirq_to_name[NR_SOFTIRQS];
59183+extern const char * const softirq_to_name[NR_SOFTIRQS];
59184
59185 /* softirq mask and active fields moved to irq_cpustat_t in
59186 * asm/hardirq.h to get better cache usage. KAO
59187@@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59188
59189 struct softirq_action
59190 {
59191- void (*action)(struct softirq_action *);
59192+ void (*action)(void);
59193 };
59194
59195 asmlinkage void do_softirq(void);
59196 asmlinkage void __do_softirq(void);
59197-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59198+extern void open_softirq(int nr, void (*action)(void));
59199 extern void softirq_init(void);
59200 static inline void __raise_softirq_irqoff(unsigned int nr)
59201 {
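
Note: with the change above, softirq handlers no longer receive a struct softirq_action pointer, so both the handler prototype and open_softirq() lose the argument. A minimal registration sketch against the new prototype; the handler and the EXAMPLE_SOFTIRQ stand-in index are hypothetical.

    #include <linux/interrupt.h>

    #define EXAMPLE_SOFTIRQ HI_SOFTIRQ   /* stand-in index, illustration only */

    static void example_softirq_action(void)
    {
            /* deferred work; no softirq_action argument any more */
    }

    static void example_register(void)
    {
            open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);
    }
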
59202diff -urNp linux-3.1.1/include/linux/kallsyms.h linux-3.1.1/include/linux/kallsyms.h
59203--- linux-3.1.1/include/linux/kallsyms.h 2011-11-11 15:19:27.000000000 -0500
59204+++ linux-3.1.1/include/linux/kallsyms.h 2011-11-16 18:40:31.000000000 -0500
59205@@ -15,7 +15,8 @@
59206
59207 struct module;
59208
59209-#ifdef CONFIG_KALLSYMS
59210+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59211+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59212 /* Lookup the address for a symbol. Returns 0 if not found. */
59213 unsigned long kallsyms_lookup_name(const char *name);
59214
59215@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
59216 /* Stupid that this does nothing, but I didn't create this mess. */
59217 #define __print_symbol(fmt, addr)
59218 #endif /*CONFIG_KALLSYMS*/
59219+#else /* when included by kallsyms.c, vsnprintf.c, or
59220+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59221+extern void __print_symbol(const char *fmt, unsigned long address);
59222+extern int sprint_backtrace(char *buffer, unsigned long address);
59223+extern int sprint_symbol(char *buffer, unsigned long address);
59224+const char *kallsyms_lookup(unsigned long addr,
59225+ unsigned long *symbolsize,
59226+ unsigned long *offset,
59227+ char **modname, char *namebuf);
59228+#endif
59229
59230 /* This macro allows us to keep printk typechecking */
59231 static void __check_printsym_format(const char *fmt, ...)
59232diff -urNp linux-3.1.1/include/linux/kgdb.h linux-3.1.1/include/linux/kgdb.h
59233--- linux-3.1.1/include/linux/kgdb.h 2011-11-11 15:19:27.000000000 -0500
59234+++ linux-3.1.1/include/linux/kgdb.h 2011-11-16 18:39:08.000000000 -0500
59235@@ -53,7 +53,7 @@ extern int kgdb_connected;
59236 extern int kgdb_io_module_registered;
59237
59238 extern atomic_t kgdb_setting_breakpoint;
59239-extern atomic_t kgdb_cpu_doing_single_step;
59240+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59241
59242 extern struct task_struct *kgdb_usethread;
59243 extern struct task_struct *kgdb_contthread;
59244@@ -251,7 +251,7 @@ struct kgdb_arch {
59245 void (*disable_hw_break)(struct pt_regs *regs);
59246 void (*remove_all_hw_break)(void);
59247 void (*correct_hw_break)(void);
59248-};
59249+} __do_const;
59250
59251 /**
59252 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59253@@ -276,7 +276,7 @@ struct kgdb_io {
59254 void (*pre_exception) (void);
59255 void (*post_exception) (void);
59256 int is_console;
59257-};
59258+} __do_const;
59259
59260 extern struct kgdb_arch arch_kgdb_ops;
59261
59262diff -urNp linux-3.1.1/include/linux/kmod.h linux-3.1.1/include/linux/kmod.h
59263--- linux-3.1.1/include/linux/kmod.h 2011-11-11 15:19:27.000000000 -0500
59264+++ linux-3.1.1/include/linux/kmod.h 2011-11-16 18:40:31.000000000 -0500
59265@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
59266 * usually useless though. */
59267 extern int __request_module(bool wait, const char *name, ...) \
59268 __attribute__((format(printf, 2, 3)));
59269+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59270+ __attribute__((format(printf, 3, 4)));
59271 #define request_module(mod...) __request_module(true, mod)
59272 #define request_module_nowait(mod...) __request_module(false, mod)
59273 #define try_then_request_module(x, mod...) \
59274diff -urNp linux-3.1.1/include/linux/kvm_host.h linux-3.1.1/include/linux/kvm_host.h
59275--- linux-3.1.1/include/linux/kvm_host.h 2011-11-11 15:19:27.000000000 -0500
59276+++ linux-3.1.1/include/linux/kvm_host.h 2011-11-16 18:39:08.000000000 -0500
59277@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59278 void vcpu_load(struct kvm_vcpu *vcpu);
59279 void vcpu_put(struct kvm_vcpu *vcpu);
59280
59281-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59282+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59283 struct module *module);
59284 void kvm_exit(void);
59285
59286@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59287 struct kvm_guest_debug *dbg);
59288 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59289
59290-int kvm_arch_init(void *opaque);
59291+int kvm_arch_init(const void *opaque);
59292 void kvm_arch_exit(void);
59293
59294 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59295diff -urNp linux-3.1.1/include/linux/libata.h linux-3.1.1/include/linux/libata.h
59296--- linux-3.1.1/include/linux/libata.h 2011-11-11 15:19:27.000000000 -0500
59297+++ linux-3.1.1/include/linux/libata.h 2011-11-16 18:39:08.000000000 -0500
59298@@ -909,7 +909,7 @@ struct ata_port_operations {
59299 * fields must be pointers.
59300 */
59301 const struct ata_port_operations *inherits;
59302-};
59303+} __do_const;
59304
59305 struct ata_port_info {
59306 unsigned long flags;
59307diff -urNp linux-3.1.1/include/linux/mca.h linux-3.1.1/include/linux/mca.h
59308--- linux-3.1.1/include/linux/mca.h 2011-11-11 15:19:27.000000000 -0500
59309+++ linux-3.1.1/include/linux/mca.h 2011-11-16 18:39:08.000000000 -0500
59310@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59311 int region);
59312 void * (*mca_transform_memory)(struct mca_device *,
59313 void *memory);
59314-};
59315+} __no_const;
59316
59317 struct mca_bus {
59318 u64 default_dma_mask;
59319diff -urNp linux-3.1.1/include/linux/memory.h linux-3.1.1/include/linux/memory.h
59320--- linux-3.1.1/include/linux/memory.h 2011-11-11 15:19:27.000000000 -0500
59321+++ linux-3.1.1/include/linux/memory.h 2011-11-16 18:39:08.000000000 -0500
59322@@ -144,7 +144,7 @@ struct memory_accessor {
59323 size_t count);
59324 ssize_t (*write)(struct memory_accessor *, const char *buf,
59325 off_t offset, size_t count);
59326-};
59327+} __no_const;
59328
59329 /*
59330 * Kernel text modification mutex, used for code patching. Users of this lock
59331diff -urNp linux-3.1.1/include/linux/mfd/abx500.h linux-3.1.1/include/linux/mfd/abx500.h
59332--- linux-3.1.1/include/linux/mfd/abx500.h 2011-11-11 15:19:27.000000000 -0500
59333+++ linux-3.1.1/include/linux/mfd/abx500.h 2011-11-16 18:39:08.000000000 -0500
59334@@ -234,6 +234,7 @@ struct abx500_ops {
59335 int (*event_registers_startup_state_get) (struct device *, u8 *);
59336 int (*startup_irq_enabled) (struct device *, unsigned int);
59337 };
59338+typedef struct abx500_ops __no_const abx500_ops_no_const;
59339
59340 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59341 void abx500_remove_ops(struct device *dev);
59342diff -urNp linux-3.1.1/include/linux/mm.h linux-3.1.1/include/linux/mm.h
59343--- linux-3.1.1/include/linux/mm.h 2011-11-11 15:19:27.000000000 -0500
59344+++ linux-3.1.1/include/linux/mm.h 2011-11-16 18:39:08.000000000 -0500
59345@@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void
59346
59347 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59348 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59349+
59350+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59351+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59352+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59353+#else
59354 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59355+#endif
59356+
59357 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59358 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59359
59360@@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page);
59361 int set_page_dirty_lock(struct page *page);
59362 int clear_page_dirty_for_io(struct page *page);
59363
59364-/* Is the vma a continuation of the stack vma above it? */
59365-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59366-{
59367- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59368-}
59369-
59370-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59371- unsigned long addr)
59372-{
59373- return (vma->vm_flags & VM_GROWSDOWN) &&
59374- (vma->vm_start == addr) &&
59375- !vma_growsdown(vma->vm_prev, addr);
59376-}
59377-
59378-/* Is the vma a continuation of the stack vma below it? */
59379-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59380-{
59381- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59382-}
59383-
59384-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59385- unsigned long addr)
59386-{
59387- return (vma->vm_flags & VM_GROWSUP) &&
59388- (vma->vm_end == addr) &&
59389- !vma_growsup(vma->vm_next, addr);
59390-}
59391-
59392 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59393 unsigned long old_addr, struct vm_area_struct *new_vma,
59394 unsigned long new_addr, unsigned long len);
59395@@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct ta
59396 }
59397 #endif
59398
59399+#ifdef CONFIG_MMU
59400+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59401+#else
59402+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59403+{
59404+ return __pgprot(0);
59405+}
59406+#endif
59407+
59408 int vma_wants_writenotify(struct vm_area_struct *vma);
59409
59410 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59411@@ -1417,6 +1405,7 @@ out:
59412 }
59413
59414 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59415+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59416
59417 extern unsigned long do_brk(unsigned long, unsigned long);
59418
59419@@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(
59420 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59421 struct vm_area_struct **pprev);
59422
59423+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59424+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59425+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59426+
59427 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59428 NULL if none. Assume start_addr < end_addr. */
59429 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59430@@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(st
59431 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59432 }
59433
59434-#ifdef CONFIG_MMU
59435-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59436-#else
59437-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59438-{
59439- return __pgprot(0);
59440-}
59441-#endif
59442-
59443 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59444 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59445 unsigned long pfn, unsigned long size, pgprot_t);
59446@@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long
59447 extern int sysctl_memory_failure_early_kill;
59448 extern int sysctl_memory_failure_recovery;
59449 extern void shake_page(struct page *p, int access);
59450-extern atomic_long_t mce_bad_pages;
59451+extern atomic_long_unchecked_t mce_bad_pages;
59452 extern int soft_offline_page(struct page *page, int flags);
59453
59454 extern void dump_page(struct page *page);
59455@@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct p
59456 unsigned int pages_per_huge_page);
59457 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59458
59459+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59460+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59461+#else
59462+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59463+#endif
59464+
59465 #endif /* __KERNEL__ */
59466 #endif /* _LINUX_MM_H */
59467diff -urNp linux-3.1.1/include/linux/mm_types.h linux-3.1.1/include/linux/mm_types.h
59468--- linux-3.1.1/include/linux/mm_types.h 2011-11-11 15:19:27.000000000 -0500
59469+++ linux-3.1.1/include/linux/mm_types.h 2011-11-16 18:39:08.000000000 -0500
59470@@ -230,6 +230,8 @@ struct vm_area_struct {
59471 #ifdef CONFIG_NUMA
59472 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59473 #endif
59474+
59475+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59476 };
59477
59478 struct core_thread {
59479@@ -362,6 +364,24 @@ struct mm_struct {
59480 #ifdef CONFIG_CPUMASK_OFFSTACK
59481 struct cpumask cpumask_allocation;
59482 #endif
59483+
59484+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59485+ unsigned long pax_flags;
59486+#endif
59487+
59488+#ifdef CONFIG_PAX_DLRESOLVE
59489+ unsigned long call_dl_resolve;
59490+#endif
59491+
59492+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59493+ unsigned long call_syscall;
59494+#endif
59495+
59496+#ifdef CONFIG_PAX_ASLR
59497+ unsigned long delta_mmap; /* randomized offset */
59498+ unsigned long delta_stack; /* randomized offset */
59499+#endif
59500+
59501 };
59502
59503 static inline void mm_init_cpumask(struct mm_struct *mm)
59504diff -urNp linux-3.1.1/include/linux/mmu_notifier.h linux-3.1.1/include/linux/mmu_notifier.h
59505--- linux-3.1.1/include/linux/mmu_notifier.h 2011-11-11 15:19:27.000000000 -0500
59506+++ linux-3.1.1/include/linux/mmu_notifier.h 2011-11-16 18:39:08.000000000 -0500
59507@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
59508 */
59509 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59510 ({ \
59511- pte_t __pte; \
59512+ pte_t ___pte; \
59513 struct vm_area_struct *___vma = __vma; \
59514 unsigned long ___address = __address; \
59515- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59516+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59517 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59518- __pte; \
59519+ ___pte; \
59520 })
59521
59522 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59523diff -urNp linux-3.1.1/include/linux/mmzone.h linux-3.1.1/include/linux/mmzone.h
59524--- linux-3.1.1/include/linux/mmzone.h 2011-11-11 15:19:27.000000000 -0500
59525+++ linux-3.1.1/include/linux/mmzone.h 2011-11-16 18:39:08.000000000 -0500
59526@@ -356,7 +356,7 @@ struct zone {
59527 unsigned long flags; /* zone flags, see below */
59528
59529 /* Zone statistics */
59530- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59531+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59532
59533 /*
59534 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
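
Note: vm_stat becomes atomic_long_unchecked_t, evidently to exempt these plain statistics counters from the overflow checking applied to ordinary atomics elsewhere in the patch. The sketch below assumes matching accessors (atomic_long_read_unchecked() and friends), which are not shown in this section; the helper itself is hypothetical.

    #include <linux/mmzone.h>

    /* Hypothetical helper; assumes atomic_long_*_unchecked() accessors exist. */
    static unsigned long example_zone_free_pages(struct zone *zone)
    {
            return atomic_long_read_unchecked(&zone->vm_stat[NR_FREE_PAGES]);
    }
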
59535diff -urNp linux-3.1.1/include/linux/mod_devicetable.h linux-3.1.1/include/linux/mod_devicetable.h
59536--- linux-3.1.1/include/linux/mod_devicetable.h 2011-11-11 15:19:27.000000000 -0500
59537+++ linux-3.1.1/include/linux/mod_devicetable.h 2011-11-16 18:39:08.000000000 -0500
59538@@ -12,7 +12,7 @@
59539 typedef unsigned long kernel_ulong_t;
59540 #endif
59541
59542-#define PCI_ANY_ID (~0)
59543+#define PCI_ANY_ID ((__u16)~0)
59544
59545 struct pci_device_id {
59546 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59547@@ -131,7 +131,7 @@ struct usb_device_id {
59548 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59549 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59550
59551-#define HID_ANY_ID (~0)
59552+#define HID_ANY_ID (~0U)
59553
59554 struct hid_device_id {
59555 __u16 bus;
59556diff -urNp linux-3.1.1/include/linux/module.h linux-3.1.1/include/linux/module.h
59557--- linux-3.1.1/include/linux/module.h 2011-11-11 15:19:27.000000000 -0500
59558+++ linux-3.1.1/include/linux/module.h 2011-11-16 18:39:08.000000000 -0500
59559@@ -16,6 +16,7 @@
59560 #include <linux/kobject.h>
59561 #include <linux/moduleparam.h>
59562 #include <linux/tracepoint.h>
59563+#include <linux/fs.h>
59564
59565 #include <linux/percpu.h>
59566 #include <asm/module.h>
59567@@ -327,19 +328,16 @@ struct module
59568 int (*init)(void);
59569
59570 /* If this is non-NULL, vfree after init() returns */
59571- void *module_init;
59572+ void *module_init_rx, *module_init_rw;
59573
59574 /* Here is the actual code + data, vfree'd on unload. */
59575- void *module_core;
59576+ void *module_core_rx, *module_core_rw;
59577
59578 /* Here are the sizes of the init and core sections */
59579- unsigned int init_size, core_size;
59580+ unsigned int init_size_rw, core_size_rw;
59581
59582 /* The size of the executable code in each section. */
59583- unsigned int init_text_size, core_text_size;
59584-
59585- /* Size of RO sections of the module (text+rodata) */
59586- unsigned int init_ro_size, core_ro_size;
59587+ unsigned int init_size_rx, core_size_rx;
59588
59589 /* Arch-specific module values */
59590 struct mod_arch_specific arch;
59591@@ -395,6 +393,10 @@ struct module
59592 #ifdef CONFIG_EVENT_TRACING
59593 struct ftrace_event_call **trace_events;
59594 unsigned int num_trace_events;
59595+ struct file_operations trace_id;
59596+ struct file_operations trace_enable;
59597+ struct file_operations trace_format;
59598+ struct file_operations trace_filter;
59599 #endif
59600 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59601 unsigned int num_ftrace_callsites;
59602@@ -445,16 +447,46 @@ bool is_module_address(unsigned long add
59603 bool is_module_percpu_address(unsigned long addr);
59604 bool is_module_text_address(unsigned long addr);
59605
59606+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59607+{
59608+
59609+#ifdef CONFIG_PAX_KERNEXEC
59610+ if (ktla_ktva(addr) >= (unsigned long)start &&
59611+ ktla_ktva(addr) < (unsigned long)start + size)
59612+ return 1;
59613+#endif
59614+
59615+ return ((void *)addr >= start && (void *)addr < start + size);
59616+}
59617+
59618+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59619+{
59620+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59621+}
59622+
59623+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59624+{
59625+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59626+}
59627+
59628+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59629+{
59630+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59631+}
59632+
59633+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59634+{
59635+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59636+}
59637+
59638 static inline int within_module_core(unsigned long addr, struct module *mod)
59639 {
59640- return (unsigned long)mod->module_core <= addr &&
59641- addr < (unsigned long)mod->module_core + mod->core_size;
59642+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59643 }
59644
59645 static inline int within_module_init(unsigned long addr, struct module *mod)
59646 {
59647- return (unsigned long)mod->module_init <= addr &&
59648- addr < (unsigned long)mod->module_init + mod->init_size;
59649+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59650 }
59651
59652 /* Search for module by name: must hold module_mutex. */
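
Note: splitting module_core/module_init into _rx and _rw halves means an address can be classified as module text or module data directly. A minimal sketch using the helpers defined above; the classifier function is illustrative only.

    #include <linux/module.h>

    /* Hypothetical classifier built on the helpers introduced above. */
    static const char *example_classify(unsigned long addr, struct module *mod)
    {
            if (within_module_core_rx(addr, mod) || within_module_init_rx(addr, mod))
                    return "module text (RX)";
            if (within_module_core_rw(addr, mod) || within_module_init_rw(addr, mod))
                    return "module data (RW)";
            return "not in this module";
    }
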
59653diff -urNp linux-3.1.1/include/linux/moduleloader.h linux-3.1.1/include/linux/moduleloader.h
59654--- linux-3.1.1/include/linux/moduleloader.h 2011-11-11 15:19:27.000000000 -0500
59655+++ linux-3.1.1/include/linux/moduleloader.h 2011-11-16 18:39:08.000000000 -0500
59656@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(st
59657 sections. Returns NULL on failure. */
59658 void *module_alloc(unsigned long size);
59659
59660+#ifdef CONFIG_PAX_KERNEXEC
59661+void *module_alloc_exec(unsigned long size);
59662+#else
59663+#define module_alloc_exec(x) module_alloc(x)
59664+#endif
59665+
59666 /* Free memory returned from module_alloc. */
59667 void module_free(struct module *mod, void *module_region);
59668
59669+#ifdef CONFIG_PAX_KERNEXEC
59670+void module_free_exec(struct module *mod, void *module_region);
59671+#else
59672+#define module_free_exec(x, y) module_free((x), (y))
59673+#endif
59674+
59675 /* Apply the given relocation to the (simplified) ELF. Return -error
59676 or 0. */
59677 int apply_relocate(Elf_Shdr *sechdrs,
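
Note: module_alloc_exec() pairs with the module_core_rx/_rw split: under KERNEXEC, executable module memory comes from a separate allocator, otherwise it falls back to module_alloc(). A rough sketch of how a loader might obtain both halves; the size parameters are hypothetical and error handling is simplified.

    #include <linux/module.h>
    #include <linux/moduleloader.h>
    #include <linux/errno.h>

    /* Hypothetical allocation of the two core regions. */
    static int example_alloc_core(struct module *mod,
                                  unsigned long size_rx, unsigned long size_rw)
    {
            mod->module_core_rx = module_alloc_exec(size_rx);
            mod->module_core_rw = module_alloc(size_rw);
            if (!mod->module_core_rx || !mod->module_core_rw)
                    return -ENOMEM;
            mod->core_size_rx = size_rx;
            mod->core_size_rw = size_rw;
            return 0;
    }
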
59678diff -urNp linux-3.1.1/include/linux/moduleparam.h linux-3.1.1/include/linux/moduleparam.h
59679--- linux-3.1.1/include/linux/moduleparam.h 2011-11-11 15:19:27.000000000 -0500
59680+++ linux-3.1.1/include/linux/moduleparam.h 2011-11-16 18:39:08.000000000 -0500
59681@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
59682 * @len is usually just sizeof(string).
59683 */
59684 #define module_param_string(name, string, len, perm) \
59685- static const struct kparam_string __param_string_##name \
59686+ static const struct kparam_string __param_string_##name __used \
59687 = { len, string }; \
59688 __module_param_call(MODULE_PARAM_PREFIX, name, \
59689 &param_ops_string, \
59690@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
59691 * module_param_named() for why this might be necessary.
59692 */
59693 #define module_param_array_named(name, array, type, nump, perm) \
59694- static const struct kparam_array __param_arr_##name \
59695+ static const struct kparam_array __param_arr_##name __used \
59696 = { .max = ARRAY_SIZE(array), .num = nump, \
59697 .ops = &param_ops_##type, \
59698 .elemsize = sizeof(array[0]), .elem = array }; \
59699diff -urNp linux-3.1.1/include/linux/namei.h linux-3.1.1/include/linux/namei.h
59700--- linux-3.1.1/include/linux/namei.h 2011-11-11 15:19:27.000000000 -0500
59701+++ linux-3.1.1/include/linux/namei.h 2011-11-16 18:39:08.000000000 -0500
59702@@ -24,7 +24,7 @@ struct nameidata {
59703 unsigned seq;
59704 int last_type;
59705 unsigned depth;
59706- char *saved_names[MAX_NESTED_LINKS + 1];
59707+ const char *saved_names[MAX_NESTED_LINKS + 1];
59708
59709 /* Intent data */
59710 union {
59711@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
59712 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59713 extern void unlock_rename(struct dentry *, struct dentry *);
59714
59715-static inline void nd_set_link(struct nameidata *nd, char *path)
59716+static inline void nd_set_link(struct nameidata *nd, const char *path)
59717 {
59718 nd->saved_names[nd->depth] = path;
59719 }
59720
59721-static inline char *nd_get_link(struct nameidata *nd)
59722+static inline const char *nd_get_link(const struct nameidata *nd)
59723 {
59724 return nd->saved_names[nd->depth];
59725 }
59726diff -urNp linux-3.1.1/include/linux/netdevice.h linux-3.1.1/include/linux/netdevice.h
59727--- linux-3.1.1/include/linux/netdevice.h 2011-11-11 15:19:27.000000000 -0500
59728+++ linux-3.1.1/include/linux/netdevice.h 2011-11-16 18:39:08.000000000 -0500
59729@@ -944,6 +944,7 @@ struct net_device_ops {
59730 int (*ndo_set_features)(struct net_device *dev,
59731 u32 features);
59732 };
59733+typedef struct net_device_ops __no_const net_device_ops_no_const;
59734
59735 /*
59736 * The DEVICE structure.
59737diff -urNp linux-3.1.1/include/linux/netfilter/xt_gradm.h linux-3.1.1/include/linux/netfilter/xt_gradm.h
59738--- linux-3.1.1/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59739+++ linux-3.1.1/include/linux/netfilter/xt_gradm.h 2011-11-16 18:40:31.000000000 -0500
59740@@ -0,0 +1,9 @@
59741+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59742+#define _LINUX_NETFILTER_XT_GRADM_H 1
59743+
59744+struct xt_gradm_mtinfo {
59745+ __u16 flags;
59746+ __u16 invflags;
59747+};
59748+
59749+#endif
59750diff -urNp linux-3.1.1/include/linux/of_pdt.h linux-3.1.1/include/linux/of_pdt.h
59751--- linux-3.1.1/include/linux/of_pdt.h 2011-11-11 15:19:27.000000000 -0500
59752+++ linux-3.1.1/include/linux/of_pdt.h 2011-11-16 18:39:08.000000000 -0500
59753@@ -32,7 +32,7 @@ struct of_pdt_ops {
59754
59755 /* return 0 on success; fill in 'len' with number of bytes in path */
59756 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59757-};
59758+} __no_const;
59759
59760 extern void *prom_early_alloc(unsigned long size);
59761
59762diff -urNp linux-3.1.1/include/linux/oprofile.h linux-3.1.1/include/linux/oprofile.h
59763--- linux-3.1.1/include/linux/oprofile.h 2011-11-11 15:19:27.000000000 -0500
59764+++ linux-3.1.1/include/linux/oprofile.h 2011-11-16 18:39:08.000000000 -0500
59765@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
59766 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59767 char const * name, ulong * val);
59768
59769-/** Create a file for read-only access to an atomic_t. */
59770+/** Create a file for read-only access to an atomic_unchecked_t. */
59771 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59772- char const * name, atomic_t * val);
59773+ char const * name, atomic_unchecked_t * val);
59774
59775 /** create a directory */
59776 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59777diff -urNp linux-3.1.1/include/linux/padata.h linux-3.1.1/include/linux/padata.h
59778--- linux-3.1.1/include/linux/padata.h 2011-11-11 15:19:27.000000000 -0500
59779+++ linux-3.1.1/include/linux/padata.h 2011-11-16 18:39:08.000000000 -0500
59780@@ -129,7 +129,7 @@ struct parallel_data {
59781 struct padata_instance *pinst;
59782 struct padata_parallel_queue __percpu *pqueue;
59783 struct padata_serial_queue __percpu *squeue;
59784- atomic_t seq_nr;
59785+ atomic_unchecked_t seq_nr;
59786 atomic_t reorder_objects;
59787 atomic_t refcnt;
59788 unsigned int max_seq_nr;
59789diff -urNp linux-3.1.1/include/linux/perf_event.h linux-3.1.1/include/linux/perf_event.h
59790--- linux-3.1.1/include/linux/perf_event.h 2011-11-11 15:19:27.000000000 -0500
59791+++ linux-3.1.1/include/linux/perf_event.h 2011-11-16 18:39:08.000000000 -0500
59792@@ -745,8 +745,8 @@ struct perf_event {
59793
59794 enum perf_event_active_state state;
59795 unsigned int attach_state;
59796- local64_t count;
59797- atomic64_t child_count;
59798+ local64_t count; /* PaX: fix it one day */
59799+ atomic64_unchecked_t child_count;
59800
59801 /*
59802 * These are the total time in nanoseconds that the event
59803@@ -797,8 +797,8 @@ struct perf_event {
59804 * These accumulate total time (in nanoseconds) that children
59805 * events have been enabled and running, respectively.
59806 */
59807- atomic64_t child_total_time_enabled;
59808- atomic64_t child_total_time_running;
59809+ atomic64_unchecked_t child_total_time_enabled;
59810+ atomic64_unchecked_t child_total_time_running;
59811
59812 /*
59813 * Protect attach/detach and child_list:
59814diff -urNp linux-3.1.1/include/linux/pipe_fs_i.h linux-3.1.1/include/linux/pipe_fs_i.h
59815--- linux-3.1.1/include/linux/pipe_fs_i.h 2011-11-11 15:19:27.000000000 -0500
59816+++ linux-3.1.1/include/linux/pipe_fs_i.h 2011-11-16 18:39:08.000000000 -0500
59817@@ -46,9 +46,9 @@ struct pipe_buffer {
59818 struct pipe_inode_info {
59819 wait_queue_head_t wait;
59820 unsigned int nrbufs, curbuf, buffers;
59821- unsigned int readers;
59822- unsigned int writers;
59823- unsigned int waiting_writers;
59824+ atomic_t readers;
59825+ atomic_t writers;
59826+ atomic_t waiting_writers;
59827 unsigned int r_counter;
59828 unsigned int w_counter;
59829 struct page *tmp_page;
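
Note: turning the pipe reader/writer counts into atomic_t means call sites move from plain increments and reads to the atomic accessors. A minimal sketch of the adjusted pattern; the functions are illustrative, not call sites from the patch.

    #include <linux/pipe_fs_i.h>
    #include <linux/atomic.h>

    /* Hypothetical open-path fragment: count a new reader atomically. */
    static void example_pipe_add_reader(struct pipe_inode_info *pipe)
    {
            atomic_inc(&pipe->readers);             /* was: pipe->readers++ */
    }

    static int example_pipe_has_writers(struct pipe_inode_info *pipe)
    {
            return atomic_read(&pipe->writers) > 0; /* was: pipe->writers */
    }
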
59830diff -urNp linux-3.1.1/include/linux/pm_runtime.h linux-3.1.1/include/linux/pm_runtime.h
59831--- linux-3.1.1/include/linux/pm_runtime.h 2011-11-11 15:19:27.000000000 -0500
59832+++ linux-3.1.1/include/linux/pm_runtime.h 2011-11-16 18:39:08.000000000 -0500
59833@@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_
59834
59835 static inline void pm_runtime_mark_last_busy(struct device *dev)
59836 {
59837- ACCESS_ONCE(dev->power.last_busy) = jiffies;
59838+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
59839 }
59840
59841 #else /* !CONFIG_PM_RUNTIME */
59842diff -urNp linux-3.1.1/include/linux/poison.h linux-3.1.1/include/linux/poison.h
59843--- linux-3.1.1/include/linux/poison.h 2011-11-11 15:19:27.000000000 -0500
59844+++ linux-3.1.1/include/linux/poison.h 2011-11-16 18:39:08.000000000 -0500
59845@@ -19,8 +19,8 @@
59846 * under normal circumstances, used to verify that nobody uses
59847 * non-initialized list entries.
59848 */
59849-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59850-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59851+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59852+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59853
59854 /********** include/linux/timer.h **********/
59855 /*
59856diff -urNp linux-3.1.1/include/linux/preempt.h linux-3.1.1/include/linux/preempt.h
59857--- linux-3.1.1/include/linux/preempt.h 2011-11-11 15:19:27.000000000 -0500
59858+++ linux-3.1.1/include/linux/preempt.h 2011-11-16 18:39:08.000000000 -0500
59859@@ -123,7 +123,7 @@ struct preempt_ops {
59860 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59861 void (*sched_out)(struct preempt_notifier *notifier,
59862 struct task_struct *next);
59863-};
59864+} __no_const;
59865
59866 /**
59867 * preempt_notifier - key for installing preemption notifiers
59868diff -urNp linux-3.1.1/include/linux/proc_fs.h linux-3.1.1/include/linux/proc_fs.h
59869--- linux-3.1.1/include/linux/proc_fs.h 2011-11-11 15:19:27.000000000 -0500
59870+++ linux-3.1.1/include/linux/proc_fs.h 2011-11-16 18:40:31.000000000 -0500
59871@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59872 return proc_create_data(name, mode, parent, proc_fops, NULL);
59873 }
59874
59875+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59876+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59877+{
59878+#ifdef CONFIG_GRKERNSEC_PROC_USER
59879+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59880+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59881+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59882+#else
59883+ return proc_create_data(name, mode, parent, proc_fops, NULL);
59884+#endif
59885+}
59886+
59887+
59888 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59889 mode_t mode, struct proc_dir_entry *base,
59890 read_proc_t *read_proc, void * data)
59891@@ -258,7 +271,7 @@ union proc_op {
59892 int (*proc_show)(struct seq_file *m,
59893 struct pid_namespace *ns, struct pid *pid,
59894 struct task_struct *task);
59895-};
59896+} __no_const;
59897
59898 struct ctl_table_header;
59899 struct ctl_table;
59900diff -urNp linux-3.1.1/include/linux/ptrace.h linux-3.1.1/include/linux/ptrace.h
59901--- linux-3.1.1/include/linux/ptrace.h 2011-11-11 15:19:27.000000000 -0500
59902+++ linux-3.1.1/include/linux/ptrace.h 2011-11-16 18:40:31.000000000 -0500
59903@@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_
59904 extern void exit_ptrace(struct task_struct *tracer);
59905 #define PTRACE_MODE_READ 1
59906 #define PTRACE_MODE_ATTACH 2
59907-/* Returns 0 on success, -errno on denial. */
59908-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59909 /* Returns true on success, false on denial. */
59910 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59911+/* Returns true on success, false on denial. */
59912+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59913
59914 static inline int ptrace_reparented(struct task_struct *child)
59915 {
59916diff -urNp linux-3.1.1/include/linux/random.h linux-3.1.1/include/linux/random.h
59917--- linux-3.1.1/include/linux/random.h 2011-11-11 15:19:27.000000000 -0500
59918+++ linux-3.1.1/include/linux/random.h 2011-11-16 18:39:08.000000000 -0500
59919@@ -69,12 +69,17 @@ void srandom32(u32 seed);
59920
59921 u32 prandom32(struct rnd_state *);
59922
59923+static inline unsigned long pax_get_random_long(void)
59924+{
59925+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59926+}
59927+
59928 /*
59929 * Handle minimum values for seeds
59930 */
59931 static inline u32 __seed(u32 x, u32 m)
59932 {
59933- return (x < m) ? x + m : x;
59934+ return (x <= m) ? x + m + 1 : x;
59935 }
59936
59937 /**
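
Note: pax_get_random_long() composes two random32() values into a full unsigned long on 64-bit. One plausible consumer is the per-mm ASLR deltas added to mm_struct earlier in this section; the sketch below shows a page-aligned derivation of delta_mmap, where EXAMPLE_RAND_BITS is a stand-in bit count, not a value taken from the patch.

    #include <linux/random.h>
    #include <linux/mm.h>
    #include <linux/mm_types.h>

    #define EXAMPLE_RAND_BITS 16   /* stand-in, illustration only */

    /* Hypothetical: derive a page-aligned randomized mmap offset for an mm. */
    static void example_init_mmap_delta(struct mm_struct *mm)
    {
    #ifdef CONFIG_PAX_ASLR
            mm->delta_mmap = (pax_get_random_long() &
                              ((1UL << EXAMPLE_RAND_BITS) - 1)) << PAGE_SHIFT;
    #endif
    }
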
59938diff -urNp linux-3.1.1/include/linux/reboot.h linux-3.1.1/include/linux/reboot.h
59939--- linux-3.1.1/include/linux/reboot.h 2011-11-11 15:19:27.000000000 -0500
59940+++ linux-3.1.1/include/linux/reboot.h 2011-11-16 18:39:08.000000000 -0500
59941@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(st
59942 * Architecture-specific implementations of sys_reboot commands.
59943 */
59944
59945-extern void machine_restart(char *cmd);
59946-extern void machine_halt(void);
59947-extern void machine_power_off(void);
59948+extern void machine_restart(char *cmd) __noreturn;
59949+extern void machine_halt(void) __noreturn;
59950+extern void machine_power_off(void) __noreturn;
59951
59952 extern void machine_shutdown(void);
59953 struct pt_regs;
59954@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struc
59955 */
59956
59957 extern void kernel_restart_prepare(char *cmd);
59958-extern void kernel_restart(char *cmd);
59959-extern void kernel_halt(void);
59960-extern void kernel_power_off(void);
59961+extern void kernel_restart(char *cmd) __noreturn;
59962+extern void kernel_halt(void) __noreturn;
59963+extern void kernel_power_off(void) __noreturn;
59964
59965 extern int C_A_D; /* for sysctl */
59966 void ctrl_alt_del(void);
59967@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
59968 * Emergency restart, callable from an interrupt handler.
59969 */
59970
59971-extern void emergency_restart(void);
59972+extern void emergency_restart(void) __noreturn;
59973 #include <asm/emergency-restart.h>
59974
59975 #endif
59976diff -urNp linux-3.1.1/include/linux/reiserfs_fs.h linux-3.1.1/include/linux/reiserfs_fs.h
59977--- linux-3.1.1/include/linux/reiserfs_fs.h 2011-11-11 15:19:27.000000000 -0500
59978+++ linux-3.1.1/include/linux/reiserfs_fs.h 2011-11-16 18:39:08.000000000 -0500
59979@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
59980 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59981
59982 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59983-#define get_generation(s) atomic_read (&fs_generation(s))
59984+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59985 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59986 #define __fs_changed(gen,s) (gen != get_generation (s))
59987 #define fs_changed(gen,s) \
59988diff -urNp linux-3.1.1/include/linux/reiserfs_fs_sb.h linux-3.1.1/include/linux/reiserfs_fs_sb.h
59989--- linux-3.1.1/include/linux/reiserfs_fs_sb.h 2011-11-11 15:19:27.000000000 -0500
59990+++ linux-3.1.1/include/linux/reiserfs_fs_sb.h 2011-11-16 18:39:08.000000000 -0500
59991@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
59992 /* Comment? -Hans */
59993 wait_queue_head_t s_wait;
59994 /* To be obsoleted soon by per buffer seals.. -Hans */
59995- atomic_t s_generation_counter; // increased by one every time the
59996+ atomic_unchecked_t s_generation_counter; // increased by one every time the
59997 // tree gets re-balanced
59998 unsigned long s_properties; /* File system properties. Currently holds
59999 on-disk FS format */
60000diff -urNp linux-3.1.1/include/linux/relay.h linux-3.1.1/include/linux/relay.h
60001--- linux-3.1.1/include/linux/relay.h 2011-11-11 15:19:27.000000000 -0500
60002+++ linux-3.1.1/include/linux/relay.h 2011-11-16 18:39:08.000000000 -0500
60003@@ -159,7 +159,7 @@ struct rchan_callbacks
60004 * The callback should return 0 if successful, negative if not.
60005 */
60006 int (*remove_buf_file)(struct dentry *dentry);
60007-};
60008+} __no_const;
60009
60010 /*
60011 * CONFIG_RELAY kernel API, kernel/relay.c
60012diff -urNp linux-3.1.1/include/linux/rfkill.h linux-3.1.1/include/linux/rfkill.h
60013--- linux-3.1.1/include/linux/rfkill.h 2011-11-11 15:19:27.000000000 -0500
60014+++ linux-3.1.1/include/linux/rfkill.h 2011-11-16 18:39:08.000000000 -0500
60015@@ -147,6 +147,7 @@ struct rfkill_ops {
60016 void (*query)(struct rfkill *rfkill, void *data);
60017 int (*set_block)(void *data, bool blocked);
60018 };
60019+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60020
60021 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60022 /**
60023diff -urNp linux-3.1.1/include/linux/rmap.h linux-3.1.1/include/linux/rmap.h
60024--- linux-3.1.1/include/linux/rmap.h 2011-11-11 15:19:27.000000000 -0500
60025+++ linux-3.1.1/include/linux/rmap.h 2011-11-16 18:39:08.000000000 -0500
60026@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
60027 void anon_vma_init(void); /* create anon_vma_cachep */
60028 int anon_vma_prepare(struct vm_area_struct *);
60029 void unlink_anon_vmas(struct vm_area_struct *);
60030-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60031-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60032+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60033+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60034 void __anon_vma_link(struct vm_area_struct *);
60035
60036 static inline void anon_vma_merge(struct vm_area_struct *vma,
60037diff -urNp linux-3.1.1/include/linux/sched.h linux-3.1.1/include/linux/sched.h
60038--- linux-3.1.1/include/linux/sched.h 2011-11-11 15:19:27.000000000 -0500
60039+++ linux-3.1.1/include/linux/sched.h 2011-11-16 18:40:31.000000000 -0500
60040@@ -100,6 +100,7 @@ struct bio_list;
60041 struct fs_struct;
60042 struct perf_event_context;
60043 struct blk_plug;
60044+struct linux_binprm;
60045
60046 /*
60047 * List of flags we want to share for kernel threads,
60048@@ -380,10 +381,13 @@ struct user_namespace;
60049 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60050
60051 extern int sysctl_max_map_count;
60052+extern unsigned long sysctl_heap_stack_gap;
60053
60054 #include <linux/aio.h>
60055
60056 #ifdef CONFIG_MMU
60057+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60058+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60059 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60060 extern unsigned long
60061 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60062@@ -629,6 +633,17 @@ struct signal_struct {
60063 #ifdef CONFIG_TASKSTATS
60064 struct taskstats *stats;
60065 #endif
60066+
60067+#ifdef CONFIG_GRKERNSEC
60068+ u32 curr_ip;
60069+ u32 saved_ip;
60070+ u32 gr_saddr;
60071+ u32 gr_daddr;
60072+ u16 gr_sport;
60073+ u16 gr_dport;
60074+ u8 used_accept:1;
60075+#endif
60076+
60077 #ifdef CONFIG_AUDIT
60078 unsigned audit_tty;
60079 struct tty_audit_buf *tty_audit_buf;
60080@@ -710,6 +725,11 @@ struct user_struct {
60081 struct key *session_keyring; /* UID's default session keyring */
60082 #endif
60083
60084+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60085+ unsigned int banned;
60086+ unsigned long ban_expires;
60087+#endif
60088+
60089 /* Hash table maintenance information */
60090 struct hlist_node uidhash_node;
60091 uid_t uid;
60092@@ -1340,8 +1360,8 @@ struct task_struct {
60093 struct list_head thread_group;
60094
60095 struct completion *vfork_done; /* for vfork() */
60096- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60097- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60098+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60099+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60100
60101 cputime_t utime, stime, utimescaled, stimescaled;
60102 cputime_t gtime;
60103@@ -1357,13 +1377,6 @@ struct task_struct {
60104 struct task_cputime cputime_expires;
60105 struct list_head cpu_timers[3];
60106
60107-/* process credentials */
60108- const struct cred __rcu *real_cred; /* objective and real subjective task
60109- * credentials (COW) */
60110- const struct cred __rcu *cred; /* effective (overridable) subjective task
60111- * credentials (COW) */
60112- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60113-
60114 char comm[TASK_COMM_LEN]; /* executable name excluding path
60115 - access with [gs]et_task_comm (which lock
60116 it with task_lock())
60117@@ -1380,8 +1393,16 @@ struct task_struct {
60118 #endif
60119 /* CPU-specific state of this task */
60120 struct thread_struct thread;
60121+/* thread_info moved to task_struct */
60122+#ifdef CONFIG_X86
60123+ struct thread_info tinfo;
60124+#endif
60125 /* filesystem information */
60126 struct fs_struct *fs;
60127+
60128+ const struct cred __rcu *cred; /* effective (overridable) subjective task
60129+ * credentials (COW) */
60130+
60131 /* open file information */
60132 struct files_struct *files;
60133 /* namespaces */
60134@@ -1428,6 +1449,11 @@ struct task_struct {
60135 struct rt_mutex_waiter *pi_blocked_on;
60136 #endif
60137
60138+/* process credentials */
60139+ const struct cred __rcu *real_cred; /* objective and real subjective task
60140+ * credentials (COW) */
60141+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60142+
60143 #ifdef CONFIG_DEBUG_MUTEXES
60144 /* mutex deadlock detection */
60145 struct mutex_waiter *blocked_on;
60146@@ -1537,6 +1563,21 @@ struct task_struct {
60147 unsigned long default_timer_slack_ns;
60148
60149 struct list_head *scm_work_list;
60150+
60151+#ifdef CONFIG_GRKERNSEC
60152+ /* grsecurity */
60153+ struct dentry *gr_chroot_dentry;
60154+ struct acl_subject_label *acl;
60155+ struct acl_role_label *role;
60156+ struct file *exec_file;
60157+ u16 acl_role_id;
60158+ /* is this the task that authenticated to the special role */
60159+ u8 acl_sp_role;
60160+ u8 is_writable;
60161+ u8 brute;
60162+ u8 gr_is_chrooted;
60163+#endif
60164+
60165 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60166 /* Index of current stored address in ret_stack */
60167 int curr_ret_stack;
60168@@ -1571,6 +1612,57 @@ struct task_struct {
60169 #endif
60170 };
60171
60172+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60173+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60174+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60175+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60176+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60177+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60178+
60179+#ifdef CONFIG_PAX_SOFTMODE
60180+extern int pax_softmode;
60181+#endif
60182+
60183+extern int pax_check_flags(unsigned long *);
60184+
60185+/* if tsk != current then task_lock must be held on it */
60186+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60187+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60188+{
60189+ if (likely(tsk->mm))
60190+ return tsk->mm->pax_flags;
60191+ else
60192+ return 0UL;
60193+}
60194+
60195+/* if tsk != current then task_lock must be held on it */
60196+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60197+{
60198+ if (likely(tsk->mm)) {
60199+ tsk->mm->pax_flags = flags;
60200+ return 0;
60201+ }
60202+ return -EINVAL;
60203+}
60204+#endif
60205+
60206+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60207+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60208+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60209+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60210+#endif
60211+
60212+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60213+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60214+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60215+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60216+
60217+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60218+extern void pax_track_stack(void);
60219+#else
60220+static inline void pax_track_stack(void) {}
60221+#endif
60222+
60223 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60224 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60225
60226@@ -2074,7 +2166,9 @@ void yield(void);
60227 extern struct exec_domain default_exec_domain;
60228
60229 union thread_union {
60230+#ifndef CONFIG_X86
60231 struct thread_info thread_info;
60232+#endif
60233 unsigned long stack[THREAD_SIZE/sizeof(long)];
60234 };
60235
60236@@ -2107,6 +2201,7 @@ extern struct pid_namespace init_pid_ns;
60237 */
60238
60239 extern struct task_struct *find_task_by_vpid(pid_t nr);
60240+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60241 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60242 struct pid_namespace *ns);
60243
60244@@ -2243,7 +2338,7 @@ extern void __cleanup_sighand(struct sig
60245 extern void exit_itimers(struct signal_struct *);
60246 extern void flush_itimer_signals(void);
60247
60248-extern NORET_TYPE void do_group_exit(int);
60249+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60250
60251 extern void daemonize(const char *, ...);
60252 extern int allow_signal(int);
60253@@ -2408,13 +2503,17 @@ static inline unsigned long *end_of_stac
60254
60255 #endif
60256
60257-static inline int object_is_on_stack(void *obj)
60258+static inline int object_starts_on_stack(void *obj)
60259 {
60260- void *stack = task_stack_page(current);
60261+ const void *stack = task_stack_page(current);
60262
60263 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60264 }
60265
60266+#ifdef CONFIG_PAX_USERCOPY
60267+extern int object_is_on_stack(const void *obj, unsigned long len);
60268+#endif
60269+
60270 extern void thread_info_cache_init(void);
60271
60272 #ifdef CONFIG_DEBUG_STACK_USAGE
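
Note: check_heap_stack_gap(), declared above together with the sysctl_heap_stack_gap knob, encapsulates the "is this hole big enough (including the guard gap)?" test used when searching for an unmapped area. A minimal, hypothetical sketch of the search-loop shape that results; the bookkeeping is simplified and example_find_area is not a function from the patch.

    #include <linux/sched.h>
    #include <linux/mm.h>
    #include <linux/errno.h>

    /* Hypothetical, simplified bottom-up search using the new helper. */
    static unsigned long example_find_area(struct mm_struct *mm,
                                           unsigned long addr, unsigned long len)
    {
            struct vm_area_struct *vma;

            for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                    if (TASK_SIZE - len < addr)
                            return -ENOMEM;
                    /* enough room before the next vma (and its guard gap)? */
                    if (!vma || check_heap_stack_gap(vma, addr, len))
                            return addr;
                    addr = vma->vm_end;
            }
    }
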
60273diff -urNp linux-3.1.1/include/linux/screen_info.h linux-3.1.1/include/linux/screen_info.h
60274--- linux-3.1.1/include/linux/screen_info.h 2011-11-11 15:19:27.000000000 -0500
60275+++ linux-3.1.1/include/linux/screen_info.h 2011-11-16 18:39:08.000000000 -0500
60276@@ -43,7 +43,8 @@ struct screen_info {
60277 __u16 pages; /* 0x32 */
60278 __u16 vesa_attributes; /* 0x34 */
60279 __u32 capabilities; /* 0x36 */
60280- __u8 _reserved[6]; /* 0x3a */
60281+ __u16 vesapm_size; /* 0x3a */
60282+ __u8 _reserved[4]; /* 0x3c */
60283 } __attribute__((packed));
60284
60285 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60286diff -urNp linux-3.1.1/include/linux/security.h linux-3.1.1/include/linux/security.h
60287--- linux-3.1.1/include/linux/security.h 2011-11-11 15:19:27.000000000 -0500
60288+++ linux-3.1.1/include/linux/security.h 2011-11-16 18:40:31.000000000 -0500
60289@@ -36,6 +36,7 @@
60290 #include <linux/key.h>
60291 #include <linux/xfrm.h>
60292 #include <linux/slab.h>
60293+#include <linux/grsecurity.h>
60294 #include <net/flow.h>
60295
60296 /* Maximum number of letters for an LSM name string */
60297diff -urNp linux-3.1.1/include/linux/seq_file.h linux-3.1.1/include/linux/seq_file.h
60298--- linux-3.1.1/include/linux/seq_file.h 2011-11-11 15:19:27.000000000 -0500
60299+++ linux-3.1.1/include/linux/seq_file.h 2011-11-16 18:39:08.000000000 -0500
60300@@ -33,6 +33,7 @@ struct seq_operations {
60301 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60302 int (*show) (struct seq_file *m, void *v);
60303 };
60304+typedef struct seq_operations __no_const seq_operations_no_const;
60305
60306 #define SEQ_SKIP 1
60307
60308diff -urNp linux-3.1.1/include/linux/shm.h linux-3.1.1/include/linux/shm.h
60309--- linux-3.1.1/include/linux/shm.h 2011-11-11 15:19:27.000000000 -0500
60310+++ linux-3.1.1/include/linux/shm.h 2011-11-16 18:59:58.000000000 -0500
60311@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the ke
60312
60313 /* The task created the shm object. NULL if the task is dead. */
60314 struct task_struct *shm_creator;
60315+#ifdef CONFIG_GRKERNSEC
60316+ time_t shm_createtime;
60317+ pid_t shm_lapid;
60318+#endif
60319 };
60320
60321 /* shm_mode upper byte flags */
60322diff -urNp linux-3.1.1/include/linux/skbuff.h linux-3.1.1/include/linux/skbuff.h
60323--- linux-3.1.1/include/linux/skbuff.h 2011-11-11 15:19:27.000000000 -0500
60324+++ linux-3.1.1/include/linux/skbuff.h 2011-11-16 18:39:08.000000000 -0500
60325@@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamp
60326 */
60327 static inline int skb_queue_empty(const struct sk_buff_head *list)
60328 {
60329- return list->next == (struct sk_buff *)list;
60330+ return list->next == (const struct sk_buff *)list;
60331 }
60332
60333 /**
60334@@ -623,7 +623,7 @@ static inline int skb_queue_empty(const
60335 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60336 const struct sk_buff *skb)
60337 {
60338- return skb->next == (struct sk_buff *)list;
60339+ return skb->next == (const struct sk_buff *)list;
60340 }
60341
60342 /**
60343@@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(con
60344 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60345 const struct sk_buff *skb)
60346 {
60347- return skb->prev == (struct sk_buff *)list;
60348+ return skb->prev == (const struct sk_buff *)list;
60349 }
60350
60351 /**
60352@@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(
60353 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60354 */
60355 #ifndef NET_SKB_PAD
60356-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60357+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60358 #endif
60359
60360 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60361diff -urNp linux-3.1.1/include/linux/slab_def.h linux-3.1.1/include/linux/slab_def.h
60362--- linux-3.1.1/include/linux/slab_def.h 2011-11-11 15:19:27.000000000 -0500
60363+++ linux-3.1.1/include/linux/slab_def.h 2011-11-16 18:39:08.000000000 -0500
60364@@ -68,10 +68,10 @@ struct kmem_cache {
60365 unsigned long node_allocs;
60366 unsigned long node_frees;
60367 unsigned long node_overflow;
60368- atomic_t allochit;
60369- atomic_t allocmiss;
60370- atomic_t freehit;
60371- atomic_t freemiss;
60372+ atomic_unchecked_t allochit;
60373+ atomic_unchecked_t allocmiss;
60374+ atomic_unchecked_t freehit;
60375+ atomic_unchecked_t freemiss;
60376
60377 /*
60378 * If debugging is enabled, then the allocator can add additional
60379diff -urNp linux-3.1.1/include/linux/slab.h linux-3.1.1/include/linux/slab.h
60380--- linux-3.1.1/include/linux/slab.h 2011-11-11 15:19:27.000000000 -0500
60381+++ linux-3.1.1/include/linux/slab.h 2011-11-16 18:39:08.000000000 -0500
60382@@ -11,12 +11,20 @@
60383
60384 #include <linux/gfp.h>
60385 #include <linux/types.h>
60386+#include <linux/err.h>
60387
60388 /*
60389 * Flags to pass to kmem_cache_create().
60390 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60391 */
60392 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60393+
60394+#ifdef CONFIG_PAX_USERCOPY
60395+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60396+#else
60397+#define SLAB_USERCOPY 0x00000000UL
60398+#endif
60399+
60400 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60401 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60402 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60403@@ -87,10 +95,13 @@
60404 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60405 * Both make kfree a no-op.
60406 */
60407-#define ZERO_SIZE_PTR ((void *)16)
60408+#define ZERO_SIZE_PTR \
60409+({ \
60410+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60411+ (void *)(-MAX_ERRNO-1L); \
60412+})
60413
60414-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60415- (unsigned long)ZERO_SIZE_PTR)
60416+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60417
60418 /*
60419 * struct kmem_cache related prototypes
60420@@ -161,6 +172,7 @@ void * __must_check krealloc(const void
60421 void kfree(const void *);
60422 void kzfree(const void *);
60423 size_t ksize(const void *);
60424+void check_object_size(const void *ptr, unsigned long n, bool to);
60425
60426 /*
60427 * Allocator specific definitions. These are mainly used to establish optimized
60428@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t
60429
60430 void __init kmem_cache_init_late(void);
60431
60432+#define kmalloc(x, y) \
60433+({ \
60434+ void *___retval; \
60435+ intoverflow_t ___x = (intoverflow_t)x; \
60436+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60437+ ___retval = NULL; \
60438+ else \
60439+ ___retval = kmalloc((size_t)___x, (y)); \
60440+ ___retval; \
60441+})
60442+
60443+#define kmalloc_node(x, y, z) \
60444+({ \
60445+ void *___retval; \
60446+ intoverflow_t ___x = (intoverflow_t)x; \
60447+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60448+ ___retval = NULL; \
60449+ else \
60450+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60451+ ___retval; \
60452+})
60453+
60454+#define kzalloc(x, y) \
60455+({ \
60456+ void *___retval; \
60457+ intoverflow_t ___x = (intoverflow_t)x; \
60458+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60459+ ___retval = NULL; \
60460+ else \
60461+ ___retval = kzalloc((size_t)___x, (y)); \
60462+ ___retval; \
60463+})
60464+
60465+#define __krealloc(x, y, z) \
60466+({ \
60467+ void *___retval; \
60468+ intoverflow_t ___y = (intoverflow_t)y; \
60469+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60470+ ___retval = NULL; \
60471+ else \
60472+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60473+ ___retval; \
60474+})
60475+
60476+#define krealloc(x, y, z) \
60477+({ \
60478+ void *___retval; \
60479+ intoverflow_t ___y = (intoverflow_t)y; \
60480+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60481+ ___retval = NULL; \
60482+ else \
60483+ ___retval = krealloc((x), (size_t)___y, (z)); \
60484+ ___retval; \
60485+})
60486+
60487 #endif /* _LINUX_SLAB_H */
60488diff -urNp linux-3.1.1/include/linux/slub_def.h linux-3.1.1/include/linux/slub_def.h
60489--- linux-3.1.1/include/linux/slub_def.h 2011-11-11 15:19:27.000000000 -0500
60490+++ linux-3.1.1/include/linux/slub_def.h 2011-11-16 18:39:08.000000000 -0500
60491@@ -85,7 +85,7 @@ struct kmem_cache {
60492 struct kmem_cache_order_objects max;
60493 struct kmem_cache_order_objects min;
60494 gfp_t allocflags; /* gfp flags to use on each alloc */
60495- int refcount; /* Refcount for slab cache destroy */
60496+ atomic_t refcount; /* Refcount for slab cache destroy */
60497 void (*ctor)(void *);
60498 int inuse; /* Offset to metadata */
60499 int align; /* Alignment */
60500@@ -211,7 +211,7 @@ static __always_inline struct kmem_cache
60501 }
60502
60503 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60504-void *__kmalloc(size_t size, gfp_t flags);
60505+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60506
60507 static __always_inline void *
60508 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60509diff -urNp linux-3.1.1/include/linux/sonet.h linux-3.1.1/include/linux/sonet.h
60510--- linux-3.1.1/include/linux/sonet.h 2011-11-11 15:19:27.000000000 -0500
60511+++ linux-3.1.1/include/linux/sonet.h 2011-11-16 18:39:08.000000000 -0500
60512@@ -61,7 +61,7 @@ struct sonet_stats {
60513 #include <linux/atomic.h>
60514
60515 struct k_sonet_stats {
60516-#define __HANDLE_ITEM(i) atomic_t i
60517+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60518 __SONET_ITEMS
60519 #undef __HANDLE_ITEM
60520 };
60521diff -urNp linux-3.1.1/include/linux/sunrpc/clnt.h linux-3.1.1/include/linux/sunrpc/clnt.h
60522--- linux-3.1.1/include/linux/sunrpc/clnt.h 2011-11-11 15:19:27.000000000 -0500
60523+++ linux-3.1.1/include/linux/sunrpc/clnt.h 2011-11-16 18:39:08.000000000 -0500
60524@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
60525 {
60526 switch (sap->sa_family) {
60527 case AF_INET:
60528- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60529+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60530 case AF_INET6:
60531- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60532+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60533 }
60534 return 0;
60535 }
60536@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
60537 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60538 const struct sockaddr *src)
60539 {
60540- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60541+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60542 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60543
60544 dsin->sin_family = ssin->sin_family;
60545@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
60546 if (sa->sa_family != AF_INET6)
60547 return 0;
60548
60549- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60550+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60551 }
60552
60553 #endif /* __KERNEL__ */
60554diff -urNp linux-3.1.1/include/linux/sunrpc/sched.h linux-3.1.1/include/linux/sunrpc/sched.h
60555--- linux-3.1.1/include/linux/sunrpc/sched.h 2011-11-11 15:19:27.000000000 -0500
60556+++ linux-3.1.1/include/linux/sunrpc/sched.h 2011-11-16 18:39:08.000000000 -0500
60557@@ -105,6 +105,7 @@ struct rpc_call_ops {
60558 void (*rpc_call_done)(struct rpc_task *, void *);
60559 void (*rpc_release)(void *);
60560 };
60561+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
60562
60563 struct rpc_task_setup {
60564 struct rpc_task *task;
60565diff -urNp linux-3.1.1/include/linux/sunrpc/svc_rdma.h linux-3.1.1/include/linux/sunrpc/svc_rdma.h
60566--- linux-3.1.1/include/linux/sunrpc/svc_rdma.h 2011-11-11 15:19:27.000000000 -0500
60567+++ linux-3.1.1/include/linux/sunrpc/svc_rdma.h 2011-11-16 18:39:08.000000000 -0500
60568@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60569 extern unsigned int svcrdma_max_requests;
60570 extern unsigned int svcrdma_max_req_size;
60571
60572-extern atomic_t rdma_stat_recv;
60573-extern atomic_t rdma_stat_read;
60574-extern atomic_t rdma_stat_write;
60575-extern atomic_t rdma_stat_sq_starve;
60576-extern atomic_t rdma_stat_rq_starve;
60577-extern atomic_t rdma_stat_rq_poll;
60578-extern atomic_t rdma_stat_rq_prod;
60579-extern atomic_t rdma_stat_sq_poll;
60580-extern atomic_t rdma_stat_sq_prod;
60581+extern atomic_unchecked_t rdma_stat_recv;
60582+extern atomic_unchecked_t rdma_stat_read;
60583+extern atomic_unchecked_t rdma_stat_write;
60584+extern atomic_unchecked_t rdma_stat_sq_starve;
60585+extern atomic_unchecked_t rdma_stat_rq_starve;
60586+extern atomic_unchecked_t rdma_stat_rq_poll;
60587+extern atomic_unchecked_t rdma_stat_rq_prod;
60588+extern atomic_unchecked_t rdma_stat_sq_poll;
60589+extern atomic_unchecked_t rdma_stat_sq_prod;
60590
60591 #define RPCRDMA_VERSION 1
60592
60593diff -urNp linux-3.1.1/include/linux/sysctl.h linux-3.1.1/include/linux/sysctl.h
60594--- linux-3.1.1/include/linux/sysctl.h 2011-11-11 15:19:27.000000000 -0500
60595+++ linux-3.1.1/include/linux/sysctl.h 2011-11-16 18:40:31.000000000 -0500
60596@@ -155,7 +155,11 @@ enum
60597 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60598 };
60599
60600-
60601+#ifdef CONFIG_PAX_SOFTMODE
60602+enum {
60603+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60604+};
60605+#endif
60606
60607 /* CTL_VM names: */
60608 enum
60609@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
60610
60611 extern int proc_dostring(struct ctl_table *, int,
60612 void __user *, size_t *, loff_t *);
60613+extern int proc_dostring_modpriv(struct ctl_table *, int,
60614+ void __user *, size_t *, loff_t *);
60615 extern int proc_dointvec(struct ctl_table *, int,
60616 void __user *, size_t *, loff_t *);
60617 extern int proc_dointvec_minmax(struct ctl_table *, int,
60618diff -urNp linux-3.1.1/include/linux/tty_ldisc.h linux-3.1.1/include/linux/tty_ldisc.h
60619--- linux-3.1.1/include/linux/tty_ldisc.h 2011-11-11 15:19:27.000000000 -0500
60620+++ linux-3.1.1/include/linux/tty_ldisc.h 2011-11-16 18:39:08.000000000 -0500
60621@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60622
60623 struct module *owner;
60624
60625- int refcount;
60626+ atomic_t refcount;
60627 };
60628
60629 struct tty_ldisc {
60630diff -urNp linux-3.1.1/include/linux/types.h linux-3.1.1/include/linux/types.h
60631--- linux-3.1.1/include/linux/types.h 2011-11-11 15:19:27.000000000 -0500
60632+++ linux-3.1.1/include/linux/types.h 2011-11-16 18:39:08.000000000 -0500
60633@@ -213,10 +213,26 @@ typedef struct {
60634 int counter;
60635 } atomic_t;
60636
60637+#ifdef CONFIG_PAX_REFCOUNT
60638+typedef struct {
60639+ int counter;
60640+} atomic_unchecked_t;
60641+#else
60642+typedef atomic_t atomic_unchecked_t;
60643+#endif
60644+
60645 #ifdef CONFIG_64BIT
60646 typedef struct {
60647 long counter;
60648 } atomic64_t;
60649+
60650+#ifdef CONFIG_PAX_REFCOUNT
60651+typedef struct {
60652+ long counter;
60653+} atomic64_unchecked_t;
60654+#else
60655+typedef atomic64_t atomic64_unchecked_t;
60656+#endif
60657 #endif
60658
60659 struct list_head {
60660diff -urNp linux-3.1.1/include/linux/uaccess.h linux-3.1.1/include/linux/uaccess.h
60661--- linux-3.1.1/include/linux/uaccess.h 2011-11-11 15:19:27.000000000 -0500
60662+++ linux-3.1.1/include/linux/uaccess.h 2011-11-16 18:39:08.000000000 -0500
60663@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60664 long ret; \
60665 mm_segment_t old_fs = get_fs(); \
60666 \
60667- set_fs(KERNEL_DS); \
60668 pagefault_disable(); \
60669- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60670- pagefault_enable(); \
60671+ set_fs(KERNEL_DS); \
60672+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60673 set_fs(old_fs); \
60674+ pagefault_enable(); \
60675 ret; \
60676 })
60677
60678diff -urNp linux-3.1.1/include/linux/unaligned/access_ok.h linux-3.1.1/include/linux/unaligned/access_ok.h
60679--- linux-3.1.1/include/linux/unaligned/access_ok.h 2011-11-11 15:19:27.000000000 -0500
60680+++ linux-3.1.1/include/linux/unaligned/access_ok.h 2011-11-16 18:39:08.000000000 -0500
60681@@ -6,32 +6,32 @@
60682
60683 static inline u16 get_unaligned_le16(const void *p)
60684 {
60685- return le16_to_cpup((__le16 *)p);
60686+ return le16_to_cpup((const __le16 *)p);
60687 }
60688
60689 static inline u32 get_unaligned_le32(const void *p)
60690 {
60691- return le32_to_cpup((__le32 *)p);
60692+ return le32_to_cpup((const __le32 *)p);
60693 }
60694
60695 static inline u64 get_unaligned_le64(const void *p)
60696 {
60697- return le64_to_cpup((__le64 *)p);
60698+ return le64_to_cpup((const __le64 *)p);
60699 }
60700
60701 static inline u16 get_unaligned_be16(const void *p)
60702 {
60703- return be16_to_cpup((__be16 *)p);
60704+ return be16_to_cpup((const __be16 *)p);
60705 }
60706
60707 static inline u32 get_unaligned_be32(const void *p)
60708 {
60709- return be32_to_cpup((__be32 *)p);
60710+ return be32_to_cpup((const __be32 *)p);
60711 }
60712
60713 static inline u64 get_unaligned_be64(const void *p)
60714 {
60715- return be64_to_cpup((__be64 *)p);
60716+ return be64_to_cpup((const __be64 *)p);
60717 }
60718
60719 static inline void put_unaligned_le16(u16 val, void *p)
60720diff -urNp linux-3.1.1/include/linux/vermagic.h linux-3.1.1/include/linux/vermagic.h
60721--- linux-3.1.1/include/linux/vermagic.h 2011-11-11 15:19:27.000000000 -0500
60722+++ linux-3.1.1/include/linux/vermagic.h 2011-11-16 18:54:54.000000000 -0500
60723@@ -26,9 +26,35 @@
60724 #define MODULE_ARCH_VERMAGIC ""
60725 #endif
60726
60727+#ifdef CONFIG_PAX_REFCOUNT
60728+#define MODULE_PAX_REFCOUNT "REFCOUNT "
60729+#else
60730+#define MODULE_PAX_REFCOUNT ""
60731+#endif
60732+
60733+#ifdef CONSTIFY_PLUGIN
60734+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
60735+#else
60736+#define MODULE_CONSTIFY_PLUGIN ""
60737+#endif
60738+
60739+#ifdef STACKLEAK_PLUGIN
60740+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
60741+#else
60742+#define MODULE_STACKLEAK_PLUGIN ""
60743+#endif
60744+
60745+#ifdef CONFIG_GRKERNSEC
60746+#define MODULE_GRSEC "GRSEC "
60747+#else
60748+#define MODULE_GRSEC ""
60749+#endif
60750+
60751 #define VERMAGIC_STRING \
60752 UTS_RELEASE " " \
60753 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
60754 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
60755- MODULE_ARCH_VERMAGIC
60756+ MODULE_ARCH_VERMAGIC \
60757+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
60758+ MODULE_GRSEC
60759
60760diff -urNp linux-3.1.1/include/linux/vmalloc.h linux-3.1.1/include/linux/vmalloc.h
60761--- linux-3.1.1/include/linux/vmalloc.h 2011-11-11 15:19:27.000000000 -0500
60762+++ linux-3.1.1/include/linux/vmalloc.h 2011-11-16 18:39:08.000000000 -0500
60763@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining
60764 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60765 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60766 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
60767+
60768+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60769+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
60770+#endif
60771+
60772 /* bits [20..32] reserved for arch specific ioremap internals */
60773
60774 /*
60775@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
60776 # endif
60777 #endif
60778
60779+#define vmalloc(x) \
60780+({ \
60781+ void *___retval; \
60782+ intoverflow_t ___x = (intoverflow_t)x; \
60783+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60784+ ___retval = NULL; \
60785+ else \
60786+ ___retval = vmalloc((unsigned long)___x); \
60787+ ___retval; \
60788+})
60789+
60790+#define vzalloc(x) \
60791+({ \
60792+ void *___retval; \
60793+ intoverflow_t ___x = (intoverflow_t)x; \
60794+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
60795+ ___retval = NULL; \
60796+ else \
60797+ ___retval = vzalloc((unsigned long)___x); \
60798+ ___retval; \
60799+})
60800+
60801+#define __vmalloc(x, y, z) \
60802+({ \
60803+ void *___retval; \
60804+ intoverflow_t ___x = (intoverflow_t)x; \
60805+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60806+ ___retval = NULL; \
60807+ else \
60808+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60809+ ___retval; \
60810+})
60811+
60812+#define vmalloc_user(x) \
60813+({ \
60814+ void *___retval; \
60815+ intoverflow_t ___x = (intoverflow_t)x; \
60816+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60817+ ___retval = NULL; \
60818+ else \
60819+ ___retval = vmalloc_user((unsigned long)___x); \
60820+ ___retval; \
60821+})
60822+
60823+#define vmalloc_exec(x) \
60824+({ \
60825+ void *___retval; \
60826+ intoverflow_t ___x = (intoverflow_t)x; \
60827+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60828+ ___retval = NULL; \
60829+ else \
60830+ ___retval = vmalloc_exec((unsigned long)___x); \
60831+ ___retval; \
60832+})
60833+
60834+#define vmalloc_node(x, y) \
60835+({ \
60836+ void *___retval; \
60837+ intoverflow_t ___x = (intoverflow_t)x; \
60838+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60839+ ___retval = NULL; \
60840+ else \
60841+ ___retval = vmalloc_node((unsigned long)___x, (y));\
60842+ ___retval; \
60843+})
60844+
60845+#define vzalloc_node(x, y) \
60846+({ \
60847+ void *___retval; \
60848+ intoverflow_t ___x = (intoverflow_t)x; \
60849+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
60850+ ___retval = NULL; \
60851+ else \
60852+ ___retval = vzalloc_node((unsigned long)___x, (y));\
60853+ ___retval; \
60854+})
60855+
60856+#define vmalloc_32(x) \
60857+({ \
60858+ void *___retval; \
60859+ intoverflow_t ___x = (intoverflow_t)x; \
60860+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60861+ ___retval = NULL; \
60862+ else \
60863+ ___retval = vmalloc_32((unsigned long)___x); \
60864+ ___retval; \
60865+})
60866+
60867+#define vmalloc_32_user(x) \
60868+({ \
60869+	void *___retval;					\
60870+ intoverflow_t ___x = (intoverflow_t)x; \
60871+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60872+ ___retval = NULL; \
60873+ else \
60874+ ___retval = vmalloc_32_user((unsigned long)___x);\
60875+ ___retval; \
60876+})
60877+
60878 #endif /* _LINUX_VMALLOC_H */
60879diff -urNp linux-3.1.1/include/linux/vmstat.h linux-3.1.1/include/linux/vmstat.h
60880--- linux-3.1.1/include/linux/vmstat.h 2011-11-11 15:19:27.000000000 -0500
60881+++ linux-3.1.1/include/linux/vmstat.h 2011-11-16 18:39:08.000000000 -0500
60882@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
60883 /*
60884 * Zone based page accounting with per cpu differentials.
60885 */
60886-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60887+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60888
60889 static inline void zone_page_state_add(long x, struct zone *zone,
60890 enum zone_stat_item item)
60891 {
60892- atomic_long_add(x, &zone->vm_stat[item]);
60893- atomic_long_add(x, &vm_stat[item]);
60894+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60895+ atomic_long_add_unchecked(x, &vm_stat[item]);
60896 }
60897
60898 static inline unsigned long global_page_state(enum zone_stat_item item)
60899 {
60900- long x = atomic_long_read(&vm_stat[item]);
60901+ long x = atomic_long_read_unchecked(&vm_stat[item]);
60902 #ifdef CONFIG_SMP
60903 if (x < 0)
60904 x = 0;
60905@@ -109,7 +109,7 @@ static inline unsigned long global_page_
60906 static inline unsigned long zone_page_state(struct zone *zone,
60907 enum zone_stat_item item)
60908 {
60909- long x = atomic_long_read(&zone->vm_stat[item]);
60910+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60911 #ifdef CONFIG_SMP
60912 if (x < 0)
60913 x = 0;
60914@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
60915 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60916 enum zone_stat_item item)
60917 {
60918- long x = atomic_long_read(&zone->vm_stat[item]);
60919+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60920
60921 #ifdef CONFIG_SMP
60922 int cpu;
60923@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
60924
60925 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60926 {
60927- atomic_long_inc(&zone->vm_stat[item]);
60928- atomic_long_inc(&vm_stat[item]);
60929+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
60930+ atomic_long_inc_unchecked(&vm_stat[item]);
60931 }
60932
60933 static inline void __inc_zone_page_state(struct page *page,
60934@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
60935
60936 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60937 {
60938- atomic_long_dec(&zone->vm_stat[item]);
60939- atomic_long_dec(&vm_stat[item]);
60940+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
60941+ atomic_long_dec_unchecked(&vm_stat[item]);
60942 }
60943
60944 static inline void __dec_zone_page_state(struct page *page,
60945diff -urNp linux-3.1.1/include/media/saa7146_vv.h linux-3.1.1/include/media/saa7146_vv.h
60946--- linux-3.1.1/include/media/saa7146_vv.h 2011-11-11 15:19:27.000000000 -0500
60947+++ linux-3.1.1/include/media/saa7146_vv.h 2011-11-16 18:39:08.000000000 -0500
60948@@ -163,7 +163,7 @@ struct saa7146_ext_vv
60949 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60950
60951 /* the extension can override this */
60952- struct v4l2_ioctl_ops ops;
60953+ v4l2_ioctl_ops_no_const ops;
60954 /* pointer to the saa7146 core ops */
60955 const struct v4l2_ioctl_ops *core_ops;
60956
60957diff -urNp linux-3.1.1/include/media/v4l2-dev.h linux-3.1.1/include/media/v4l2-dev.h
60958--- linux-3.1.1/include/media/v4l2-dev.h 2011-11-11 15:19:27.000000000 -0500
60959+++ linux-3.1.1/include/media/v4l2-dev.h 2011-11-16 18:39:08.000000000 -0500
60960@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
60961
60962
60963 struct v4l2_file_operations {
60964- struct module *owner;
60965+ struct module * const owner;
60966 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
60967 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
60968 unsigned int (*poll) (struct file *, struct poll_table_struct *);
60969@@ -68,6 +68,7 @@ struct v4l2_file_operations {
60970 int (*open) (struct file *);
60971 int (*release) (struct file *);
60972 };
60973+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
60974
60975 /*
60976 * Newer version of video_device, handled by videodev2.c
60977diff -urNp linux-3.1.1/include/media/v4l2-ioctl.h linux-3.1.1/include/media/v4l2-ioctl.h
60978--- linux-3.1.1/include/media/v4l2-ioctl.h 2011-11-11 15:19:27.000000000 -0500
60979+++ linux-3.1.1/include/media/v4l2-ioctl.h 2011-11-17 18:44:20.000000000 -0500
60980@@ -272,7 +272,7 @@ struct v4l2_ioctl_ops {
60981 long (*vidioc_default) (struct file *file, void *fh,
60982 bool valid_prio, int cmd, void *arg);
60983 };
60984-
60985+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
60986
60987 /* v4l debugging and diagnostics */
60988
60989diff -urNp linux-3.1.1/include/net/caif/caif_hsi.h linux-3.1.1/include/net/caif/caif_hsi.h
60990--- linux-3.1.1/include/net/caif/caif_hsi.h 2011-11-11 15:19:27.000000000 -0500
60991+++ linux-3.1.1/include/net/caif/caif_hsi.h 2011-11-16 18:39:08.000000000 -0500
60992@@ -94,7 +94,7 @@ struct cfhsi_drv {
60993 void (*rx_done_cb) (struct cfhsi_drv *drv);
60994 void (*wake_up_cb) (struct cfhsi_drv *drv);
60995 void (*wake_down_cb) (struct cfhsi_drv *drv);
60996-};
60997+} __no_const;
60998
60999 /* Structure implemented by HSI device. */
61000 struct cfhsi_dev {
61001diff -urNp linux-3.1.1/include/net/caif/cfctrl.h linux-3.1.1/include/net/caif/cfctrl.h
61002--- linux-3.1.1/include/net/caif/cfctrl.h 2011-11-11 15:19:27.000000000 -0500
61003+++ linux-3.1.1/include/net/caif/cfctrl.h 2011-11-16 18:39:08.000000000 -0500
61004@@ -52,7 +52,7 @@ struct cfctrl_rsp {
61005 void (*radioset_rsp)(void);
61006 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61007 struct cflayer *client_layer);
61008-};
61009+} __no_const;
61010
61011 /* Link Setup Parameters for CAIF-Links. */
61012 struct cfctrl_link_param {
61013@@ -101,8 +101,8 @@ struct cfctrl_request_info {
61014 struct cfctrl {
61015 struct cfsrvl serv;
61016 struct cfctrl_rsp res;
61017- atomic_t req_seq_no;
61018- atomic_t rsp_seq_no;
61019+ atomic_unchecked_t req_seq_no;
61020+ atomic_unchecked_t rsp_seq_no;
61021 struct list_head list;
61022 /* Protects from simultaneous access to first_req list */
61023 spinlock_t info_list_lock;
61024diff -urNp linux-3.1.1/include/net/flow.h linux-3.1.1/include/net/flow.h
61025--- linux-3.1.1/include/net/flow.h 2011-11-11 15:19:27.000000000 -0500
61026+++ linux-3.1.1/include/net/flow.h 2011-11-16 18:39:08.000000000 -0500
61027@@ -207,6 +207,6 @@ extern struct flow_cache_object *flow_ca
61028 u8 dir, flow_resolve_t resolver, void *ctx);
61029
61030 extern void flow_cache_flush(void);
61031-extern atomic_t flow_cache_genid;
61032+extern atomic_unchecked_t flow_cache_genid;
61033
61034 #endif
61035diff -urNp linux-3.1.1/include/net/inetpeer.h linux-3.1.1/include/net/inetpeer.h
61036--- linux-3.1.1/include/net/inetpeer.h 2011-11-11 15:19:27.000000000 -0500
61037+++ linux-3.1.1/include/net/inetpeer.h 2011-11-16 18:39:08.000000000 -0500
61038@@ -47,8 +47,8 @@ struct inet_peer {
61039 */
61040 union {
61041 struct {
61042- atomic_t rid; /* Frag reception counter */
61043- atomic_t ip_id_count; /* IP ID for the next packet */
61044+ atomic_unchecked_t rid; /* Frag reception counter */
61045+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61046 __u32 tcp_ts;
61047 __u32 tcp_ts_stamp;
61048 };
61049@@ -112,11 +112,11 @@ static inline int inet_getid(struct inet
61050 more++;
61051 inet_peer_refcheck(p);
61052 do {
61053- old = atomic_read(&p->ip_id_count);
61054+ old = atomic_read_unchecked(&p->ip_id_count);
61055 new = old + more;
61056 if (!new)
61057 new = 1;
61058- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61059+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61060 return new;
61061 }
61062
61063diff -urNp linux-3.1.1/include/net/ip_fib.h linux-3.1.1/include/net/ip_fib.h
61064--- linux-3.1.1/include/net/ip_fib.h 2011-11-11 15:19:27.000000000 -0500
61065+++ linux-3.1.1/include/net/ip_fib.h 2011-11-16 18:39:08.000000000 -0500
61066@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
61067
61068 #define FIB_RES_SADDR(net, res) \
61069 ((FIB_RES_NH(res).nh_saddr_genid == \
61070- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61071+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61072 FIB_RES_NH(res).nh_saddr : \
61073 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61074 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61075diff -urNp linux-3.1.1/include/net/ip_vs.h linux-3.1.1/include/net/ip_vs.h
61076--- linux-3.1.1/include/net/ip_vs.h 2011-11-11 15:19:27.000000000 -0500
61077+++ linux-3.1.1/include/net/ip_vs.h 2011-11-16 18:39:08.000000000 -0500
61078@@ -509,7 +509,7 @@ struct ip_vs_conn {
61079 struct ip_vs_conn *control; /* Master control connection */
61080 atomic_t n_control; /* Number of controlled ones */
61081 struct ip_vs_dest *dest; /* real server */
61082- atomic_t in_pkts; /* incoming packet counter */
61083+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61084
61085 /* packet transmitter for different forwarding methods. If it
61086 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61087@@ -647,7 +647,7 @@ struct ip_vs_dest {
61088 __be16 port; /* port number of the server */
61089 union nf_inet_addr addr; /* IP address of the server */
61090 volatile unsigned flags; /* dest status flags */
61091- atomic_t conn_flags; /* flags to copy to conn */
61092+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61093 atomic_t weight; /* server weight */
61094
61095 atomic_t refcnt; /* reference counter */
61096diff -urNp linux-3.1.1/include/net/irda/ircomm_core.h linux-3.1.1/include/net/irda/ircomm_core.h
61097--- linux-3.1.1/include/net/irda/ircomm_core.h 2011-11-11 15:19:27.000000000 -0500
61098+++ linux-3.1.1/include/net/irda/ircomm_core.h 2011-11-16 18:39:08.000000000 -0500
61099@@ -51,7 +51,7 @@ typedef struct {
61100 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61101 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61102 struct ircomm_info *);
61103-} call_t;
61104+} __no_const call_t;
61105
61106 struct ircomm_cb {
61107 irda_queue_t queue;
61108diff -urNp linux-3.1.1/include/net/irda/ircomm_tty.h linux-3.1.1/include/net/irda/ircomm_tty.h
61109--- linux-3.1.1/include/net/irda/ircomm_tty.h 2011-11-11 15:19:27.000000000 -0500
61110+++ linux-3.1.1/include/net/irda/ircomm_tty.h 2011-11-16 18:39:08.000000000 -0500
61111@@ -35,6 +35,7 @@
61112 #include <linux/termios.h>
61113 #include <linux/timer.h>
61114 #include <linux/tty.h> /* struct tty_struct */
61115+#include <asm/local.h>
61116
61117 #include <net/irda/irias_object.h>
61118 #include <net/irda/ircomm_core.h>
61119@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61120 unsigned short close_delay;
61121 unsigned short closing_wait; /* time to wait before closing */
61122
61123- int open_count;
61124- int blocked_open; /* # of blocked opens */
61125+ local_t open_count;
61126+ local_t blocked_open; /* # of blocked opens */
61127
61128 /* Protect concurent access to :
61129 * o self->open_count
61130diff -urNp linux-3.1.1/include/net/iucv/af_iucv.h linux-3.1.1/include/net/iucv/af_iucv.h
61131--- linux-3.1.1/include/net/iucv/af_iucv.h 2011-11-11 15:19:27.000000000 -0500
61132+++ linux-3.1.1/include/net/iucv/af_iucv.h 2011-11-16 18:39:08.000000000 -0500
61133@@ -87,7 +87,7 @@ struct iucv_sock {
61134 struct iucv_sock_list {
61135 struct hlist_head head;
61136 rwlock_t lock;
61137- atomic_t autobind_name;
61138+ atomic_unchecked_t autobind_name;
61139 };
61140
61141 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61142diff -urNp linux-3.1.1/include/net/lapb.h linux-3.1.1/include/net/lapb.h
61143--- linux-3.1.1/include/net/lapb.h 2011-11-11 15:19:27.000000000 -0500
61144+++ linux-3.1.1/include/net/lapb.h 2011-11-16 18:39:08.000000000 -0500
61145@@ -95,7 +95,7 @@ struct lapb_cb {
61146 struct sk_buff_head write_queue;
61147 struct sk_buff_head ack_queue;
61148 unsigned char window;
61149- struct lapb_register_struct callbacks;
61150+ struct lapb_register_struct *callbacks;
61151
61152 /* FRMR control information */
61153 struct lapb_frame frmr_data;
61154diff -urNp linux-3.1.1/include/net/neighbour.h linux-3.1.1/include/net/neighbour.h
61155--- linux-3.1.1/include/net/neighbour.h 2011-11-11 15:19:27.000000000 -0500
61156+++ linux-3.1.1/include/net/neighbour.h 2011-11-16 18:39:08.000000000 -0500
61157@@ -122,7 +122,7 @@ struct neigh_ops {
61158 void (*error_report)(struct neighbour *, struct sk_buff *);
61159 int (*output)(struct neighbour *, struct sk_buff *);
61160 int (*connected_output)(struct neighbour *, struct sk_buff *);
61161-};
61162+} __do_const;
61163
61164 struct pneigh_entry {
61165 struct pneigh_entry *next;
61166diff -urNp linux-3.1.1/include/net/netlink.h linux-3.1.1/include/net/netlink.h
61167--- linux-3.1.1/include/net/netlink.h 2011-11-11 15:19:27.000000000 -0500
61168+++ linux-3.1.1/include/net/netlink.h 2011-11-16 18:39:08.000000000 -0500
61169@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
61170 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61171 {
61172 if (mark)
61173- skb_trim(skb, (unsigned char *) mark - skb->data);
61174+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61175 }
61176
61177 /**
61178diff -urNp linux-3.1.1/include/net/netns/ipv4.h linux-3.1.1/include/net/netns/ipv4.h
61179--- linux-3.1.1/include/net/netns/ipv4.h 2011-11-11 15:19:27.000000000 -0500
61180+++ linux-3.1.1/include/net/netns/ipv4.h 2011-11-16 18:39:08.000000000 -0500
61181@@ -56,8 +56,8 @@ struct netns_ipv4 {
61182
61183 unsigned int sysctl_ping_group_range[2];
61184
61185- atomic_t rt_genid;
61186- atomic_t dev_addr_genid;
61187+ atomic_unchecked_t rt_genid;
61188+ atomic_unchecked_t dev_addr_genid;
61189
61190 #ifdef CONFIG_IP_MROUTE
61191 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61192diff -urNp linux-3.1.1/include/net/sctp/sctp.h linux-3.1.1/include/net/sctp/sctp.h
61193--- linux-3.1.1/include/net/sctp/sctp.h 2011-11-11 15:19:27.000000000 -0500
61194+++ linux-3.1.1/include/net/sctp/sctp.h 2011-11-16 18:39:08.000000000 -0500
61195@@ -318,9 +318,9 @@ do { \
61196
61197 #else /* SCTP_DEBUG */
61198
61199-#define SCTP_DEBUG_PRINTK(whatever...)
61200-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61201-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61202+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61203+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61204+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61205 #define SCTP_ENABLE_DEBUG
61206 #define SCTP_DISABLE_DEBUG
61207 #define SCTP_ASSERT(expr, str, func)
61208diff -urNp linux-3.1.1/include/net/sock.h linux-3.1.1/include/net/sock.h
61209--- linux-3.1.1/include/net/sock.h 2011-11-11 15:19:27.000000000 -0500
61210+++ linux-3.1.1/include/net/sock.h 2011-11-16 18:39:08.000000000 -0500
61211@@ -278,7 +278,7 @@ struct sock {
61212 #ifdef CONFIG_RPS
61213 __u32 sk_rxhash;
61214 #endif
61215- atomic_t sk_drops;
61216+ atomic_unchecked_t sk_drops;
61217 int sk_rcvbuf;
61218
61219 struct sk_filter __rcu *sk_filter;
61220@@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct
61221 }
61222
61223 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61224- char __user *from, char *to,
61225+ char __user *from, unsigned char *to,
61226 int copy, int offset)
61227 {
61228 if (skb->ip_summed == CHECKSUM_NONE) {
61229diff -urNp linux-3.1.1/include/net/tcp.h linux-3.1.1/include/net/tcp.h
61230--- linux-3.1.1/include/net/tcp.h 2011-11-11 15:19:27.000000000 -0500
61231+++ linux-3.1.1/include/net/tcp.h 2011-11-16 18:39:08.000000000 -0500
61232@@ -1401,8 +1401,8 @@ enum tcp_seq_states {
61233 struct tcp_seq_afinfo {
61234 char *name;
61235 sa_family_t family;
61236- struct file_operations seq_fops;
61237- struct seq_operations seq_ops;
61238+ file_operations_no_const seq_fops;
61239+ seq_operations_no_const seq_ops;
61240 };
61241
61242 struct tcp_iter_state {
61243diff -urNp linux-3.1.1/include/net/udp.h linux-3.1.1/include/net/udp.h
61244--- linux-3.1.1/include/net/udp.h 2011-11-11 15:19:27.000000000 -0500
61245+++ linux-3.1.1/include/net/udp.h 2011-11-16 18:39:08.000000000 -0500
61246@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
61247 char *name;
61248 sa_family_t family;
61249 struct udp_table *udp_table;
61250- struct file_operations seq_fops;
61251- struct seq_operations seq_ops;
61252+ file_operations_no_const seq_fops;
61253+ seq_operations_no_const seq_ops;
61254 };
61255
61256 struct udp_iter_state {
61257diff -urNp linux-3.1.1/include/net/xfrm.h linux-3.1.1/include/net/xfrm.h
61258--- linux-3.1.1/include/net/xfrm.h 2011-11-11 15:19:27.000000000 -0500
61259+++ linux-3.1.1/include/net/xfrm.h 2011-11-16 18:39:08.000000000 -0500
61260@@ -505,7 +505,7 @@ struct xfrm_policy {
61261 struct timer_list timer;
61262
61263 struct flow_cache_object flo;
61264- atomic_t genid;
61265+ atomic_unchecked_t genid;
61266 u32 priority;
61267 u32 index;
61268 struct xfrm_mark mark;
61269diff -urNp linux-3.1.1/include/rdma/iw_cm.h linux-3.1.1/include/rdma/iw_cm.h
61270--- linux-3.1.1/include/rdma/iw_cm.h 2011-11-11 15:19:27.000000000 -0500
61271+++ linux-3.1.1/include/rdma/iw_cm.h 2011-11-16 18:39:08.000000000 -0500
61272@@ -120,7 +120,7 @@ struct iw_cm_verbs {
61273 int backlog);
61274
61275 int (*destroy_listen)(struct iw_cm_id *cm_id);
61276-};
61277+} __no_const;
61278
61279 /**
61280 * iw_create_cm_id - Create an IW CM identifier.
61281diff -urNp linux-3.1.1/include/scsi/libfc.h linux-3.1.1/include/scsi/libfc.h
61282--- linux-3.1.1/include/scsi/libfc.h 2011-11-11 15:19:27.000000000 -0500
61283+++ linux-3.1.1/include/scsi/libfc.h 2011-11-16 18:39:08.000000000 -0500
61284@@ -758,6 +758,7 @@ struct libfc_function_template {
61285 */
61286 void (*disc_stop_final) (struct fc_lport *);
61287 };
61288+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61289
61290 /**
61291 * struct fc_disc - Discovery context
61292@@ -861,7 +862,7 @@ struct fc_lport {
61293 struct fc_vport *vport;
61294
61295 /* Operational Information */
61296- struct libfc_function_template tt;
61297+ libfc_function_template_no_const tt;
61298 u8 link_up;
61299 u8 qfull;
61300 enum fc_lport_state state;
61301diff -urNp linux-3.1.1/include/scsi/scsi_device.h linux-3.1.1/include/scsi/scsi_device.h
61302--- linux-3.1.1/include/scsi/scsi_device.h 2011-11-11 15:19:27.000000000 -0500
61303+++ linux-3.1.1/include/scsi/scsi_device.h 2011-11-16 18:39:08.000000000 -0500
61304@@ -161,9 +161,9 @@ struct scsi_device {
61305 unsigned int max_device_blocked; /* what device_blocked counts down from */
61306 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61307
61308- atomic_t iorequest_cnt;
61309- atomic_t iodone_cnt;
61310- atomic_t ioerr_cnt;
61311+ atomic_unchecked_t iorequest_cnt;
61312+ atomic_unchecked_t iodone_cnt;
61313+ atomic_unchecked_t ioerr_cnt;
61314
61315 struct device sdev_gendev,
61316 sdev_dev;
61317diff -urNp linux-3.1.1/include/scsi/scsi_transport_fc.h linux-3.1.1/include/scsi/scsi_transport_fc.h
61318--- linux-3.1.1/include/scsi/scsi_transport_fc.h 2011-11-11 15:19:27.000000000 -0500
61319+++ linux-3.1.1/include/scsi/scsi_transport_fc.h 2011-11-16 18:39:08.000000000 -0500
61320@@ -711,7 +711,7 @@ struct fc_function_template {
61321 unsigned long show_host_system_hostname:1;
61322
61323 unsigned long disable_target_scan:1;
61324-};
61325+} __do_const;
61326
61327
61328 /**
61329diff -urNp linux-3.1.1/include/sound/ak4xxx-adda.h linux-3.1.1/include/sound/ak4xxx-adda.h
61330--- linux-3.1.1/include/sound/ak4xxx-adda.h 2011-11-11 15:19:27.000000000 -0500
61331+++ linux-3.1.1/include/sound/ak4xxx-adda.h 2011-11-16 18:39:08.000000000 -0500
61332@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61333 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61334 unsigned char val);
61335 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61336-};
61337+} __no_const;
61338
61339 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61340
61341diff -urNp linux-3.1.1/include/sound/hwdep.h linux-3.1.1/include/sound/hwdep.h
61342--- linux-3.1.1/include/sound/hwdep.h 2011-11-11 15:19:27.000000000 -0500
61343+++ linux-3.1.1/include/sound/hwdep.h 2011-11-16 18:39:08.000000000 -0500
61344@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61345 struct snd_hwdep_dsp_status *status);
61346 int (*dsp_load)(struct snd_hwdep *hw,
61347 struct snd_hwdep_dsp_image *image);
61348-};
61349+} __no_const;
61350
61351 struct snd_hwdep {
61352 struct snd_card *card;
61353diff -urNp linux-3.1.1/include/sound/info.h linux-3.1.1/include/sound/info.h
61354--- linux-3.1.1/include/sound/info.h 2011-11-11 15:19:27.000000000 -0500
61355+++ linux-3.1.1/include/sound/info.h 2011-11-16 18:39:08.000000000 -0500
61356@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61357 struct snd_info_buffer *buffer);
61358 void (*write)(struct snd_info_entry *entry,
61359 struct snd_info_buffer *buffer);
61360-};
61361+} __no_const;
61362
61363 struct snd_info_entry_ops {
61364 int (*open)(struct snd_info_entry *entry,
61365diff -urNp linux-3.1.1/include/sound/pcm.h linux-3.1.1/include/sound/pcm.h
61366--- linux-3.1.1/include/sound/pcm.h 2011-11-11 15:19:27.000000000 -0500
61367+++ linux-3.1.1/include/sound/pcm.h 2011-11-16 18:39:08.000000000 -0500
61368@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61369 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61370 int (*ack)(struct snd_pcm_substream *substream);
61371 };
61372+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61373
61374 /*
61375 *
61376diff -urNp linux-3.1.1/include/sound/sb16_csp.h linux-3.1.1/include/sound/sb16_csp.h
61377--- linux-3.1.1/include/sound/sb16_csp.h 2011-11-11 15:19:27.000000000 -0500
61378+++ linux-3.1.1/include/sound/sb16_csp.h 2011-11-16 18:39:08.000000000 -0500
61379@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61380 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61381 int (*csp_stop) (struct snd_sb_csp * p);
61382 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61383-};
61384+} __no_const;
61385
61386 /*
61387 * CSP private data
61388diff -urNp linux-3.1.1/include/sound/soc.h linux-3.1.1/include/sound/soc.h
61389--- linux-3.1.1/include/sound/soc.h 2011-11-11 15:19:27.000000000 -0500
61390+++ linux-3.1.1/include/sound/soc.h 2011-11-16 18:39:08.000000000 -0500
61391@@ -676,7 +676,7 @@ struct snd_soc_platform_driver {
61392 /* platform IO - used for platform DAPM */
61393 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61394 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61395-};
61396+} __do_const;
61397
61398 struct snd_soc_platform {
61399 const char *name;
61400diff -urNp linux-3.1.1/include/sound/ymfpci.h linux-3.1.1/include/sound/ymfpci.h
61401--- linux-3.1.1/include/sound/ymfpci.h 2011-11-11 15:19:27.000000000 -0500
61402+++ linux-3.1.1/include/sound/ymfpci.h 2011-11-16 18:39:08.000000000 -0500
61403@@ -358,7 +358,7 @@ struct snd_ymfpci {
61404 spinlock_t reg_lock;
61405 spinlock_t voice_lock;
61406 wait_queue_head_t interrupt_sleep;
61407- atomic_t interrupt_sleep_count;
61408+ atomic_unchecked_t interrupt_sleep_count;
61409 struct snd_info_entry *proc_entry;
61410 const struct firmware *dsp_microcode;
61411 const struct firmware *controller_microcode;
61412diff -urNp linux-3.1.1/include/target/target_core_base.h linux-3.1.1/include/target/target_core_base.h
61413--- linux-3.1.1/include/target/target_core_base.h 2011-11-11 15:19:27.000000000 -0500
61414+++ linux-3.1.1/include/target/target_core_base.h 2011-11-16 18:39:08.000000000 -0500
61415@@ -356,7 +356,7 @@ struct t10_reservation_ops {
61416 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61417 int (*t10_pr_register)(struct se_cmd *);
61418 int (*t10_pr_clear)(struct se_cmd *);
61419-};
61420+} __no_const;
61421
61422 struct t10_reservation {
61423 /* Reservation effects all target ports */
61424@@ -496,8 +496,8 @@ struct se_cmd {
61425 atomic_t t_task_cdbs_left;
61426 atomic_t t_task_cdbs_ex_left;
61427 atomic_t t_task_cdbs_timeout_left;
61428- atomic_t t_task_cdbs_sent;
61429- atomic_t t_transport_aborted;
61430+ atomic_unchecked_t t_task_cdbs_sent;
61431+ atomic_unchecked_t t_transport_aborted;
61432 atomic_t t_transport_active;
61433 atomic_t t_transport_complete;
61434 atomic_t t_transport_queue_active;
61435@@ -744,7 +744,7 @@ struct se_device {
61436 atomic_t active_cmds;
61437 atomic_t simple_cmds;
61438 atomic_t depth_left;
61439- atomic_t dev_ordered_id;
61440+ atomic_unchecked_t dev_ordered_id;
61441 atomic_t dev_tur_active;
61442 atomic_t execute_tasks;
61443 atomic_t dev_status_thr_count;
61444diff -urNp linux-3.1.1/include/trace/events/irq.h linux-3.1.1/include/trace/events/irq.h
61445--- linux-3.1.1/include/trace/events/irq.h 2011-11-11 15:19:27.000000000 -0500
61446+++ linux-3.1.1/include/trace/events/irq.h 2011-11-16 18:39:08.000000000 -0500
61447@@ -36,7 +36,7 @@ struct softirq_action;
61448 */
61449 TRACE_EVENT(irq_handler_entry,
61450
61451- TP_PROTO(int irq, struct irqaction *action),
61452+ TP_PROTO(int irq, const struct irqaction *action),
61453
61454 TP_ARGS(irq, action),
61455
61456@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61457 */
61458 TRACE_EVENT(irq_handler_exit,
61459
61460- TP_PROTO(int irq, struct irqaction *action, int ret),
61461+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61462
61463 TP_ARGS(irq, action, ret),
61464
61465diff -urNp linux-3.1.1/include/video/udlfb.h linux-3.1.1/include/video/udlfb.h
61466--- linux-3.1.1/include/video/udlfb.h 2011-11-11 15:19:27.000000000 -0500
61467+++ linux-3.1.1/include/video/udlfb.h 2011-11-16 18:39:08.000000000 -0500
61468@@ -51,10 +51,10 @@ struct dlfb_data {
61469 int base8;
61470 u32 pseudo_palette[256];
61471 /* blit-only rendering path metrics, exposed through sysfs */
61472- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61473- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61474- atomic_t bytes_sent; /* to usb, after compression including overhead */
61475- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61476+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61477+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61478+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61479+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61480 };
61481
61482 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61483diff -urNp linux-3.1.1/include/video/uvesafb.h linux-3.1.1/include/video/uvesafb.h
61484--- linux-3.1.1/include/video/uvesafb.h 2011-11-11 15:19:27.000000000 -0500
61485+++ linux-3.1.1/include/video/uvesafb.h 2011-11-16 18:39:08.000000000 -0500
61486@@ -177,6 +177,7 @@ struct uvesafb_par {
61487 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61488 u8 pmi_setpal; /* PMI for palette changes */
61489 u16 *pmi_base; /* protected mode interface location */
61490+ u8 *pmi_code; /* protected mode code location */
61491 void *pmi_start;
61492 void *pmi_pal;
61493 u8 *vbe_state_orig; /*
61494diff -urNp linux-3.1.1/init/do_mounts.c linux-3.1.1/init/do_mounts.c
61495--- linux-3.1.1/init/do_mounts.c 2011-11-11 15:19:27.000000000 -0500
61496+++ linux-3.1.1/init/do_mounts.c 2011-11-16 18:39:08.000000000 -0500
61497@@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
61498
61499 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61500 {
61501- int err = sys_mount(name, "/root", fs, flags, data);
61502+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61503 if (err)
61504 return err;
61505
61506- sys_chdir((const char __user __force *)"/root");
61507+ sys_chdir((const char __force_user*)"/root");
61508 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61509 printk(KERN_INFO
61510 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61511@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
61512 va_start(args, fmt);
61513 vsprintf(buf, fmt, args);
61514 va_end(args);
61515- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61516+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61517 if (fd >= 0) {
61518 sys_ioctl(fd, FDEJECT, 0);
61519 sys_close(fd);
61520 }
61521 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61522- fd = sys_open("/dev/console", O_RDWR, 0);
61523+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61524 if (fd >= 0) {
61525 sys_ioctl(fd, TCGETS, (long)&termios);
61526 termios.c_lflag &= ~ICANON;
61527 sys_ioctl(fd, TCSETSF, (long)&termios);
61528- sys_read(fd, &c, 1);
61529+ sys_read(fd, (char __user *)&c, 1);
61530 termios.c_lflag |= ICANON;
61531 sys_ioctl(fd, TCSETSF, (long)&termios);
61532 sys_close(fd);
61533@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
61534 mount_root();
61535 out:
61536 devtmpfs_mount("dev");
61537- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61538- sys_chroot((const char __user __force *)".");
61539+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61540+ sys_chroot((const char __force_user *)".");
61541 }
61542diff -urNp linux-3.1.1/init/do_mounts.h linux-3.1.1/init/do_mounts.h
61543--- linux-3.1.1/init/do_mounts.h 2011-11-11 15:19:27.000000000 -0500
61544+++ linux-3.1.1/init/do_mounts.h 2011-11-16 18:39:08.000000000 -0500
61545@@ -15,15 +15,15 @@ extern int root_mountflags;
61546
61547 static inline int create_dev(char *name, dev_t dev)
61548 {
61549- sys_unlink(name);
61550- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61551+ sys_unlink((char __force_user *)name);
61552+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61553 }
61554
61555 #if BITS_PER_LONG == 32
61556 static inline u32 bstat(char *name)
61557 {
61558 struct stat64 stat;
61559- if (sys_stat64(name, &stat) != 0)
61560+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61561 return 0;
61562 if (!S_ISBLK(stat.st_mode))
61563 return 0;
61564@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61565 static inline u32 bstat(char *name)
61566 {
61567 struct stat stat;
61568- if (sys_newstat(name, &stat) != 0)
61569+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61570 return 0;
61571 if (!S_ISBLK(stat.st_mode))
61572 return 0;
61573diff -urNp linux-3.1.1/init/do_mounts_initrd.c linux-3.1.1/init/do_mounts_initrd.c
61574--- linux-3.1.1/init/do_mounts_initrd.c 2011-11-11 15:19:27.000000000 -0500
61575+++ linux-3.1.1/init/do_mounts_initrd.c 2011-11-16 18:39:08.000000000 -0500
61576@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61577 create_dev("/dev/root.old", Root_RAM0);
61578 /* mount initrd on rootfs' /root */
61579 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61580- sys_mkdir("/old", 0700);
61581- root_fd = sys_open("/", 0, 0);
61582- old_fd = sys_open("/old", 0, 0);
61583+ sys_mkdir((const char __force_user *)"/old", 0700);
61584+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
61585+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61586 /* move initrd over / and chdir/chroot in initrd root */
61587- sys_chdir("/root");
61588- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61589- sys_chroot(".");
61590+ sys_chdir((const char __force_user *)"/root");
61591+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61592+ sys_chroot((const char __force_user *)".");
61593
61594 /*
61595 * In case that a resume from disk is carried out by linuxrc or one of
61596@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61597
61598 /* move initrd to rootfs' /old */
61599 sys_fchdir(old_fd);
61600- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61601+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61602 /* switch root and cwd back to / of rootfs */
61603 sys_fchdir(root_fd);
61604- sys_chroot(".");
61605+ sys_chroot((const char __force_user *)".");
61606 sys_close(old_fd);
61607 sys_close(root_fd);
61608
61609 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61610- sys_chdir("/old");
61611+ sys_chdir((const char __force_user *)"/old");
61612 return;
61613 }
61614
61615@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61616 mount_root();
61617
61618 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61619- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61620+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61621 if (!error)
61622 printk("okay\n");
61623 else {
61624- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61625+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61626 if (error == -ENOENT)
61627 printk("/initrd does not exist. Ignored.\n");
61628 else
61629 printk("failed\n");
61630 printk(KERN_NOTICE "Unmounting old root\n");
61631- sys_umount("/old", MNT_DETACH);
61632+ sys_umount((char __force_user *)"/old", MNT_DETACH);
61633 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61634 if (fd < 0) {
61635 error = fd;
61636@@ -116,11 +116,11 @@ int __init initrd_load(void)
61637 * mounted in the normal path.
61638 */
61639 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61640- sys_unlink("/initrd.image");
61641+ sys_unlink((const char __force_user *)"/initrd.image");
61642 handle_initrd();
61643 return 1;
61644 }
61645 }
61646- sys_unlink("/initrd.image");
61647+ sys_unlink((const char __force_user *)"/initrd.image");
61648 return 0;
61649 }
61650diff -urNp linux-3.1.1/init/do_mounts_md.c linux-3.1.1/init/do_mounts_md.c
61651--- linux-3.1.1/init/do_mounts_md.c 2011-11-11 15:19:27.000000000 -0500
61652+++ linux-3.1.1/init/do_mounts_md.c 2011-11-16 18:39:08.000000000 -0500
61653@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61654 partitioned ? "_d" : "", minor,
61655 md_setup_args[ent].device_names);
61656
61657- fd = sys_open(name, 0, 0);
61658+ fd = sys_open((char __force_user *)name, 0, 0);
61659 if (fd < 0) {
61660 printk(KERN_ERR "md: open failed - cannot start "
61661 "array %s\n", name);
61662@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61663 * array without it
61664 */
61665 sys_close(fd);
61666- fd = sys_open(name, 0, 0);
61667+ fd = sys_open((char __force_user *)name, 0, 0);
61668 sys_ioctl(fd, BLKRRPART, 0);
61669 }
61670 sys_close(fd);
61671@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61672
61673 wait_for_device_probe();
61674
61675- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
61676+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
61677 if (fd >= 0) {
61678 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61679 sys_close(fd);
61680diff -urNp linux-3.1.1/init/initramfs.c linux-3.1.1/init/initramfs.c
61681--- linux-3.1.1/init/initramfs.c 2011-11-11 15:19:27.000000000 -0500
61682+++ linux-3.1.1/init/initramfs.c 2011-11-16 18:39:08.000000000 -0500
61683@@ -74,7 +74,7 @@ static void __init free_hash(void)
61684 }
61685 }
61686
61687-static long __init do_utime(char __user *filename, time_t mtime)
61688+static long __init do_utime(__force char __user *filename, time_t mtime)
61689 {
61690 struct timespec t[2];
61691
61692@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61693 struct dir_entry *de, *tmp;
61694 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61695 list_del(&de->list);
61696- do_utime(de->name, de->mtime);
61697+ do_utime((char __force_user *)de->name, de->mtime);
61698 kfree(de->name);
61699 kfree(de);
61700 }
61701@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61702 if (nlink >= 2) {
61703 char *old = find_link(major, minor, ino, mode, collected);
61704 if (old)
61705- return (sys_link(old, collected) < 0) ? -1 : 1;
61706+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
61707 }
61708 return 0;
61709 }
61710@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61711 {
61712 struct stat st;
61713
61714- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61715+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
61716 if (S_ISDIR(st.st_mode))
61717- sys_rmdir(path);
61718+ sys_rmdir((char __force_user *)path);
61719 else
61720- sys_unlink(path);
61721+ sys_unlink((char __force_user *)path);
61722 }
61723 }
61724
61725@@ -305,7 +305,7 @@ static int __init do_name(void)
61726 int openflags = O_WRONLY|O_CREAT;
61727 if (ml != 1)
61728 openflags |= O_TRUNC;
61729- wfd = sys_open(collected, openflags, mode);
61730+ wfd = sys_open((char __force_user *)collected, openflags, mode);
61731
61732 if (wfd >= 0) {
61733 sys_fchown(wfd, uid, gid);
61734@@ -317,17 +317,17 @@ static int __init do_name(void)
61735 }
61736 }
61737 } else if (S_ISDIR(mode)) {
61738- sys_mkdir(collected, mode);
61739- sys_chown(collected, uid, gid);
61740- sys_chmod(collected, mode);
61741+ sys_mkdir((char __force_user *)collected, mode);
61742+ sys_chown((char __force_user *)collected, uid, gid);
61743+ sys_chmod((char __force_user *)collected, mode);
61744 dir_add(collected, mtime);
61745 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61746 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61747 if (maybe_link() == 0) {
61748- sys_mknod(collected, mode, rdev);
61749- sys_chown(collected, uid, gid);
61750- sys_chmod(collected, mode);
61751- do_utime(collected, mtime);
61752+ sys_mknod((char __force_user *)collected, mode, rdev);
61753+ sys_chown((char __force_user *)collected, uid, gid);
61754+ sys_chmod((char __force_user *)collected, mode);
61755+ do_utime((char __force_user *)collected, mtime);
61756 }
61757 }
61758 return 0;
61759@@ -336,15 +336,15 @@ static int __init do_name(void)
61760 static int __init do_copy(void)
61761 {
61762 if (count >= body_len) {
61763- sys_write(wfd, victim, body_len);
61764+ sys_write(wfd, (char __force_user *)victim, body_len);
61765 sys_close(wfd);
61766- do_utime(vcollected, mtime);
61767+ do_utime((char __force_user *)vcollected, mtime);
61768 kfree(vcollected);
61769 eat(body_len);
61770 state = SkipIt;
61771 return 0;
61772 } else {
61773- sys_write(wfd, victim, count);
61774+ sys_write(wfd, (char __force_user *)victim, count);
61775 body_len -= count;
61776 eat(count);
61777 return 1;
61778@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61779 {
61780 collected[N_ALIGN(name_len) + body_len] = '\0';
61781 clean_path(collected, 0);
61782- sys_symlink(collected + N_ALIGN(name_len), collected);
61783- sys_lchown(collected, uid, gid);
61784- do_utime(collected, mtime);
61785+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
61786+ sys_lchown((char __force_user *)collected, uid, gid);
61787+ do_utime((char __force_user *)collected, mtime);
61788 state = SkipIt;
61789 next_state = Reset;
61790 return 0;
61791diff -urNp linux-3.1.1/init/Kconfig linux-3.1.1/init/Kconfig
61792--- linux-3.1.1/init/Kconfig 2011-11-11 15:19:27.000000000 -0500
61793+++ linux-3.1.1/init/Kconfig 2011-11-16 18:39:08.000000000 -0500
61794@@ -1202,7 +1202,7 @@ config SLUB_DEBUG
61795
61796 config COMPAT_BRK
61797 bool "Disable heap randomization"
61798- default y
61799+ default n
61800 help
61801 Randomizing heap placement makes heap exploits harder, but it
61802 also breaks ancient binaries (including anything libc5 based).
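Flipping COMPAT_BRK from default y to default n turns heap (brk) randomization on by default; only configurations that must run ancient libc5-era binaries should keep it off. A quick way to observe the effect from userspace, purely illustrative and not part of the patch: with randomization active the initial program break moves between runs.

	/* Compile and run twice; with COMPAT_BRK=n (and randomize_va_space at
	 * its default) the printed break address should differ across runs. */
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		printf("initial brk = %p\n", sbrk(0));
		return 0;
	}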
61803diff -urNp linux-3.1.1/init/main.c linux-3.1.1/init/main.c
61804--- linux-3.1.1/init/main.c 2011-11-11 15:19:27.000000000 -0500
61805+++ linux-3.1.1/init/main.c 2011-11-16 18:40:44.000000000 -0500
61806@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
61807 extern void tc_init(void);
61808 #endif
61809
61810+extern void grsecurity_init(void);
61811+
61812 /*
61813 * Debug helper: via this flag we know that we are in 'early bootup code'
61814 * where only the boot processor is running with IRQ disabled. This means
61815@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
61816
61817 __setup("reset_devices", set_reset_devices);
61818
61819+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61820+extern char pax_enter_kernel_user[];
61821+extern char pax_exit_kernel_user[];
61822+extern pgdval_t clone_pgd_mask;
61823+#endif
61824+
61825+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61826+static int __init setup_pax_nouderef(char *str)
61827+{
61828+#ifdef CONFIG_X86_32
61829+ unsigned int cpu;
61830+ struct desc_struct *gdt;
61831+
61832+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61833+ gdt = get_cpu_gdt_table(cpu);
61834+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61835+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61836+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61837+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61838+ }
61839+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61840+#else
61841+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61842+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61843+ clone_pgd_mask = ~(pgdval_t)0UL;
61844+#endif
61845+
61846+ return 0;
61847+}
61848+early_param("pax_nouderef", setup_pax_nouderef);
61849+#endif
61850+
61851+#ifdef CONFIG_PAX_SOFTMODE
61852+int pax_softmode;
61853+
61854+static int __init setup_pax_softmode(char *str)
61855+{
61856+ get_option(&str, &pax_softmode);
61857+ return 1;
61858+}
61859+__setup("pax_softmode=", setup_pax_softmode);
61860+#endif
61861+
61862 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61863 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61864 static const char *panic_later, *panic_param;
61865@@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(ini
61866 {
61867 int count = preempt_count();
61868 int ret;
61869+ const char *msg1 = "", *msg2 = "";
61870
61871 if (initcall_debug)
61872 ret = do_one_initcall_debug(fn);
61873@@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(ini
61874 sprintf(msgbuf, "error code %d ", ret);
61875
61876 if (preempt_count() != count) {
61877- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61878+ msg1 = " preemption imbalance";
61879 preempt_count() = count;
61880 }
61881 if (irqs_disabled()) {
61882- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61883+ msg2 = " disabled interrupts";
61884 local_irq_enable();
61885 }
61886- if (msgbuf[0]) {
61887- printk("initcall %pF returned with %s\n", fn, msgbuf);
61888+ if (msgbuf[0] || *msg1 || *msg2) {
61889+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61890 }
61891
61892 return ret;
61893@@ -817,7 +863,7 @@ static int __init kernel_init(void * unu
61894 do_basic_setup();
61895
61896 /* Open the /dev/console on the rootfs, this should never fail */
61897- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
61898+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
61899 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
61900
61901 (void) sys_dup(0);
61902@@ -830,11 +876,13 @@ static int __init kernel_init(void * unu
61903 if (!ramdisk_execute_command)
61904 ramdisk_execute_command = "/init";
61905
61906- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61907+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
61908 ramdisk_execute_command = NULL;
61909 prepare_namespace();
61910 }
61911
61912+ grsecurity_init();
61913+
61914 /*
61915 * Ok, we have completed the initial bootup, and
61916 * we're essentially up and running. Get rid of the
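The init/main.c changes add two boot-time switches: pax_nouderef, an early_param that rewrites the UDEREF entry/exit stubs and segment limits so the feature can be disabled at boot, and pax_softmode=, a plain __setup integer. The parsing side is ordinary kernel command-line plumbing; a minimal sketch of the same shape, using a hypothetical parameter name:

	/* Sketch of a "__setup" style integer switch like pax_softmode=;
	 * example_mode and the parameter name are illustrative only. */
	#include <linux/init.h>
	#include <linux/kernel.h>

	static int example_mode;

	static int __init setup_example_mode(char *str)
	{
		get_option(&str, &example_mode);	/* parses "example_mode=1" */
		return 1;				/* consumed */
	}
	__setup("example_mode=", setup_example_mode);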
61917diff -urNp linux-3.1.1/ipc/mqueue.c linux-3.1.1/ipc/mqueue.c
61918--- linux-3.1.1/ipc/mqueue.c 2011-11-11 15:19:27.000000000 -0500
61919+++ linux-3.1.1/ipc/mqueue.c 2011-11-16 18:40:44.000000000 -0500
61920@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(st
61921 mq_bytes = (mq_msg_tblsz +
61922 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61923
61924+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61925 spin_lock(&mq_lock);
61926 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61927 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
61928diff -urNp linux-3.1.1/ipc/msg.c linux-3.1.1/ipc/msg.c
61929--- linux-3.1.1/ipc/msg.c 2011-11-11 15:19:27.000000000 -0500
61930+++ linux-3.1.1/ipc/msg.c 2011-11-16 18:39:08.000000000 -0500
61931@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
61932 return security_msg_queue_associate(msq, msgflg);
61933 }
61934
61935+static struct ipc_ops msg_ops = {
61936+ .getnew = newque,
61937+ .associate = msg_security,
61938+ .more_checks = NULL
61939+};
61940+
61941 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
61942 {
61943 struct ipc_namespace *ns;
61944- struct ipc_ops msg_ops;
61945 struct ipc_params msg_params;
61946
61947 ns = current->nsproxy->ipc_ns;
61948
61949- msg_ops.getnew = newque;
61950- msg_ops.associate = msg_security;
61951- msg_ops.more_checks = NULL;
61952-
61953 msg_params.key = key;
61954 msg_params.flg = msgflg;
61955
61956diff -urNp linux-3.1.1/ipc/sem.c linux-3.1.1/ipc/sem.c
61957--- linux-3.1.1/ipc/sem.c 2011-11-11 15:19:27.000000000 -0500
61958+++ linux-3.1.1/ipc/sem.c 2011-11-16 18:40:44.000000000 -0500
61959@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
61960 return 0;
61961 }
61962
61963+static struct ipc_ops sem_ops = {
61964+ .getnew = newary,
61965+ .associate = sem_security,
61966+ .more_checks = sem_more_checks
61967+};
61968+
61969 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
61970 {
61971 struct ipc_namespace *ns;
61972- struct ipc_ops sem_ops;
61973 struct ipc_params sem_params;
61974
61975 ns = current->nsproxy->ipc_ns;
61976@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
61977 if (nsems < 0 || nsems > ns->sc_semmsl)
61978 return -EINVAL;
61979
61980- sem_ops.getnew = newary;
61981- sem_ops.associate = sem_security;
61982- sem_ops.more_checks = sem_more_checks;
61983-
61984 sem_params.key = key;
61985 sem_params.flg = semflg;
61986 sem_params.u.nsems = nsems;
61987@@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namesp
61988 int nsems;
61989 struct list_head tasks;
61990
61991+ pax_track_stack();
61992+
61993 sma = sem_lock_check(ns, semid);
61994 if (IS_ERR(sma))
61995 return PTR_ERR(sma);
61996@@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
61997 struct ipc_namespace *ns;
61998 struct list_head tasks;
61999
62000+ pax_track_stack();
62001+
62002 ns = current->nsproxy->ipc_ns;
62003
62004 if (nsops < 1 || semid < 0)
62005diff -urNp linux-3.1.1/ipc/shm.c linux-3.1.1/ipc/shm.c
62006--- linux-3.1.1/ipc/shm.c 2011-11-11 15:19:27.000000000 -0500
62007+++ linux-3.1.1/ipc/shm.c 2011-11-16 18:40:44.000000000 -0500
62008@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
62009 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62010 #endif
62011
62012+#ifdef CONFIG_GRKERNSEC
62013+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62014+ const time_t shm_createtime, const uid_t cuid,
62015+ const int shmid);
62016+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62017+ const time_t shm_createtime);
62018+#endif
62019+
62020 void shm_init_ns(struct ipc_namespace *ns)
62021 {
62022 ns->shm_ctlmax = SHMMAX;
62023@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *
62024 shp->shm_lprid = 0;
62025 shp->shm_atim = shp->shm_dtim = 0;
62026 shp->shm_ctim = get_seconds();
62027+#ifdef CONFIG_GRKERNSEC
62028+ {
62029+ struct timespec timeval;
62030+ do_posix_clock_monotonic_gettime(&timeval);
62031+
62032+ shp->shm_createtime = timeval.tv_sec;
62033+ }
62034+#endif
62035 shp->shm_segsz = size;
62036 shp->shm_nattch = 0;
62037 shp->shm_file = file;
62038@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct
62039 return 0;
62040 }
62041
62042+static struct ipc_ops shm_ops = {
62043+ .getnew = newseg,
62044+ .associate = shm_security,
62045+ .more_checks = shm_more_checks
62046+};
62047+
62048 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62049 {
62050 struct ipc_namespace *ns;
62051- struct ipc_ops shm_ops;
62052 struct ipc_params shm_params;
62053
62054 ns = current->nsproxy->ipc_ns;
62055
62056- shm_ops.getnew = newseg;
62057- shm_ops.associate = shm_security;
62058- shm_ops.more_checks = shm_more_checks;
62059-
62060 shm_params.key = key;
62061 shm_params.flg = shmflg;
62062 shm_params.u.size = size;
62063@@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
62064 case SHM_LOCK:
62065 case SHM_UNLOCK:
62066 {
62067- struct file *uninitialized_var(shm_file);
62068-
62069 lru_add_drain_all(); /* drain pagevecs to lru lists */
62070
62071 shp = shm_lock_check(ns, shmid);
62072@@ -1004,9 +1019,21 @@ long do_shmat(int shmid, char __user *sh
62073 if (err)
62074 goto out_unlock;
62075
62076+#ifdef CONFIG_GRKERNSEC
62077+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62078+ shp->shm_perm.cuid, shmid) ||
62079+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62080+ err = -EACCES;
62081+ goto out_unlock;
62082+ }
62083+#endif
62084+
62085 path = shp->shm_file->f_path;
62086 path_get(&path);
62087 shp->shm_nattch++;
62088+#ifdef CONFIG_GRKERNSEC
62089+ shp->shm_lapid = current->pid;
62090+#endif
62091 size = i_size_read(path.dentry->d_inode);
62092 shm_unlock(shp);
62093
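In do_shmat() the patch inserts a grsecurity gate before the segment is attached: gr_handle_shmat() and gr_chroot_shmat() can veto the attach based on creator pid/uid, creation time and chroot state, returning -EACCES while the segment is still locked, after which shm_lapid is updated to the attaching pid. The control flow reduces to a deny-before-attach hook, sketched here with hypothetical names throughout:

	/* Sketch of the gate; every identifier is a stand-in
	 * (policy_allows_attach() plays the role of gr_handle_shmat() and
	 * gr_chroot_shmat(), struct segment stands in for shmid_kernel). */
	#include <linux/errno.h>

	struct segment {
		int nattch;
		int last_attach_pid;	/* mirrors shm_lapid */
	};

	extern void lock_segment(struct segment *shp);
	extern void unlock_segment(struct segment *shp);
	extern int  policy_allows_attach(const struct segment *shp, int shmid);
	extern int  current_pid(void);

	static int attach_segment(struct segment *shp, int shmid)
	{
		int err = 0;

		lock_segment(shp);
		if (!policy_allows_attach(shp, shmid)) {
			err = -EACCES;		/* refuse before attaching */
			goto out;
		}
		shp->nattch++;
		shp->last_attach_pid = current_pid();
	out:
		unlock_segment(shp);
		return err;
	}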
62094diff -urNp linux-3.1.1/kernel/acct.c linux-3.1.1/kernel/acct.c
62095--- linux-3.1.1/kernel/acct.c 2011-11-11 15:19:27.000000000 -0500
62096+++ linux-3.1.1/kernel/acct.c 2011-11-16 18:39:08.000000000 -0500
62097@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
62098 */
62099 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62100 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62101- file->f_op->write(file, (char *)&ac,
62102+ file->f_op->write(file, (char __force_user *)&ac,
62103 sizeof(acct_t), &file->f_pos);
62104 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62105 set_fs(fs);
62106diff -urNp linux-3.1.1/kernel/audit.c linux-3.1.1/kernel/audit.c
62107--- linux-3.1.1/kernel/audit.c 2011-11-11 15:19:27.000000000 -0500
62108+++ linux-3.1.1/kernel/audit.c 2011-11-16 18:39:08.000000000 -0500
62109@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62110 3) suppressed due to audit_rate_limit
62111 4) suppressed due to audit_backlog_limit
62112 */
62113-static atomic_t audit_lost = ATOMIC_INIT(0);
62114+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62115
62116 /* The netlink socket. */
62117 static struct sock *audit_sock;
62118@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62119 unsigned long now;
62120 int print;
62121
62122- atomic_inc(&audit_lost);
62123+ atomic_inc_unchecked(&audit_lost);
62124
62125 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62126
62127@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62128 printk(KERN_WARNING
62129 "audit: audit_lost=%d audit_rate_limit=%d "
62130 "audit_backlog_limit=%d\n",
62131- atomic_read(&audit_lost),
62132+ atomic_read_unchecked(&audit_lost),
62133 audit_rate_limit,
62134 audit_backlog_limit);
62135 audit_panic(message);
62136@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_b
62137 status_set.pid = audit_pid;
62138 status_set.rate_limit = audit_rate_limit;
62139 status_set.backlog_limit = audit_backlog_limit;
62140- status_set.lost = atomic_read(&audit_lost);
62141+ status_set.lost = atomic_read_unchecked(&audit_lost);
62142 status_set.backlog = skb_queue_len(&audit_skb_queue);
62143 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62144 &status_set, sizeof(status_set));
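kernel/audit.c is one of many places where the patch converts a statistics counter from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT the ordinary atomic ops detect and trap on overflow to stop reference-count wraps; counters that are allowed to wrap (audit_lost here, session_id in auditsc.c, the kgdb tasklet flag and the perf child counters further down) opt out through the _unchecked variants. Roughly, the generic fallback looks like this; simplified, and not the per-arch implementations the patch actually provides:

	/* Minimal sketch of the unchecked counter type. */
	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return *(volatile const int *)&v->counter;
	}

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		__sync_fetch_and_add(&v->counter, 1);	/* stand-in for the arch op */
	}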
62145diff -urNp linux-3.1.1/kernel/auditsc.c linux-3.1.1/kernel/auditsc.c
62146--- linux-3.1.1/kernel/auditsc.c 2011-11-11 15:19:27.000000000 -0500
62147+++ linux-3.1.1/kernel/auditsc.c 2011-11-16 18:39:08.000000000 -0500
62148@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
62149 }
62150
62151 /* global counter which is incremented every time something logs in */
62152-static atomic_t session_id = ATOMIC_INIT(0);
62153+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62154
62155 /**
62156 * audit_set_loginuid - set a task's audit_context loginuid
62157@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
62158 */
62159 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62160 {
62161- unsigned int sessionid = atomic_inc_return(&session_id);
62162+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62163 struct audit_context *context = task->audit_context;
62164
62165 if (context && context->in_syscall) {
62166diff -urNp linux-3.1.1/kernel/capability.c linux-3.1.1/kernel/capability.c
62167--- linux-3.1.1/kernel/capability.c 2011-11-11 15:19:27.000000000 -0500
62168+++ linux-3.1.1/kernel/capability.c 2011-11-16 18:40:44.000000000 -0500
62169@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
62170 * before modification is attempted and the application
62171 * fails.
62172 */
62173+ if (tocopy > ARRAY_SIZE(kdata))
62174+ return -EFAULT;
62175+
62176 if (copy_to_user(dataptr, kdata, tocopy
62177 * sizeof(struct __user_cap_data_struct))) {
62178 return -EFAULT;
62179@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
62180 BUG();
62181 }
62182
62183- if (security_capable(ns, current_cred(), cap) == 0) {
62184+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62185 current->flags |= PF_SUPERPRIV;
62186 return true;
62187 }
62188@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
62189 }
62190 EXPORT_SYMBOL(ns_capable);
62191
62192+bool ns_capable_nolog(struct user_namespace *ns, int cap)
62193+{
62194+ if (unlikely(!cap_valid(cap))) {
62195+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62196+ BUG();
62197+ }
62198+
62199+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62200+ current->flags |= PF_SUPERPRIV;
62201+ return true;
62202+ }
62203+ return false;
62204+}
62205+EXPORT_SYMBOL(ns_capable_nolog);
62206+
62207+bool capable_nolog(int cap)
62208+{
62209+ return ns_capable_nolog(&init_user_ns, cap);
62210+}
62211+EXPORT_SYMBOL(capable_nolog);
62212+
62213 /**
62214 * task_ns_capable - Determine whether current task has a superior
62215 * capability targeted at a specific task's user namespace.
62216@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
62217 }
62218 EXPORT_SYMBOL(task_ns_capable);
62219
62220+bool task_ns_capable_nolog(struct task_struct *t, int cap)
62221+{
62222+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62223+}
62224+EXPORT_SYMBOL(task_ns_capable_nolog);
62225+
62226 /**
62227 * nsown_capable - Check superior capability to one's own user_ns
62228 * @cap: The capability in question
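Two independent things happen in kernel/capability.c: capget() gains a bounds check so tocopy can never exceed the on-stack kdata array before the copy_to_user(), and ns_capable()/task_ns_capable() grow _nolog twins that consult gr_is_capable_nolog() without generating grsecurity audit records. The bounds check is the usual guard-the-multiply-by-element-size pattern; a small sketch with a stand-in buffer length:

	/* Sketch of the capget() fix: refuse to copy more elements than the
	 * kernel-side buffer holds.  kdata_len stands in for ARRAY_SIZE(kdata). */
	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static long copy_cap_data(void __user *dataptr, const void *kdata,
				  size_t elem_size, unsigned int kdata_len,
				  unsigned int tocopy)
	{
		if (tocopy > kdata_len)
			return -EFAULT;
		if (copy_to_user(dataptr, kdata, tocopy * elem_size))
			return -EFAULT;
		return 0;
	}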
62229diff -urNp linux-3.1.1/kernel/cgroup.c linux-3.1.1/kernel/cgroup.c
62230--- linux-3.1.1/kernel/cgroup.c 2011-11-11 15:19:27.000000000 -0500
62231+++ linux-3.1.1/kernel/cgroup.c 2011-11-16 18:40:44.000000000 -0500
62232@@ -595,6 +595,8 @@ static struct css_set *find_css_set(
62233 struct hlist_head *hhead;
62234 struct cg_cgroup_link *link;
62235
62236+ pax_track_stack();
62237+
62238 /* First see if we already have a cgroup group that matches
62239 * the desired set */
62240 read_lock(&css_set_lock);
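The pax_track_stack() calls added to find_css_set() here, and to most of kernel/cred.c and the futex paths below, belong to PAX_MEMORY_STACKLEAK: functions with large stack frames record how deep they reach so the kernel stack can be sanitized on return to userspace. When the feature is off the call is expected to compile away; a rough stub of that arrangement, approximate rather than the patch's exact header:

	/* Sketch: a real tracking function only when STACKLEAK is configured,
	 * otherwise it costs nothing. */
	#ifdef CONFIG_PAX_MEMORY_STACKLEAK
	void pax_track_stack(void);
	#else
	#define pax_track_stack() do { } while (0)
	#endif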
62241diff -urNp linux-3.1.1/kernel/compat.c linux-3.1.1/kernel/compat.c
62242--- linux-3.1.1/kernel/compat.c 2011-11-11 15:19:27.000000000 -0500
62243+++ linux-3.1.1/kernel/compat.c 2011-11-16 18:40:44.000000000 -0500
62244@@ -13,6 +13,7 @@
62245
62246 #include <linux/linkage.h>
62247 #include <linux/compat.h>
62248+#include <linux/module.h>
62249 #include <linux/errno.h>
62250 #include <linux/time.h>
62251 #include <linux/signal.h>
62252@@ -167,7 +168,7 @@ static long compat_nanosleep_restart(str
62253 mm_segment_t oldfs;
62254 long ret;
62255
62256- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62257+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62258 oldfs = get_fs();
62259 set_fs(KERNEL_DS);
62260 ret = hrtimer_nanosleep_restart(restart);
62261@@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(str
62262 oldfs = get_fs();
62263 set_fs(KERNEL_DS);
62264 ret = hrtimer_nanosleep(&tu,
62265- rmtp ? (struct timespec __user *)&rmt : NULL,
62266+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
62267 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62268 set_fs(oldfs);
62269
62270@@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(co
62271 mm_segment_t old_fs = get_fs();
62272
62273 set_fs(KERNEL_DS);
62274- ret = sys_sigpending((old_sigset_t __user *) &s);
62275+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
62276 set_fs(old_fs);
62277 if (ret == 0)
62278 ret = put_user(s, set);
62279@@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(i
62280 old_fs = get_fs();
62281 set_fs(KERNEL_DS);
62282 ret = sys_sigprocmask(how,
62283- set ? (old_sigset_t __user *) &s : NULL,
62284- oset ? (old_sigset_t __user *) &s : NULL);
62285+ set ? (old_sigset_t __force_user *) &s : NULL,
62286+ oset ? (old_sigset_t __force_user *) &s : NULL);
62287 set_fs(old_fs);
62288 if (ret == 0)
62289 if (oset)
62290@@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit
62291 mm_segment_t old_fs = get_fs();
62292
62293 set_fs(KERNEL_DS);
62294- ret = sys_old_getrlimit(resource, &r);
62295+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62296 set_fs(old_fs);
62297
62298 if (!ret) {
62299@@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int
62300 mm_segment_t old_fs = get_fs();
62301
62302 set_fs(KERNEL_DS);
62303- ret = sys_getrusage(who, (struct rusage __user *) &r);
62304+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62305 set_fs(old_fs);
62306
62307 if (ret)
62308@@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compa
62309 set_fs (KERNEL_DS);
62310 ret = sys_wait4(pid,
62311 (stat_addr ?
62312- (unsigned int __user *) &status : NULL),
62313- options, (struct rusage __user *) &r);
62314+ (unsigned int __force_user *) &status : NULL),
62315+ options, (struct rusage __force_user *) &r);
62316 set_fs (old_fs);
62317
62318 if (ret > 0) {
62319@@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int wh
62320 memset(&info, 0, sizeof(info));
62321
62322 set_fs(KERNEL_DS);
62323- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62324- uru ? (struct rusage __user *)&ru : NULL);
62325+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62326+ uru ? (struct rusage __force_user *)&ru : NULL);
62327 set_fs(old_fs);
62328
62329 if ((ret < 0) || (info.si_signo == 0))
62330@@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t ti
62331 oldfs = get_fs();
62332 set_fs(KERNEL_DS);
62333 err = sys_timer_settime(timer_id, flags,
62334- (struct itimerspec __user *) &newts,
62335- (struct itimerspec __user *) &oldts);
62336+ (struct itimerspec __force_user *) &newts,
62337+ (struct itimerspec __force_user *) &oldts);
62338 set_fs(oldfs);
62339 if (!err && old && put_compat_itimerspec(old, &oldts))
62340 return -EFAULT;
62341@@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t ti
62342 oldfs = get_fs();
62343 set_fs(KERNEL_DS);
62344 err = sys_timer_gettime(timer_id,
62345- (struct itimerspec __user *) &ts);
62346+ (struct itimerspec __force_user *) &ts);
62347 set_fs(oldfs);
62348 if (!err && put_compat_itimerspec(setting, &ts))
62349 return -EFAULT;
62350@@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t
62351 oldfs = get_fs();
62352 set_fs(KERNEL_DS);
62353 err = sys_clock_settime(which_clock,
62354- (struct timespec __user *) &ts);
62355+ (struct timespec __force_user *) &ts);
62356 set_fs(oldfs);
62357 return err;
62358 }
62359@@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t
62360 oldfs = get_fs();
62361 set_fs(KERNEL_DS);
62362 err = sys_clock_gettime(which_clock,
62363- (struct timespec __user *) &ts);
62364+ (struct timespec __force_user *) &ts);
62365 set_fs(oldfs);
62366 if (!err && put_compat_timespec(&ts, tp))
62367 return -EFAULT;
62368@@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t
62369
62370 oldfs = get_fs();
62371 set_fs(KERNEL_DS);
62372- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62373+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62374 set_fs(oldfs);
62375
62376 err = compat_put_timex(utp, &txc);
62377@@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t w
62378 oldfs = get_fs();
62379 set_fs(KERNEL_DS);
62380 err = sys_clock_getres(which_clock,
62381- (struct timespec __user *) &ts);
62382+ (struct timespec __force_user *) &ts);
62383 set_fs(oldfs);
62384 if (!err && tp && put_compat_timespec(&ts, tp))
62385 return -EFAULT;
62386@@ -729,9 +730,9 @@ static long compat_clock_nanosleep_resta
62387 long err;
62388 mm_segment_t oldfs;
62389 struct timespec tu;
62390- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62391+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62392
62393- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62394+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62395 oldfs = get_fs();
62396 set_fs(KERNEL_DS);
62397 err = clock_nanosleep_restart(restart);
62398@@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_
62399 oldfs = get_fs();
62400 set_fs(KERNEL_DS);
62401 err = sys_clock_nanosleep(which_clock, flags,
62402- (struct timespec __user *) &in,
62403- (struct timespec __user *) &out);
62404+ (struct timespec __force_user *) &in,
62405+ (struct timespec __force_user *) &out);
62406 set_fs(oldfs);
62407
62408 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
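All of the kernel/compat.c hunks follow one pattern: the compat wrapper builds a native struct on the kernel stack, widens the address limit with set_fs(KERNEL_DS), calls the native syscall with the kernel pointer cast through __force __user (the patch spells it __force_user), then restores the limit and converts the result back to the compat layout. The bracket reduced to its essentials, with clock_gettime as the illustrative example and error handling trimmed:

	/* Sketch of the set_fs() bracket used throughout the compat wrappers. */
	#include <linux/syscalls.h>
	#include <linux/time.h>
	#include <linux/uaccess.h>

	static long compat_gettime_example(clockid_t which_clock, struct timespec *ts)
	{
		mm_segment_t oldfs = get_fs();
		long err;

		set_fs(KERNEL_DS);	/* let the syscall accept a kernel pointer */
		err = sys_clock_gettime(which_clock,
					(struct timespec __force __user *)ts);
		set_fs(oldfs);		/* always restore the previous limit */
		return err;
	}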
62409diff -urNp linux-3.1.1/kernel/configs.c linux-3.1.1/kernel/configs.c
62410--- linux-3.1.1/kernel/configs.c 2011-11-11 15:19:27.000000000 -0500
62411+++ linux-3.1.1/kernel/configs.c 2011-11-16 18:40:44.000000000 -0500
62412@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62413 struct proc_dir_entry *entry;
62414
62415 /* create the current config file */
62416+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62417+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62418+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62419+ &ikconfig_file_ops);
62420+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62421+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62422+ &ikconfig_file_ops);
62423+#endif
62424+#else
62425 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62426 &ikconfig_file_ops);
62427+#endif
62428+
62429 if (!entry)
62430 return -ENOMEM;
62431
62432diff -urNp linux-3.1.1/kernel/cred.c linux-3.1.1/kernel/cred.c
62433--- linux-3.1.1/kernel/cred.c 2011-11-11 15:19:27.000000000 -0500
62434+++ linux-3.1.1/kernel/cred.c 2011-11-16 18:40:44.000000000 -0500
62435@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
62436 */
62437 void __put_cred(struct cred *cred)
62438 {
62439+ pax_track_stack();
62440+
62441 kdebug("__put_cred(%p{%d,%d})", cred,
62442 atomic_read(&cred->usage),
62443 read_cred_subscribers(cred));
62444@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
62445 {
62446 struct cred *cred;
62447
62448+ pax_track_stack();
62449+
62450 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62451 atomic_read(&tsk->cred->usage),
62452 read_cred_subscribers(tsk->cred));
62453@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
62454 {
62455 const struct cred *cred;
62456
62457+ pax_track_stack();
62458+
62459 rcu_read_lock();
62460
62461 do {
62462@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
62463 {
62464 struct cred *new;
62465
62466+ pax_track_stack();
62467+
62468 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62469 if (!new)
62470 return NULL;
62471@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
62472 const struct cred *old;
62473 struct cred *new;
62474
62475+ pax_track_stack();
62476+
62477 validate_process_creds();
62478
62479 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62480@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
62481 struct thread_group_cred *tgcred = NULL;
62482 struct cred *new;
62483
62484+ pax_track_stack();
62485+
62486 #ifdef CONFIG_KEYS
62487 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62488 if (!tgcred)
62489@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
62490 struct cred *new;
62491 int ret;
62492
62493+ pax_track_stack();
62494+
62495 if (
62496 #ifdef CONFIG_KEYS
62497 !p->cred->thread_keyring &&
62498@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
62499 struct task_struct *task = current;
62500 const struct cred *old = task->real_cred;
62501
62502+ pax_track_stack();
62503+
62504 kdebug("commit_creds(%p{%d,%d})", new,
62505 atomic_read(&new->usage),
62506 read_cred_subscribers(new));
62507@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
62508
62509 get_cred(new); /* we will require a ref for the subj creds too */
62510
62511+ gr_set_role_label(task, new->uid, new->gid);
62512+
62513 /* dumpability changes */
62514 if (old->euid != new->euid ||
62515 old->egid != new->egid ||
62516@@ -549,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
62517 */
62518 void abort_creds(struct cred *new)
62519 {
62520+ pax_track_stack();
62521+
62522 kdebug("abort_creds(%p{%d,%d})", new,
62523 atomic_read(&new->usage),
62524 read_cred_subscribers(new));
62525@@ -572,6 +592,8 @@ const struct cred *override_creds(const
62526 {
62527 const struct cred *old = current->cred;
62528
62529+ pax_track_stack();
62530+
62531 kdebug("override_creds(%p{%d,%d})", new,
62532 atomic_read(&new->usage),
62533 read_cred_subscribers(new));
62534@@ -601,6 +623,8 @@ void revert_creds(const struct cred *old
62535 {
62536 const struct cred *override = current->cred;
62537
62538+ pax_track_stack();
62539+
62540 kdebug("revert_creds(%p{%d,%d})", old,
62541 atomic_read(&old->usage),
62542 read_cred_subscribers(old));
62543@@ -647,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
62544 const struct cred *old;
62545 struct cred *new;
62546
62547+ pax_track_stack();
62548+
62549 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62550 if (!new)
62551 return NULL;
62552@@ -701,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62553 */
62554 int set_security_override(struct cred *new, u32 secid)
62555 {
62556+ pax_track_stack();
62557+
62558 return security_kernel_act_as(new, secid);
62559 }
62560 EXPORT_SYMBOL(set_security_override);
62561@@ -720,6 +748,8 @@ int set_security_override_from_ctx(struc
62562 u32 secid;
62563 int ret;
62564
62565+ pax_track_stack();
62566+
62567 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62568 if (ret < 0)
62569 return ret;
62570diff -urNp linux-3.1.1/kernel/debug/debug_core.c linux-3.1.1/kernel/debug/debug_core.c
62571--- linux-3.1.1/kernel/debug/debug_core.c 2011-11-11 15:19:27.000000000 -0500
62572+++ linux-3.1.1/kernel/debug/debug_core.c 2011-11-16 18:39:08.000000000 -0500
62573@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
62574 */
62575 static atomic_t masters_in_kgdb;
62576 static atomic_t slaves_in_kgdb;
62577-static atomic_t kgdb_break_tasklet_var;
62578+static atomic_unchecked_t kgdb_break_tasklet_var;
62579 atomic_t kgdb_setting_breakpoint;
62580
62581 struct task_struct *kgdb_usethread;
62582@@ -129,7 +129,7 @@ int kgdb_single_step;
62583 static pid_t kgdb_sstep_pid;
62584
62585 /* to keep track of the CPU which is doing the single stepping*/
62586-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62587+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62588
62589 /*
62590 * If you are debugging a problem where roundup (the collection of
62591@@ -542,7 +542,7 @@ return_normal:
62592 * kernel will only try for the value of sstep_tries before
62593 * giving up and continuing on.
62594 */
62595- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62596+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62597 (kgdb_info[cpu].task &&
62598 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62599 atomic_set(&kgdb_active, -1);
62600@@ -636,8 +636,8 @@ cpu_master_loop:
62601 }
62602
62603 kgdb_restore:
62604- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62605- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62606+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62607+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62608 if (kgdb_info[sstep_cpu].task)
62609 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62610 else
62611@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
62612 static void kgdb_tasklet_bpt(unsigned long ing)
62613 {
62614 kgdb_breakpoint();
62615- atomic_set(&kgdb_break_tasklet_var, 0);
62616+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62617 }
62618
62619 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62620
62621 void kgdb_schedule_breakpoint(void)
62622 {
62623- if (atomic_read(&kgdb_break_tasklet_var) ||
62624+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62625 atomic_read(&kgdb_active) != -1 ||
62626 atomic_read(&kgdb_setting_breakpoint))
62627 return;
62628- atomic_inc(&kgdb_break_tasklet_var);
62629+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
62630 tasklet_schedule(&kgdb_tasklet_breakpoint);
62631 }
62632 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62633diff -urNp linux-3.1.1/kernel/debug/kdb/kdb_main.c linux-3.1.1/kernel/debug/kdb/kdb_main.c
62634--- linux-3.1.1/kernel/debug/kdb/kdb_main.c 2011-11-11 15:19:27.000000000 -0500
62635+++ linux-3.1.1/kernel/debug/kdb/kdb_main.c 2011-11-16 18:39:08.000000000 -0500
62636@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
62637 list_for_each_entry(mod, kdb_modules, list) {
62638
62639 kdb_printf("%-20s%8u 0x%p ", mod->name,
62640- mod->core_size, (void *)mod);
62641+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
62642 #ifdef CONFIG_MODULE_UNLOAD
62643 kdb_printf("%4d ", module_refcount(mod));
62644 #endif
62645@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
62646 kdb_printf(" (Loading)");
62647 else
62648 kdb_printf(" (Live)");
62649- kdb_printf(" 0x%p", mod->module_core);
62650+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
62651
62652 #ifdef CONFIG_MODULE_UNLOAD
62653 {
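The kdb_lsmod() change reflects the PAX_KERNEXEC module layout used throughout this patch: a module's core is split into an RX half (module_core_rx / core_size_rx, text and rodata) and an RW half (module_core_rw / core_size_rw, data and bss), so kdb now prints both base pointers and the summed size. Helpers such as within_module_core_rw(), used by the gcov hunk further down, reduce to a range check over the RW half; roughly:

	/* Sketch of the split-layout bookkeeping; field names mirror the hunks,
	 * the struct itself is a simplified stand-in for struct module. */
	struct module_core_split {
		void *module_core_rx;		/* executable + read-only portion */
		void *module_core_rw;		/* writable data portion */
		unsigned int core_size_rx;
		unsigned int core_size_rw;
	};

	static inline int within_module_core_rw(unsigned long addr,
						const struct module_core_split *mod)
	{
		unsigned long base = (unsigned long)mod->module_core_rw;

		return addr >= base && addr < base + mod->core_size_rw;
	}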
62654diff -urNp linux-3.1.1/kernel/events/core.c linux-3.1.1/kernel/events/core.c
62655--- linux-3.1.1/kernel/events/core.c 2011-11-11 15:19:27.000000000 -0500
62656+++ linux-3.1.1/kernel/events/core.c 2011-11-16 18:39:08.000000000 -0500
62657@@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_
62658 return 0;
62659 }
62660
62661-static atomic64_t perf_event_id;
62662+static atomic64_unchecked_t perf_event_id;
62663
62664 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
62665 enum event_type_t event_type);
62666@@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info
62667
62668 static inline u64 perf_event_count(struct perf_event *event)
62669 {
62670- return local64_read(&event->count) + atomic64_read(&event->child_count);
62671+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
62672 }
62673
62674 static u64 perf_event_read(struct perf_event *event)
62675@@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_ev
62676 mutex_lock(&event->child_mutex);
62677 total += perf_event_read(event);
62678 *enabled += event->total_time_enabled +
62679- atomic64_read(&event->child_total_time_enabled);
62680+ atomic64_read_unchecked(&event->child_total_time_enabled);
62681 *running += event->total_time_running +
62682- atomic64_read(&event->child_total_time_running);
62683+ atomic64_read_unchecked(&event->child_total_time_running);
62684
62685 list_for_each_entry(child, &event->child_list, child_list) {
62686 total += perf_event_read(child);
62687@@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct p
62688 userpg->offset -= local64_read(&event->hw.prev_count);
62689
62690 userpg->time_enabled = enabled +
62691- atomic64_read(&event->child_total_time_enabled);
62692+ atomic64_read_unchecked(&event->child_total_time_enabled);
62693
62694 userpg->time_running = running +
62695- atomic64_read(&event->child_total_time_running);
62696+ atomic64_read_unchecked(&event->child_total_time_running);
62697
62698 barrier();
62699 ++userpg->lock;
62700@@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct
62701 values[n++] = perf_event_count(event);
62702 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
62703 values[n++] = enabled +
62704- atomic64_read(&event->child_total_time_enabled);
62705+ atomic64_read_unchecked(&event->child_total_time_enabled);
62706 }
62707 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
62708 values[n++] = running +
62709- atomic64_read(&event->child_total_time_running);
62710+ atomic64_read_unchecked(&event->child_total_time_running);
62711 }
62712 if (read_format & PERF_FORMAT_ID)
62713 values[n++] = primary_event_id(event);
62714@@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct
62715 * need to add enough zero bytes after the string to handle
62716 * the 64bit alignment we do later.
62717 */
62718- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
62719+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
62720 if (!buf) {
62721 name = strncpy(tmp, "//enomem", sizeof(tmp));
62722 goto got_name;
62723 }
62724- name = d_path(&file->f_path, buf, PATH_MAX);
62725+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
62726 if (IS_ERR(name)) {
62727 name = strncpy(tmp, "//toolong", sizeof(tmp));
62728 goto got_name;
62729@@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr
62730 event->parent = parent_event;
62731
62732 event->ns = get_pid_ns(current->nsproxy->pid_ns);
62733- event->id = atomic64_inc_return(&perf_event_id);
62734+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
62735
62736 event->state = PERF_EVENT_STATE_INACTIVE;
62737
62738@@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf
62739 /*
62740 * Add back the child's count to the parent's count:
62741 */
62742- atomic64_add(child_val, &parent_event->child_count);
62743- atomic64_add(child_event->total_time_enabled,
62744+ atomic64_add_unchecked(child_val, &parent_event->child_count);
62745+ atomic64_add_unchecked(child_event->total_time_enabled,
62746 &parent_event->child_total_time_enabled);
62747- atomic64_add(child_event->total_time_running,
62748+ atomic64_add_unchecked(child_event->total_time_running,
62749 &parent_event->child_total_time_running);
62750
62751 /*
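Besides the atomic_unchecked_t conversions, the perf change worth noting is in perf_event_mmap_event(): the buffer for d_path() is still PATH_MAX bytes, but d_path() is now told it only has PATH_MAX - sizeof(u64), so the zero padding later appended for 64-bit alignment can never run past the end of the allocation. The sizing rule in isolation, as a small sketch:

	/* Sketch of the d_path() sizing fix: reserve sizeof(u64) slack for the
	 * alignment padding appended after the returned string. */
	#include <linux/dcache.h>
	#include <linux/fs.h>
	#include <linux/limits.h>
	#include <linux/slab.h>

	static const char *mmap_event_name(struct file *file, char **bufp)
	{
		char *buf = kzalloc(PATH_MAX, GFP_KERNEL);

		*bufp = buf;
		if (!buf)
			return NULL;
		/* keep sizeof(u64) spare so later 8-byte padding stays in bounds */
		return d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
	}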
62752diff -urNp linux-3.1.1/kernel/exit.c linux-3.1.1/kernel/exit.c
62753--- linux-3.1.1/kernel/exit.c 2011-11-11 15:19:27.000000000 -0500
62754+++ linux-3.1.1/kernel/exit.c 2011-11-16 19:33:48.000000000 -0500
62755@@ -57,6 +57,10 @@
62756 #include <asm/pgtable.h>
62757 #include <asm/mmu_context.h>
62758
62759+#ifdef CONFIG_GRKERNSEC
62760+extern rwlock_t grsec_exec_file_lock;
62761+#endif
62762+
62763 static void exit_mm(struct task_struct * tsk);
62764
62765 static void __unhash_process(struct task_struct *p, bool group_dead)
62766@@ -168,6 +172,10 @@ void release_task(struct task_struct * p
62767 struct task_struct *leader;
62768 int zap_leader;
62769 repeat:
62770+#ifdef CONFIG_NET
62771+ gr_del_task_from_ip_table(p);
62772+#endif
62773+
62774 /* don't need to get the RCU readlock here - the process is dead and
62775 * can't be modifying its own credentials. But shut RCU-lockdep up */
62776 rcu_read_lock();
62777@@ -324,11 +332,22 @@ static void reparent_to_kthreadd(void)
62778 {
62779 write_lock_irq(&tasklist_lock);
62780
62781+#ifdef CONFIG_GRKERNSEC
62782+ write_lock(&grsec_exec_file_lock);
62783+ if (current->exec_file) {
62784+ fput(current->exec_file);
62785+ current->exec_file = NULL;
62786+ }
62787+ write_unlock(&grsec_exec_file_lock);
62788+#endif
62789+
62790 ptrace_unlink(current);
62791 /* Reparent to init */
62792 current->real_parent = current->parent = kthreadd_task;
62793 list_move_tail(&current->sibling, &current->real_parent->children);
62794
62795+ gr_set_kernel_label(current);
62796+
62797 /* Set the exit signal to SIGCHLD so we signal init on exit */
62798 current->exit_signal = SIGCHLD;
62799
62800@@ -380,7 +399,7 @@ int allow_signal(int sig)
62801 * know it'll be handled, so that they don't get converted to
62802 * SIGKILL or just silently dropped.
62803 */
62804- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62805+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62806 recalc_sigpending();
62807 spin_unlock_irq(&current->sighand->siglock);
62808 return 0;
62809@@ -416,6 +435,17 @@ void daemonize(const char *name, ...)
62810 vsnprintf(current->comm, sizeof(current->comm), name, args);
62811 va_end(args);
62812
62813+#ifdef CONFIG_GRKERNSEC
62814+ write_lock(&grsec_exec_file_lock);
62815+ if (current->exec_file) {
62816+ fput(current->exec_file);
62817+ current->exec_file = NULL;
62818+ }
62819+ write_unlock(&grsec_exec_file_lock);
62820+#endif
62821+
62822+ gr_set_kernel_label(current);
62823+
62824 /*
62825 * If we were started as result of loading a module, close all of the
62826 * user space pages. We don't need them, and if we didn't close them
62827@@ -895,6 +925,8 @@ NORET_TYPE void do_exit(long code)
62828 struct task_struct *tsk = current;
62829 int group_dead;
62830
62831+ set_fs(USER_DS);
62832+
62833 profile_task_exit(tsk);
62834
62835 WARN_ON(blk_needs_flush_plug(tsk));
62836@@ -911,7 +943,6 @@ NORET_TYPE void do_exit(long code)
62837 * mm_release()->clear_child_tid() from writing to a user-controlled
62838 * kernel address.
62839 */
62840- set_fs(USER_DS);
62841
62842 ptrace_event(PTRACE_EVENT_EXIT, code);
62843
62844@@ -973,6 +1004,9 @@ NORET_TYPE void do_exit(long code)
62845 tsk->exit_code = code;
62846 taskstats_exit(tsk, group_dead);
62847
62848+ gr_acl_handle_psacct(tsk, code);
62849+ gr_acl_handle_exit();
62850+
62851 exit_mm(tsk);
62852
62853 if (group_dead)
62854diff -urNp linux-3.1.1/kernel/fork.c linux-3.1.1/kernel/fork.c
62855--- linux-3.1.1/kernel/fork.c 2011-11-11 15:19:27.000000000 -0500
62856+++ linux-3.1.1/kernel/fork.c 2011-11-16 19:36:31.000000000 -0500
62857@@ -285,7 +285,7 @@ static struct task_struct *dup_task_stru
62858 *stackend = STACK_END_MAGIC; /* for overflow detection */
62859
62860 #ifdef CONFIG_CC_STACKPROTECTOR
62861- tsk->stack_canary = get_random_int();
62862+ tsk->stack_canary = pax_get_random_long();
62863 #endif
62864
62865 /*
62866@@ -309,13 +309,77 @@ out:
62867 }
62868
62869 #ifdef CONFIG_MMU
62870+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
62871+{
62872+ struct vm_area_struct *tmp;
62873+ unsigned long charge;
62874+ struct mempolicy *pol;
62875+ struct file *file;
62876+
62877+ charge = 0;
62878+ if (mpnt->vm_flags & VM_ACCOUNT) {
62879+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62880+ if (security_vm_enough_memory(len))
62881+ goto fail_nomem;
62882+ charge = len;
62883+ }
62884+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62885+ if (!tmp)
62886+ goto fail_nomem;
62887+ *tmp = *mpnt;
62888+ tmp->vm_mm = mm;
62889+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
62890+ pol = mpol_dup(vma_policy(mpnt));
62891+ if (IS_ERR(pol))
62892+ goto fail_nomem_policy;
62893+ vma_set_policy(tmp, pol);
62894+ if (anon_vma_fork(tmp, mpnt))
62895+ goto fail_nomem_anon_vma_fork;
62896+ tmp->vm_flags &= ~VM_LOCKED;
62897+ tmp->vm_next = tmp->vm_prev = NULL;
62898+ tmp->vm_mirror = NULL;
62899+ file = tmp->vm_file;
62900+ if (file) {
62901+ struct inode *inode = file->f_path.dentry->d_inode;
62902+ struct address_space *mapping = file->f_mapping;
62903+
62904+ get_file(file);
62905+ if (tmp->vm_flags & VM_DENYWRITE)
62906+ atomic_dec(&inode->i_writecount);
62907+ mutex_lock(&mapping->i_mmap_mutex);
62908+ if (tmp->vm_flags & VM_SHARED)
62909+ mapping->i_mmap_writable++;
62910+ flush_dcache_mmap_lock(mapping);
62911+ /* insert tmp into the share list, just after mpnt */
62912+ vma_prio_tree_add(tmp, mpnt);
62913+ flush_dcache_mmap_unlock(mapping);
62914+ mutex_unlock(&mapping->i_mmap_mutex);
62915+ }
62916+
62917+ /*
62918+ * Clear hugetlb-related page reserves for children. This only
62919+ * affects MAP_PRIVATE mappings. Faults generated by the child
62920+ * are not guaranteed to succeed, even if read-only
62921+ */
62922+ if (is_vm_hugetlb_page(tmp))
62923+ reset_vma_resv_huge_pages(tmp);
62924+
62925+ return tmp;
62926+
62927+fail_nomem_anon_vma_fork:
62928+ mpol_put(pol);
62929+fail_nomem_policy:
62930+ kmem_cache_free(vm_area_cachep, tmp);
62931+fail_nomem:
62932+ vm_unacct_memory(charge);
62933+ return NULL;
62934+}
62935+
62936 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
62937 {
62938 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
62939 struct rb_node **rb_link, *rb_parent;
62940 int retval;
62941- unsigned long charge;
62942- struct mempolicy *pol;
62943
62944 down_write(&oldmm->mmap_sem);
62945 flush_cache_dup_mm(oldmm);
62946@@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm
62947 mm->locked_vm = 0;
62948 mm->mmap = NULL;
62949 mm->mmap_cache = NULL;
62950- mm->free_area_cache = oldmm->mmap_base;
62951- mm->cached_hole_size = ~0UL;
62952+ mm->free_area_cache = oldmm->free_area_cache;
62953+ mm->cached_hole_size = oldmm->cached_hole_size;
62954 mm->map_count = 0;
62955 cpumask_clear(mm_cpumask(mm));
62956 mm->mm_rb = RB_ROOT;
62957@@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm
62958
62959 prev = NULL;
62960 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
62961- struct file *file;
62962-
62963 if (mpnt->vm_flags & VM_DONTCOPY) {
62964 long pages = vma_pages(mpnt);
62965 mm->total_vm -= pages;
62966@@ -353,55 +415,13 @@ static int dup_mmap(struct mm_struct *mm
62967 -pages);
62968 continue;
62969 }
62970- charge = 0;
62971- if (mpnt->vm_flags & VM_ACCOUNT) {
62972- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62973- if (security_vm_enough_memory(len))
62974- goto fail_nomem;
62975- charge = len;
62976- }
62977- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62978- if (!tmp)
62979- goto fail_nomem;
62980- *tmp = *mpnt;
62981- INIT_LIST_HEAD(&tmp->anon_vma_chain);
62982- pol = mpol_dup(vma_policy(mpnt));
62983- retval = PTR_ERR(pol);
62984- if (IS_ERR(pol))
62985- goto fail_nomem_policy;
62986- vma_set_policy(tmp, pol);
62987- tmp->vm_mm = mm;
62988- if (anon_vma_fork(tmp, mpnt))
62989- goto fail_nomem_anon_vma_fork;
62990- tmp->vm_flags &= ~VM_LOCKED;
62991- tmp->vm_next = tmp->vm_prev = NULL;
62992- file = tmp->vm_file;
62993- if (file) {
62994- struct inode *inode = file->f_path.dentry->d_inode;
62995- struct address_space *mapping = file->f_mapping;
62996-
62997- get_file(file);
62998- if (tmp->vm_flags & VM_DENYWRITE)
62999- atomic_dec(&inode->i_writecount);
63000- mutex_lock(&mapping->i_mmap_mutex);
63001- if (tmp->vm_flags & VM_SHARED)
63002- mapping->i_mmap_writable++;
63003- flush_dcache_mmap_lock(mapping);
63004- /* insert tmp into the share list, just after mpnt */
63005- vma_prio_tree_add(tmp, mpnt);
63006- flush_dcache_mmap_unlock(mapping);
63007- mutex_unlock(&mapping->i_mmap_mutex);
63008+ tmp = dup_vma(mm, mpnt);
63009+ if (!tmp) {
63010+ retval = -ENOMEM;
63011+ goto out;
63012 }
63013
63014 /*
63015- * Clear hugetlb-related page reserves for children. This only
63016- * affects MAP_PRIVATE mappings. Faults generated by the child
63017- * are not guaranteed to succeed, even if read-only
63018- */
63019- if (is_vm_hugetlb_page(tmp))
63020- reset_vma_resv_huge_pages(tmp);
63021-
63022- /*
63023 * Link in the new vma and copy the page table entries.
63024 */
63025 *pprev = tmp;
63026@@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm
63027 if (retval)
63028 goto out;
63029 }
63030+
63031+#ifdef CONFIG_PAX_SEGMEXEC
63032+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63033+ struct vm_area_struct *mpnt_m;
63034+
63035+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63036+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63037+
63038+ if (!mpnt->vm_mirror)
63039+ continue;
63040+
63041+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63042+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63043+ mpnt->vm_mirror = mpnt_m;
63044+ } else {
63045+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63046+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63047+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63048+ mpnt->vm_mirror->vm_mirror = mpnt;
63049+ }
63050+ }
63051+ BUG_ON(mpnt_m);
63052+ }
63053+#endif
63054+
63055 /* a new mm has just been created */
63056 arch_dup_mmap(oldmm, mm);
63057 retval = 0;
63058@@ -430,14 +475,6 @@ out:
63059 flush_tlb_mm(oldmm);
63060 up_write(&oldmm->mmap_sem);
63061 return retval;
63062-fail_nomem_anon_vma_fork:
63063- mpol_put(pol);
63064-fail_nomem_policy:
63065- kmem_cache_free(vm_area_cachep, tmp);
63066-fail_nomem:
63067- retval = -ENOMEM;
63068- vm_unacct_memory(charge);
63069- goto out;
63070 }
63071
63072 static inline int mm_alloc_pgd(struct mm_struct *mm)
63073@@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_f
63074 spin_unlock(&fs->lock);
63075 return -EAGAIN;
63076 }
63077- fs->users++;
63078+ atomic_inc(&fs->users);
63079 spin_unlock(&fs->lock);
63080 return 0;
63081 }
63082 tsk->fs = copy_fs_struct(fs);
63083 if (!tsk->fs)
63084 return -ENOMEM;
63085+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63086 return 0;
63087 }
63088
63089@@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(
63090 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63091 #endif
63092 retval = -EAGAIN;
63093+
63094+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63095+
63096 if (atomic_read(&p->real_cred->user->processes) >=
63097 task_rlimit(p, RLIMIT_NPROC)) {
63098 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63099@@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(
63100 if (clone_flags & CLONE_THREAD)
63101 p->tgid = current->tgid;
63102
63103+ gr_copy_label(p);
63104+
63105 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63106 /*
63107 * Clear TID on mm_release()?
63108@@ -1428,6 +1471,8 @@ bad_fork_cleanup_count:
63109 bad_fork_free:
63110 free_task(p);
63111 fork_out:
63112+ gr_log_forkfail(retval);
63113+
63114 return ERR_PTR(retval);
63115 }
63116
63117@@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags,
63118 if (clone_flags & CLONE_PARENT_SETTID)
63119 put_user(nr, parent_tidptr);
63120
63121+ gr_handle_brute_check();
63122+
63123 if (clone_flags & CLONE_VFORK) {
63124 p->vfork_done = &vfork;
63125 init_completion(&vfork);
63126@@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unsh
63127 return 0;
63128
63129 /* don't need lock here; in the worst case we'll do useless copy */
63130- if (fs->users == 1)
63131+ if (atomic_read(&fs->users) == 1)
63132 return 0;
63133
63134 *new_fsp = copy_fs_struct(fs);
63135@@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
63136 fs = current->fs;
63137 spin_lock(&fs->lock);
63138 current->fs = new_fs;
63139- if (--fs->users)
63140+ gr_set_chroot_entries(current, &current->fs->root);
63141+ if (atomic_dec_return(&fs->users))
63142 new_fs = NULL;
63143 else
63144 new_fs = fs;
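The fork.c changes do three separable things: the per-VMA duplication body of dup_mmap() is hoisted into a dup_vma() helper, which also clears the new vm_mirror field; a PAX_SEGMEXEC pass re-links mirrored VMA pairs in the child after the copy; and fs_struct's users count becomes an atomic_t so copy_fs()/unshare_fs() can manipulate it with plain atomic ops. The refcount conversion in miniature, with a stand-in struct:

	/* Sketch of the fs->users conversion: an int guarded by fs->lock turns
	 * into an atomic_t, so the common paths need only atomic ops. */
	#include <linux/atomic.h>
	#include <linux/spinlock.h>

	struct fs_struct_like {
		atomic_t users;
		spinlock_t lock;
	};

	static void share_fs(struct fs_struct_like *fs)
	{
		atomic_inc(&fs->users);		/* was: fs->users++ under fs->lock */
	}

	static int last_user_gone(struct fs_struct_like *fs)
	{
		return atomic_dec_return(&fs->users) == 0;	/* was: --fs->users */
	}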
63145diff -urNp linux-3.1.1/kernel/futex.c linux-3.1.1/kernel/futex.c
63146--- linux-3.1.1/kernel/futex.c 2011-11-11 15:19:27.000000000 -0500
63147+++ linux-3.1.1/kernel/futex.c 2011-11-16 18:40:44.000000000 -0500
63148@@ -54,6 +54,7 @@
63149 #include <linux/mount.h>
63150 #include <linux/pagemap.h>
63151 #include <linux/syscalls.h>
63152+#include <linux/ptrace.h>
63153 #include <linux/signal.h>
63154 #include <linux/module.h>
63155 #include <linux/magic.h>
63156@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
63157 struct page *page, *page_head;
63158 int err, ro = 0;
63159
63160+#ifdef CONFIG_PAX_SEGMEXEC
63161+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63162+ return -EFAULT;
63163+#endif
63164+
63165 /*
63166 * The futex address must be "naturally" aligned.
63167 */
63168@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
63169 struct futex_q q = futex_q_init;
63170 int ret;
63171
63172+ pax_track_stack();
63173+
63174 if (!bitset)
63175 return -EINVAL;
63176 q.bitset = bitset;
63177@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
63178 struct futex_q q = futex_q_init;
63179 int res, ret;
63180
63181+ pax_track_stack();
63182+
63183 if (!bitset)
63184 return -EINVAL;
63185
63186@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63187 {
63188 struct robust_list_head __user *head;
63189 unsigned long ret;
63190+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63191 const struct cred *cred = current_cred(), *pcred;
63192+#endif
63193
63194 if (!futex_cmpxchg_enabled)
63195 return -ENOSYS;
63196@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63197 if (!p)
63198 goto err_unlock;
63199 ret = -EPERM;
63200+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63201+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63202+ goto err_unlock;
63203+#else
63204 pcred = __task_cred(p);
63205 /* If victim is in different user_ns, then uids are not
63206 comparable, so we must have CAP_SYS_PTRACE */
63207@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63208 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63209 goto err_unlock;
63210 ok:
63211+#endif
63212 head = p->robust_list;
63213 rcu_read_unlock();
63214 }
63215@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
63216 {
63217 u32 curval;
63218 int i;
63219+ mm_segment_t oldfs;
63220
63221 /*
63222 * This will fail and we want it. Some arch implementations do
63223@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
63224 * implementation, the non-functional ones will return
63225 * -ENOSYS.
63226 */
63227+ oldfs = get_fs();
63228+ set_fs(USER_DS);
63229 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63230 futex_cmpxchg_enabled = 1;
63231+ set_fs(oldfs);
63232
63233 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63234 plist_head_init(&futex_queues[i].chain);
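Three distinct tweaks in kernel/futex.c: get_futex_key() rejects addresses above SEGMEXEC_TASK_SIZE when the task is segment-mirrored, futex_init() performs its cmpxchg probe under USER_DS instead of whatever address limit happens to be active, and sys_get_robust_list() (plus its compat twin below) replaces the hand-rolled uid/euid comparison with ptrace_may_access() when GRKERNSEC_PROC_MEMMAP is enabled. The last one is simply a different gate in the same spot; approximately:

	/* Sketch of the access gate: may the caller read this task's robust
	 * list head?  PTRACE_MODE_READ covers the uid checks and LSM policy. */
	#include <linux/errno.h>
	#include <linux/ptrace.h>

	static int robust_list_access_ok(struct task_struct *p)
	{
		return ptrace_may_access(p, PTRACE_MODE_READ) ? 0 : -EPERM;
	}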
63235diff -urNp linux-3.1.1/kernel/futex_compat.c linux-3.1.1/kernel/futex_compat.c
63236--- linux-3.1.1/kernel/futex_compat.c 2011-11-11 15:19:27.000000000 -0500
63237+++ linux-3.1.1/kernel/futex_compat.c 2011-11-16 18:40:44.000000000 -0500
63238@@ -10,6 +10,7 @@
63239 #include <linux/compat.h>
63240 #include <linux/nsproxy.h>
63241 #include <linux/futex.h>
63242+#include <linux/ptrace.h>
63243
63244 #include <asm/uaccess.h>
63245
63246@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
63247 {
63248 struct compat_robust_list_head __user *head;
63249 unsigned long ret;
63250- const struct cred *cred = current_cred(), *pcred;
63251+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63252+ const struct cred *cred = current_cred();
63253+ const struct cred *pcred;
63254+#endif
63255
63256 if (!futex_cmpxchg_enabled)
63257 return -ENOSYS;
63258@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
63259 if (!p)
63260 goto err_unlock;
63261 ret = -EPERM;
63262+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63263+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63264+ goto err_unlock;
63265+#else
63266 pcred = __task_cred(p);
63267 /* If victim is in different user_ns, then uids are not
63268 comparable, so we must have CAP_SYS_PTRACE */
63269@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
63270 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63271 goto err_unlock;
63272 ok:
63273+#endif
63274 head = p->compat_robust_list;
63275 rcu_read_unlock();
63276 }
63277diff -urNp linux-3.1.1/kernel/gcov/base.c linux-3.1.1/kernel/gcov/base.c
63278--- linux-3.1.1/kernel/gcov/base.c 2011-11-11 15:19:27.000000000 -0500
63279+++ linux-3.1.1/kernel/gcov/base.c 2011-11-16 18:39:08.000000000 -0500
63280@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63281 }
63282
63283 #ifdef CONFIG_MODULES
63284-static inline int within(void *addr, void *start, unsigned long size)
63285-{
63286- return ((addr >= start) && (addr < start + size));
63287-}
63288-
63289 /* Update list and generate events when modules are unloaded. */
63290 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63291 void *data)
63292@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63293 prev = NULL;
63294 /* Remove entries located in module from linked list. */
63295 for (info = gcov_info_head; info; info = info->next) {
63296- if (within(info, mod->module_core, mod->core_size)) {
63297+ if (within_module_core_rw((unsigned long)info, mod)) {
63298 if (prev)
63299 prev->next = info->next;
63300 else
63301diff -urNp linux-3.1.1/kernel/hrtimer.c linux-3.1.1/kernel/hrtimer.c
63302--- linux-3.1.1/kernel/hrtimer.c 2011-11-11 15:19:27.000000000 -0500
63303+++ linux-3.1.1/kernel/hrtimer.c 2011-11-16 18:39:08.000000000 -0500
63304@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63305 local_irq_restore(flags);
63306 }
63307
63308-static void run_hrtimer_softirq(struct softirq_action *h)
63309+static void run_hrtimer_softirq(void)
63310 {
63311 hrtimer_peek_ahead_timers();
63312 }
63313diff -urNp linux-3.1.1/kernel/jump_label.c linux-3.1.1/kernel/jump_label.c
63314--- linux-3.1.1/kernel/jump_label.c 2011-11-11 15:19:27.000000000 -0500
63315+++ linux-3.1.1/kernel/jump_label.c 2011-11-16 18:39:08.000000000 -0500
63316@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
63317
63318 size = (((unsigned long)stop - (unsigned long)start)
63319 / sizeof(struct jump_entry));
63320+ pax_open_kernel();
63321 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63322+ pax_close_kernel();
63323 }
63324
63325 static void jump_label_update(struct jump_label_key *key, int enable);
63326@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
63327 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63328 struct jump_entry *iter;
63329
63330+ pax_open_kernel();
63331 for (iter = iter_start; iter < iter_stop; iter++) {
63332 if (within_module_init(iter->code, mod))
63333 iter->code = 0;
63334 }
63335+ pax_close_kernel();
63336 }
63337
63338 static int
63339diff -urNp linux-3.1.1/kernel/kallsyms.c linux-3.1.1/kernel/kallsyms.c
63340--- linux-3.1.1/kernel/kallsyms.c 2011-11-11 15:19:27.000000000 -0500
63341+++ linux-3.1.1/kernel/kallsyms.c 2011-11-16 18:40:44.000000000 -0500
63342@@ -11,6 +11,9 @@
63343 * Changed the compression method from stem compression to "table lookup"
63344 * compression (see scripts/kallsyms.c for a more complete description)
63345 */
63346+#ifdef CONFIG_GRKERNSEC_HIDESYM
63347+#define __INCLUDED_BY_HIDESYM 1
63348+#endif
63349 #include <linux/kallsyms.h>
63350 #include <linux/module.h>
63351 #include <linux/init.h>
63352@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
63353
63354 static inline int is_kernel_inittext(unsigned long addr)
63355 {
63356+ if (system_state != SYSTEM_BOOTING)
63357+ return 0;
63358+
63359 if (addr >= (unsigned long)_sinittext
63360 && addr <= (unsigned long)_einittext)
63361 return 1;
63362 return 0;
63363 }
63364
63365+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63366+#ifdef CONFIG_MODULES
63367+static inline int is_module_text(unsigned long addr)
63368+{
63369+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63370+ return 1;
63371+
63372+ addr = ktla_ktva(addr);
63373+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63374+}
63375+#else
63376+static inline int is_module_text(unsigned long addr)
63377+{
63378+ return 0;
63379+}
63380+#endif
63381+#endif
63382+
63383 static inline int is_kernel_text(unsigned long addr)
63384 {
63385 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63386@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
63387
63388 static inline int is_kernel(unsigned long addr)
63389 {
63390+
63391+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63392+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63393+ return 1;
63394+
63395+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63396+#else
63397 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63398+#endif
63399+
63400 return 1;
63401 return in_gate_area_no_mm(addr);
63402 }
63403
63404 static int is_ksym_addr(unsigned long addr)
63405 {
63406+
63407+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63408+ if (is_module_text(addr))
63409+ return 0;
63410+#endif
63411+
63412 if (all_var)
63413 return is_kernel(addr);
63414
63415@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
63416
63417 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63418 {
63419- iter->name[0] = '\0';
63420 iter->nameoff = get_symbol_offset(new_pos);
63421 iter->pos = new_pos;
63422 }
63423@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
63424 {
63425 struct kallsym_iter *iter = m->private;
63426
63427+#ifdef CONFIG_GRKERNSEC_HIDESYM
63428+ if (current_uid())
63429+ return 0;
63430+#endif
63431+
63432 /* Some debugging symbols have no name. Ignore them. */
63433 if (!iter->name[0])
63434 return 0;
63435@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
63436 struct kallsym_iter *iter;
63437 int ret;
63438
63439- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63440+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63441 if (!iter)
63442 return -ENOMEM;
63443 reset_iter(iter, 0);
63444diff -urNp linux-3.1.1/kernel/kexec.c linux-3.1.1/kernel/kexec.c
63445--- linux-3.1.1/kernel/kexec.c 2011-11-11 15:19:27.000000000 -0500
63446+++ linux-3.1.1/kernel/kexec.c 2011-11-16 18:39:08.000000000 -0500
63447@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
63448 unsigned long flags)
63449 {
63450 struct compat_kexec_segment in;
63451- struct kexec_segment out, __user *ksegments;
63452+ struct kexec_segment out;
63453+ struct kexec_segment __user *ksegments;
63454 unsigned long i, result;
63455
63456 /* Don't allow clients that don't understand the native
63457diff -urNp linux-3.1.1/kernel/kmod.c linux-3.1.1/kernel/kmod.c
63458--- linux-3.1.1/kernel/kmod.c 2011-11-11 15:19:27.000000000 -0500
63459+++ linux-3.1.1/kernel/kmod.c 2011-11-16 18:40:44.000000000 -0500
63460@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63461 * If module auto-loading support is disabled then this function
63462 * becomes a no-operation.
63463 */
63464-int __request_module(bool wait, const char *fmt, ...)
63465+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63466 {
63467- va_list args;
63468 char module_name[MODULE_NAME_LEN];
63469 unsigned int max_modprobes;
63470 int ret;
63471- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63472+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63473 static char *envp[] = { "HOME=/",
63474 "TERM=linux",
63475 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63476@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
63477 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63478 static int kmod_loop_msg;
63479
63480- va_start(args, fmt);
63481- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63482- va_end(args);
63483+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63484 if (ret >= MODULE_NAME_LEN)
63485 return -ENAMETOOLONG;
63486
63487@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
63488 if (ret)
63489 return ret;
63490
63491+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63492+ if (!current_uid()) {
63493+ /* hack to work around consolekit/udisks stupidity */
63494+ read_lock(&tasklist_lock);
63495+ if (!strcmp(current->comm, "mount") &&
63496+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63497+ read_unlock(&tasklist_lock);
63498+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63499+ return -EPERM;
63500+ }
63501+ read_unlock(&tasklist_lock);
63502+ }
63503+#endif
63504+
63505 /* If modprobe needs a service that is in a module, we get a recursive
63506 * loop. Limit the number of running kmod threads to max_threads/2 or
63507 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63508@@ -133,6 +144,47 @@ int __request_module(bool wait, const ch
63509 atomic_dec(&kmod_concurrent);
63510 return ret;
63511 }
63512+
63513+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63514+{
63515+ va_list args;
63516+ int ret;
63517+
63518+ va_start(args, fmt);
63519+ ret = ____request_module(wait, module_param, fmt, args);
63520+ va_end(args);
63521+
63522+ return ret;
63523+}
63524+
63525+int __request_module(bool wait, const char *fmt, ...)
63526+{
63527+ va_list args;
63528+ int ret;
63529+
63530+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63531+ if (current_uid()) {
63532+ char module_param[MODULE_NAME_LEN];
63533+
63534+ memset(module_param, 0, sizeof(module_param));
63535+
63536+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63537+
63538+ va_start(args, fmt);
63539+ ret = ____request_module(wait, module_param, fmt, args);
63540+ va_end(args);
63541+
63542+ return ret;
63543+ }
63544+#endif
63545+
63546+ va_start(args, fmt);
63547+ ret = ____request_module(wait, NULL, fmt, args);
63548+ va_end(args);
63549+
63550+ return ret;
63551+}
63552+
63553 EXPORT_SYMBOL(__request_module);
63554 #endif /* CONFIG_MODULES */
63555
63556@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
63557 *
63558 * Thus the __user pointer cast is valid here.
63559 */
63560- sys_wait4(pid, (int __user *)&ret, 0, NULL);
63561+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63562
63563 /*
63564 * If ret is 0, either ____call_usermodehelper failed and the
63565diff -urNp linux-3.1.1/kernel/kprobes.c linux-3.1.1/kernel/kprobes.c
63566--- linux-3.1.1/kernel/kprobes.c 2011-11-11 15:19:27.000000000 -0500
63567+++ linux-3.1.1/kernel/kprobes.c 2011-11-16 18:39:08.000000000 -0500
63568@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
63569 * kernel image and loaded module images reside. This is required
63570 * so x86_64 can correctly handle the %rip-relative fixups.
63571 */
63572- kip->insns = module_alloc(PAGE_SIZE);
63573+ kip->insns = module_alloc_exec(PAGE_SIZE);
63574 if (!kip->insns) {
63575 kfree(kip);
63576 return NULL;
63577@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
63578 */
63579 if (!list_is_singular(&kip->list)) {
63580 list_del(&kip->list);
63581- module_free(NULL, kip->insns);
63582+ module_free_exec(NULL, kip->insns);
63583 kfree(kip);
63584 }
63585 return 1;
63586@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
63587 {
63588 int i, err = 0;
63589 unsigned long offset = 0, size = 0;
63590- char *modname, namebuf[128];
63591+ char *modname, namebuf[KSYM_NAME_LEN];
63592 const char *symbol_name;
63593 void *addr;
63594 struct kprobe_blackpoint *kb;
63595@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(st
63596 const char *sym = NULL;
63597 unsigned int i = *(loff_t *) v;
63598 unsigned long offset = 0;
63599- char *modname, namebuf[128];
63600+ char *modname, namebuf[KSYM_NAME_LEN];
63601
63602 head = &kprobe_table[i];
63603 preempt_disable();
63604diff -urNp linux-3.1.1/kernel/lockdep.c linux-3.1.1/kernel/lockdep.c
63605--- linux-3.1.1/kernel/lockdep.c 2011-11-11 15:19:27.000000000 -0500
63606+++ linux-3.1.1/kernel/lockdep.c 2011-11-16 18:39:08.000000000 -0500
63607@@ -583,6 +583,10 @@ static int static_obj(void *obj)
63608 end = (unsigned long) &_end,
63609 addr = (unsigned long) obj;
63610
63611+#ifdef CONFIG_PAX_KERNEXEC
63612+ start = ktla_ktva(start);
63613+#endif
63614+
63615 /*
63616 * static variable?
63617 */
63618@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
63619 if (!static_obj(lock->key)) {
63620 debug_locks_off();
63621 printk("INFO: trying to register non-static key.\n");
63622+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63623 printk("the code is fine but needs lockdep annotation.\n");
63624 printk("turning off the locking correctness validator.\n");
63625 dump_stack();
63626@@ -2948,7 +2953,7 @@ static int __lock_acquire(struct lockdep
63627 if (!class)
63628 return 0;
63629 }
63630- atomic_inc((atomic_t *)&class->ops);
63631+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63632 if (very_verbose(class)) {
63633 printk("\nacquire class [%p] %s", class->key, class->name);
63634 if (class->name_version > 1)
63635diff -urNp linux-3.1.1/kernel/lockdep_proc.c linux-3.1.1/kernel/lockdep_proc.c
63636--- linux-3.1.1/kernel/lockdep_proc.c 2011-11-11 15:19:27.000000000 -0500
63637+++ linux-3.1.1/kernel/lockdep_proc.c 2011-11-16 18:39:08.000000000 -0500
63638@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63639
63640 static void print_name(struct seq_file *m, struct lock_class *class)
63641 {
63642- char str[128];
63643+ char str[KSYM_NAME_LEN];
63644 const char *name = class->name;
63645
63646 if (!name) {
63647diff -urNp linux-3.1.1/kernel/module.c linux-3.1.1/kernel/module.c
63648--- linux-3.1.1/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
63649+++ linux-3.1.1/kernel/module.c 2011-11-16 18:40:44.000000000 -0500
63650@@ -58,6 +58,7 @@
63651 #include <linux/jump_label.h>
63652 #include <linux/pfn.h>
63653 #include <linux/bsearch.h>
63654+#include <linux/grsecurity.h>
63655
63656 #define CREATE_TRACE_POINTS
63657 #include <trace/events/module.h>
63658@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
63659
63660 /* Bounds of module allocation, for speeding __module_address.
63661 * Protected by module_mutex. */
63662-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63663+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63664+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63665
63666 int register_module_notifier(struct notifier_block * nb)
63667 {
63668@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
63669 return true;
63670
63671 list_for_each_entry_rcu(mod, &modules, list) {
63672- struct symsearch arr[] = {
63673+ struct symsearch modarr[] = {
63674 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63675 NOT_GPL_ONLY, false },
63676 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63677@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
63678 #endif
63679 };
63680
63681- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63682+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63683 return true;
63684 }
63685 return false;
63686@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
63687 static int percpu_modalloc(struct module *mod,
63688 unsigned long size, unsigned long align)
63689 {
63690- if (align > PAGE_SIZE) {
63691+ if (align-1 >= PAGE_SIZE) {
63692 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63693 mod->name, align, PAGE_SIZE);
63694 align = PAGE_SIZE;
63695@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
63696 */
63697 #ifdef CONFIG_SYSFS
63698
63699-#ifdef CONFIG_KALLSYMS
63700+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63701 static inline bool sect_empty(const Elf_Shdr *sect)
63702 {
63703 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
63704@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base
63705
63706 static void unset_module_core_ro_nx(struct module *mod)
63707 {
63708- set_page_attributes(mod->module_core + mod->core_text_size,
63709- mod->module_core + mod->core_size,
63710+ set_page_attributes(mod->module_core_rw,
63711+ mod->module_core_rw + mod->core_size_rw,
63712 set_memory_x);
63713- set_page_attributes(mod->module_core,
63714- mod->module_core + mod->core_ro_size,
63715+ set_page_attributes(mod->module_core_rx,
63716+ mod->module_core_rx + mod->core_size_rx,
63717 set_memory_rw);
63718 }
63719
63720 static void unset_module_init_ro_nx(struct module *mod)
63721 {
63722- set_page_attributes(mod->module_init + mod->init_text_size,
63723- mod->module_init + mod->init_size,
63724+ set_page_attributes(mod->module_init_rw,
63725+ mod->module_init_rw + mod->init_size_rw,
63726 set_memory_x);
63727- set_page_attributes(mod->module_init,
63728- mod->module_init + mod->init_ro_size,
63729+ set_page_attributes(mod->module_init_rx,
63730+ mod->module_init_rx + mod->init_size_rx,
63731 set_memory_rw);
63732 }
63733
63734@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
63735
63736 mutex_lock(&module_mutex);
63737 list_for_each_entry_rcu(mod, &modules, list) {
63738- if ((mod->module_core) && (mod->core_text_size)) {
63739- set_page_attributes(mod->module_core,
63740- mod->module_core + mod->core_text_size,
63741+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63742+ set_page_attributes(mod->module_core_rx,
63743+ mod->module_core_rx + mod->core_size_rx,
63744 set_memory_rw);
63745 }
63746- if ((mod->module_init) && (mod->init_text_size)) {
63747- set_page_attributes(mod->module_init,
63748- mod->module_init + mod->init_text_size,
63749+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63750+ set_page_attributes(mod->module_init_rx,
63751+ mod->module_init_rx + mod->init_size_rx,
63752 set_memory_rw);
63753 }
63754 }
63755@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
63756
63757 mutex_lock(&module_mutex);
63758 list_for_each_entry_rcu(mod, &modules, list) {
63759- if ((mod->module_core) && (mod->core_text_size)) {
63760- set_page_attributes(mod->module_core,
63761- mod->module_core + mod->core_text_size,
63762+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63763+ set_page_attributes(mod->module_core_rx,
63764+ mod->module_core_rx + mod->core_size_rx,
63765 set_memory_ro);
63766 }
63767- if ((mod->module_init) && (mod->init_text_size)) {
63768- set_page_attributes(mod->module_init,
63769- mod->module_init + mod->init_text_size,
63770+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63771+ set_page_attributes(mod->module_init_rx,
63772+ mod->module_init_rx + mod->init_size_rx,
63773 set_memory_ro);
63774 }
63775 }
63776@@ -1748,16 +1750,19 @@ static void free_module(struct module *m
63777
63778 /* This may be NULL, but that's OK */
63779 unset_module_init_ro_nx(mod);
63780- module_free(mod, mod->module_init);
63781+ module_free(mod, mod->module_init_rw);
63782+ module_free_exec(mod, mod->module_init_rx);
63783 kfree(mod->args);
63784 percpu_modfree(mod);
63785
63786 /* Free lock-classes: */
63787- lockdep_free_key_range(mod->module_core, mod->core_size);
63788+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63789+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63790
63791 /* Finally, free the core (containing the module structure) */
63792 unset_module_core_ro_nx(mod);
63793- module_free(mod, mod->module_core);
63794+ module_free_exec(mod, mod->module_core_rx);
63795+ module_free(mod, mod->module_core_rw);
63796
63797 #ifdef CONFIG_MPU
63798 update_protections(current->mm);
63799@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct modul
63800 unsigned int i;
63801 int ret = 0;
63802 const struct kernel_symbol *ksym;
63803+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63804+ int is_fs_load = 0;
63805+ int register_filesystem_found = 0;
63806+ char *p;
63807+
63808+ p = strstr(mod->args, "grsec_modharden_fs");
63809+ if (p) {
63810+ char *endptr = p + strlen("grsec_modharden_fs");
63811+ /* copy \0 as well */
63812+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63813+ is_fs_load = 1;
63814+ }
63815+#endif
63816
63817 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
63818 const char *name = info->strtab + sym[i].st_name;
63819
63820+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63821+ /* it's a real shame this will never get ripped and copied
63822+ upstream! ;(
63823+ */
63824+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63825+ register_filesystem_found = 1;
63826+#endif
63827+
63828 switch (sym[i].st_shndx) {
63829 case SHN_COMMON:
63830 /* We compiled with -fno-common. These are not
63831@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct modul
63832 ksym = resolve_symbol_wait(mod, info, name);
63833 /* Ok if resolved. */
63834 if (ksym && !IS_ERR(ksym)) {
63835+ pax_open_kernel();
63836 sym[i].st_value = ksym->value;
63837+ pax_close_kernel();
63838 break;
63839 }
63840
63841@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct modul
63842 secbase = (unsigned long)mod_percpu(mod);
63843 else
63844 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
63845+ pax_open_kernel();
63846 sym[i].st_value += secbase;
63847+ pax_close_kernel();
63848 break;
63849 }
63850 }
63851
63852+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63853+ if (is_fs_load && !register_filesystem_found) {
63854+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63855+ ret = -EPERM;
63856+ }
63857+#endif
63858+
63859 return ret;
63860 }
63861
63862@@ -1977,22 +2014,12 @@ static void layout_sections(struct modul
63863 || s->sh_entsize != ~0UL
63864 || strstarts(sname, ".init"))
63865 continue;
63866- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63867+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63868+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63869+ else
63870+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63871 DEBUGP("\t%s\n", name);
63872 }
63873- switch (m) {
63874- case 0: /* executable */
63875- mod->core_size = debug_align(mod->core_size);
63876- mod->core_text_size = mod->core_size;
63877- break;
63878- case 1: /* RO: text and ro-data */
63879- mod->core_size = debug_align(mod->core_size);
63880- mod->core_ro_size = mod->core_size;
63881- break;
63882- case 3: /* whole core */
63883- mod->core_size = debug_align(mod->core_size);
63884- break;
63885- }
63886 }
63887
63888 DEBUGP("Init section allocation order:\n");
63889@@ -2006,23 +2033,13 @@ static void layout_sections(struct modul
63890 || s->sh_entsize != ~0UL
63891 || !strstarts(sname, ".init"))
63892 continue;
63893- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63894- | INIT_OFFSET_MASK);
63895+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63896+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63897+ else
63898+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63899+ s->sh_entsize |= INIT_OFFSET_MASK;
63900 DEBUGP("\t%s\n", sname);
63901 }
63902- switch (m) {
63903- case 0: /* executable */
63904- mod->init_size = debug_align(mod->init_size);
63905- mod->init_text_size = mod->init_size;
63906- break;
63907- case 1: /* RO: text and ro-data */
63908- mod->init_size = debug_align(mod->init_size);
63909- mod->init_ro_size = mod->init_size;
63910- break;
63911- case 3: /* whole init */
63912- mod->init_size = debug_align(mod->init_size);
63913- break;
63914- }
63915 }
63916 }
63917
63918@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module
63919
63920 /* Put symbol section at end of init part of module. */
63921 symsect->sh_flags |= SHF_ALLOC;
63922- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63923+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63924 info->index.sym) | INIT_OFFSET_MASK;
63925 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
63926
63927@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module
63928 }
63929
63930 /* Append room for core symbols at end of core part. */
63931- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63932- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
63933+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63934+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
63935
63936 /* Put string table section at end of init part of module. */
63937 strsect->sh_flags |= SHF_ALLOC;
63938- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63939+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63940 info->index.str) | INIT_OFFSET_MASK;
63941 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
63942
63943 /* Append room for core symbols' strings at end of core part. */
63944- info->stroffs = mod->core_size;
63945+ info->stroffs = mod->core_size_rx;
63946 __set_bit(0, info->strmap);
63947- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
63948+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
63949 }
63950
63951 static void add_kallsyms(struct module *mod, const struct load_info *info)
63952@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *
63953 /* Make sure we get permanent strtab: don't use info->strtab. */
63954 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
63955
63956+ pax_open_kernel();
63957+
63958 /* Set types up while we still have access to sections. */
63959 for (i = 0; i < mod->num_symtab; i++)
63960 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
63961
63962- mod->core_symtab = dst = mod->module_core + info->symoffs;
63963+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
63964 src = mod->symtab;
63965 *dst = *src;
63966 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63967@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *
63968 }
63969 mod->core_num_syms = ndst;
63970
63971- mod->core_strtab = s = mod->module_core + info->stroffs;
63972+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
63973 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
63974 if (test_bit(i, info->strmap))
63975 *++s = mod->strtab[i];
63976+
63977+ pax_close_kernel();
63978 }
63979 #else
63980 static inline void layout_symtab(struct module *mod, struct load_info *info)
63981@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long
63982 return size == 0 ? NULL : vmalloc_exec(size);
63983 }
63984
63985-static void *module_alloc_update_bounds(unsigned long size)
63986+static void *module_alloc_update_bounds_rw(unsigned long size)
63987 {
63988 void *ret = module_alloc(size);
63989
63990 if (ret) {
63991 mutex_lock(&module_mutex);
63992 /* Update module bounds. */
63993- if ((unsigned long)ret < module_addr_min)
63994- module_addr_min = (unsigned long)ret;
63995- if ((unsigned long)ret + size > module_addr_max)
63996- module_addr_max = (unsigned long)ret + size;
63997+ if ((unsigned long)ret < module_addr_min_rw)
63998+ module_addr_min_rw = (unsigned long)ret;
63999+ if ((unsigned long)ret + size > module_addr_max_rw)
64000+ module_addr_max_rw = (unsigned long)ret + size;
64001+ mutex_unlock(&module_mutex);
64002+ }
64003+ return ret;
64004+}
64005+
64006+static void *module_alloc_update_bounds_rx(unsigned long size)
64007+{
64008+ void *ret = module_alloc_exec(size);
64009+
64010+ if (ret) {
64011+ mutex_lock(&module_mutex);
64012+ /* Update module bounds. */
64013+ if ((unsigned long)ret < module_addr_min_rx)
64014+ module_addr_min_rx = (unsigned long)ret;
64015+ if ((unsigned long)ret + size > module_addr_max_rx)
64016+ module_addr_max_rx = (unsigned long)ret + size;
64017 mutex_unlock(&module_mutex);
64018 }
64019 return ret;
64020@@ -2589,7 +2626,7 @@ static int move_module(struct module *mo
64021 void *ptr;
64022
64023 /* Do the allocs. */
64024- ptr = module_alloc_update_bounds(mod->core_size);
64025+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64026 /*
64027 * The pointer to this block is stored in the module structure
64028 * which is inside the block. Just mark it as not being a
64029@@ -2599,23 +2636,50 @@ static int move_module(struct module *mo
64030 if (!ptr)
64031 return -ENOMEM;
64032
64033- memset(ptr, 0, mod->core_size);
64034- mod->module_core = ptr;
64035+ memset(ptr, 0, mod->core_size_rw);
64036+ mod->module_core_rw = ptr;
64037
64038- ptr = module_alloc_update_bounds(mod->init_size);
64039+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64040 /*
64041 * The pointer to this block is stored in the module structure
64042 * which is inside the block. This block doesn't need to be
64043 * scanned as it contains data and code that will be freed
64044 * after the module is initialized.
64045 */
64046- kmemleak_ignore(ptr);
64047- if (!ptr && mod->init_size) {
64048- module_free(mod, mod->module_core);
64049+ kmemleak_not_leak(ptr);
64050+ if (!ptr && mod->init_size_rw) {
64051+ module_free(mod, mod->module_core_rw);
64052 return -ENOMEM;
64053 }
64054- memset(ptr, 0, mod->init_size);
64055- mod->module_init = ptr;
64056+ memset(ptr, 0, mod->init_size_rw);
64057+ mod->module_init_rw = ptr;
64058+
64059+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64060+ kmemleak_not_leak(ptr);
64061+ if (!ptr) {
64062+ module_free(mod, mod->module_init_rw);
64063+ module_free(mod, mod->module_core_rw);
64064+ return -ENOMEM;
64065+ }
64066+
64067+ pax_open_kernel();
64068+ memset(ptr, 0, mod->core_size_rx);
64069+ pax_close_kernel();
64070+ mod->module_core_rx = ptr;
64071+
64072+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64073+ kmemleak_not_leak(ptr);
64074+ if (!ptr && mod->init_size_rx) {
64075+ module_free_exec(mod, mod->module_core_rx);
64076+ module_free(mod, mod->module_init_rw);
64077+ module_free(mod, mod->module_core_rw);
64078+ return -ENOMEM;
64079+ }
64080+
64081+ pax_open_kernel();
64082+ memset(ptr, 0, mod->init_size_rx);
64083+ pax_close_kernel();
64084+ mod->module_init_rx = ptr;
64085
64086 /* Transfer each section which specifies SHF_ALLOC */
64087 DEBUGP("final section addresses:\n");
64088@@ -2626,16 +2690,45 @@ static int move_module(struct module *mo
64089 if (!(shdr->sh_flags & SHF_ALLOC))
64090 continue;
64091
64092- if (shdr->sh_entsize & INIT_OFFSET_MASK)
64093- dest = mod->module_init
64094- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64095- else
64096- dest = mod->module_core + shdr->sh_entsize;
64097+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64098+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64099+ dest = mod->module_init_rw
64100+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64101+ else
64102+ dest = mod->module_init_rx
64103+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64104+ } else {
64105+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64106+ dest = mod->module_core_rw + shdr->sh_entsize;
64107+ else
64108+ dest = mod->module_core_rx + shdr->sh_entsize;
64109+ }
64110+
64111+ if (shdr->sh_type != SHT_NOBITS) {
64112+
64113+#ifdef CONFIG_PAX_KERNEXEC
64114+#ifdef CONFIG_X86_64
64115+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64116+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64117+#endif
64118+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64119+ pax_open_kernel();
64120+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64121+ pax_close_kernel();
64122+ } else
64123+#endif
64124
64125- if (shdr->sh_type != SHT_NOBITS)
64126 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64127+ }
64128 /* Update sh_addr to point to copy in image. */
64129- shdr->sh_addr = (unsigned long)dest;
64130+
64131+#ifdef CONFIG_PAX_KERNEXEC
64132+ if (shdr->sh_flags & SHF_EXECINSTR)
64133+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
64134+ else
64135+#endif
64136+
64137+ shdr->sh_addr = (unsigned long)dest;
64138 DEBUGP("\t0x%lx %s\n",
64139 shdr->sh_addr, info->secstrings + shdr->sh_name);
64140 }
64141@@ -2686,12 +2779,12 @@ static void flush_module_icache(const st
64142 * Do it before processing of module parameters, so the module
64143 * can provide parameter accessor functions of its own.
64144 */
64145- if (mod->module_init)
64146- flush_icache_range((unsigned long)mod->module_init,
64147- (unsigned long)mod->module_init
64148- + mod->init_size);
64149- flush_icache_range((unsigned long)mod->module_core,
64150- (unsigned long)mod->module_core + mod->core_size);
64151+ if (mod->module_init_rx)
64152+ flush_icache_range((unsigned long)mod->module_init_rx,
64153+ (unsigned long)mod->module_init_rx
64154+ + mod->init_size_rx);
64155+ flush_icache_range((unsigned long)mod->module_core_rx,
64156+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64157
64158 set_fs(old_fs);
64159 }
64160@@ -2771,8 +2864,10 @@ static void module_deallocate(struct mod
64161 {
64162 kfree(info->strmap);
64163 percpu_modfree(mod);
64164- module_free(mod, mod->module_init);
64165- module_free(mod, mod->module_core);
64166+ module_free_exec(mod, mod->module_init_rx);
64167+ module_free_exec(mod, mod->module_core_rx);
64168+ module_free(mod, mod->module_init_rw);
64169+ module_free(mod, mod->module_core_rw);
64170 }
64171
64172 int __weak module_finalize(const Elf_Ehdr *hdr,
64173@@ -2836,9 +2931,38 @@ static struct module *load_module(void _
64174 if (err)
64175 goto free_unload;
64176
64177+ /* Now copy in args */
64178+ mod->args = strndup_user(uargs, ~0UL >> 1);
64179+ if (IS_ERR(mod->args)) {
64180+ err = PTR_ERR(mod->args);
64181+ goto free_unload;
64182+ }
64183+
64184 /* Set up MODINFO_ATTR fields */
64185 setup_modinfo(mod, &info);
64186
64187+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64188+ {
64189+ char *p, *p2;
64190+
64191+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64192+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64193+ err = -EPERM;
64194+ goto free_modinfo;
64195+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64196+ p += strlen("grsec_modharden_normal");
64197+ p2 = strstr(p, "_");
64198+ if (p2) {
64199+ *p2 = '\0';
64200+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64201+ *p2 = '_';
64202+ }
64203+ err = -EPERM;
64204+ goto free_modinfo;
64205+ }
64206+ }
64207+#endif
64208+
64209 /* Fix up syms, so that st_value is a pointer to location. */
64210 err = simplify_symbols(mod, &info);
64211 if (err < 0)
64212@@ -2854,13 +2978,6 @@ static struct module *load_module(void _
64213
64214 flush_module_icache(mod);
64215
64216- /* Now copy in args */
64217- mod->args = strndup_user(uargs, ~0UL >> 1);
64218- if (IS_ERR(mod->args)) {
64219- err = PTR_ERR(mod->args);
64220- goto free_arch_cleanup;
64221- }
64222-
64223 /* Mark state as coming so strong_try_module_get() ignores us. */
64224 mod->state = MODULE_STATE_COMING;
64225
64226@@ -2920,11 +3037,10 @@ static struct module *load_module(void _
64227 unlock:
64228 mutex_unlock(&module_mutex);
64229 synchronize_sched();
64230- kfree(mod->args);
64231- free_arch_cleanup:
64232 module_arch_cleanup(mod);
64233 free_modinfo:
64234 free_modinfo(mod);
64235+ kfree(mod->args);
64236 free_unload:
64237 module_unload_free(mod);
64238 free_module:
64239@@ -2965,16 +3081,16 @@ SYSCALL_DEFINE3(init_module, void __user
64240 MODULE_STATE_COMING, mod);
64241
64242 /* Set RO and NX regions for core */
64243- set_section_ro_nx(mod->module_core,
64244- mod->core_text_size,
64245- mod->core_ro_size,
64246- mod->core_size);
64247+ set_section_ro_nx(mod->module_core_rx,
64248+ mod->core_size_rx,
64249+ mod->core_size_rx,
64250+ mod->core_size_rx);
64251
64252 /* Set RO and NX regions for init */
64253- set_section_ro_nx(mod->module_init,
64254- mod->init_text_size,
64255- mod->init_ro_size,
64256- mod->init_size);
64257+ set_section_ro_nx(mod->module_init_rx,
64258+ mod->init_size_rx,
64259+ mod->init_size_rx,
64260+ mod->init_size_rx);
64261
64262 do_mod_ctors(mod);
64263 /* Start the module */
64264@@ -3020,11 +3136,12 @@ SYSCALL_DEFINE3(init_module, void __user
64265 mod->strtab = mod->core_strtab;
64266 #endif
64267 unset_module_init_ro_nx(mod);
64268- module_free(mod, mod->module_init);
64269- mod->module_init = NULL;
64270- mod->init_size = 0;
64271- mod->init_ro_size = 0;
64272- mod->init_text_size = 0;
64273+ module_free(mod, mod->module_init_rw);
64274+ module_free_exec(mod, mod->module_init_rx);
64275+ mod->module_init_rw = NULL;
64276+ mod->module_init_rx = NULL;
64277+ mod->init_size_rw = 0;
64278+ mod->init_size_rx = 0;
64279 mutex_unlock(&module_mutex);
64280
64281 return 0;
64282@@ -3055,10 +3172,16 @@ static const char *get_ksymbol(struct mo
64283 unsigned long nextval;
64284
64285 /* At worse, next value is at end of module */
64286- if (within_module_init(addr, mod))
64287- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64288+ if (within_module_init_rx(addr, mod))
64289+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64290+ else if (within_module_init_rw(addr, mod))
64291+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64292+ else if (within_module_core_rx(addr, mod))
64293+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64294+ else if (within_module_core_rw(addr, mod))
64295+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64296 else
64297- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64298+ return NULL;
64299
64300 /* Scan for closest preceding symbol, and next symbol. (ELF
64301 starts real symbols at 1). */
64302@@ -3304,7 +3427,7 @@ static int m_show(struct seq_file *m, vo
64303 char buf[8];
64304
64305 seq_printf(m, "%s %u",
64306- mod->name, mod->init_size + mod->core_size);
64307+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64308 print_unload_info(m, mod);
64309
64310 /* Informative for users. */
64311@@ -3313,7 +3436,7 @@ static int m_show(struct seq_file *m, vo
64312 mod->state == MODULE_STATE_COMING ? "Loading":
64313 "Live");
64314 /* Used by oprofile and other similar tools. */
64315- seq_printf(m, " 0x%pK", mod->module_core);
64316+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64317
64318 /* Taints info */
64319 if (mod->taints)
64320@@ -3349,7 +3472,17 @@ static const struct file_operations proc
64321
64322 static int __init proc_modules_init(void)
64323 {
64324+#ifndef CONFIG_GRKERNSEC_HIDESYM
64325+#ifdef CONFIG_GRKERNSEC_PROC_USER
64326+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64327+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64328+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64329+#else
64330 proc_create("modules", 0, NULL, &proc_modules_operations);
64331+#endif
64332+#else
64333+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64334+#endif
64335 return 0;
64336 }
64337 module_init(proc_modules_init);
64338@@ -3408,12 +3541,12 @@ struct module *__module_address(unsigned
64339 {
64340 struct module *mod;
64341
64342- if (addr < module_addr_min || addr > module_addr_max)
64343+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64344+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64345 return NULL;
64346
64347 list_for_each_entry_rcu(mod, &modules, list)
64348- if (within_module_core(addr, mod)
64349- || within_module_init(addr, mod))
64350+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64351 return mod;
64352 return NULL;
64353 }
64354@@ -3447,11 +3580,20 @@ bool is_module_text_address(unsigned lon
64355 */
64356 struct module *__module_text_address(unsigned long addr)
64357 {
64358- struct module *mod = __module_address(addr);
64359+ struct module *mod;
64360+
64361+#ifdef CONFIG_X86_32
64362+ addr = ktla_ktva(addr);
64363+#endif
64364+
64365+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64366+ return NULL;
64367+
64368+ mod = __module_address(addr);
64369+
64370 if (mod) {
64371 /* Make sure it's within the text section. */
64372- if (!within(addr, mod->module_init, mod->init_text_size)
64373- && !within(addr, mod->module_core, mod->core_text_size))
64374+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64375 mod = NULL;
64376 }
64377 return mod;
64378diff -urNp linux-3.1.1/kernel/mutex.c linux-3.1.1/kernel/mutex.c
64379--- linux-3.1.1/kernel/mutex.c 2011-11-11 15:19:27.000000000 -0500
64380+++ linux-3.1.1/kernel/mutex.c 2011-11-16 18:39:08.000000000 -0500
64381@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
64382 spin_lock_mutex(&lock->wait_lock, flags);
64383
64384 debug_mutex_lock_common(lock, &waiter);
64385- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64386+ debug_mutex_add_waiter(lock, &waiter, task);
64387
64388 /* add waiting tasks to the end of the waitqueue (FIFO): */
64389 list_add_tail(&waiter.list, &lock->wait_list);
64390@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
64391 * TASK_UNINTERRUPTIBLE case.)
64392 */
64393 if (unlikely(signal_pending_state(state, task))) {
64394- mutex_remove_waiter(lock, &waiter,
64395- task_thread_info(task));
64396+ mutex_remove_waiter(lock, &waiter, task);
64397 mutex_release(&lock->dep_map, 1, ip);
64398 spin_unlock_mutex(&lock->wait_lock, flags);
64399
64400@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
64401 done:
64402 lock_acquired(&lock->dep_map, ip);
64403 /* got the lock - rejoice! */
64404- mutex_remove_waiter(lock, &waiter, current_thread_info());
64405+ mutex_remove_waiter(lock, &waiter, task);
64406 mutex_set_owner(lock);
64407
64408 /* set it to 0 if there are no waiters left: */
64409diff -urNp linux-3.1.1/kernel/mutex-debug.c linux-3.1.1/kernel/mutex-debug.c
64410--- linux-3.1.1/kernel/mutex-debug.c 2011-11-11 15:19:27.000000000 -0500
64411+++ linux-3.1.1/kernel/mutex-debug.c 2011-11-16 18:39:08.000000000 -0500
64412@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64413 }
64414
64415 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64416- struct thread_info *ti)
64417+ struct task_struct *task)
64418 {
64419 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64420
64421 /* Mark the current thread as blocked on the lock: */
64422- ti->task->blocked_on = waiter;
64423+ task->blocked_on = waiter;
64424 }
64425
64426 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64427- struct thread_info *ti)
64428+ struct task_struct *task)
64429 {
64430 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64431- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64432- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64433- ti->task->blocked_on = NULL;
64434+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64435+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64436+ task->blocked_on = NULL;
64437
64438 list_del_init(&waiter->list);
64439 waiter->task = NULL;
64440diff -urNp linux-3.1.1/kernel/mutex-debug.h linux-3.1.1/kernel/mutex-debug.h
64441--- linux-3.1.1/kernel/mutex-debug.h 2011-11-11 15:19:27.000000000 -0500
64442+++ linux-3.1.1/kernel/mutex-debug.h 2011-11-16 18:39:08.000000000 -0500
64443@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
64444 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64445 extern void debug_mutex_add_waiter(struct mutex *lock,
64446 struct mutex_waiter *waiter,
64447- struct thread_info *ti);
64448+ struct task_struct *task);
64449 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64450- struct thread_info *ti);
64451+ struct task_struct *task);
64452 extern void debug_mutex_unlock(struct mutex *lock);
64453 extern void debug_mutex_init(struct mutex *lock, const char *name,
64454 struct lock_class_key *key);
64455diff -urNp linux-3.1.1/kernel/padata.c linux-3.1.1/kernel/padata.c
64456--- linux-3.1.1/kernel/padata.c 2011-11-11 15:19:27.000000000 -0500
64457+++ linux-3.1.1/kernel/padata.c 2011-11-16 18:39:08.000000000 -0500
64458@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
64459 padata->pd = pd;
64460 padata->cb_cpu = cb_cpu;
64461
64462- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64463- atomic_set(&pd->seq_nr, -1);
64464+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64465+ atomic_set_unchecked(&pd->seq_nr, -1);
64466
64467- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64468+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64469
64470 target_cpu = padata_cpu_hash(padata);
64471 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64472@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
64473 padata_init_pqueues(pd);
64474 padata_init_squeues(pd);
64475 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64476- atomic_set(&pd->seq_nr, -1);
64477+ atomic_set_unchecked(&pd->seq_nr, -1);
64478 atomic_set(&pd->reorder_objects, 0);
64479 atomic_set(&pd->refcnt, 0);
64480 pd->pinst = pinst;
64481diff -urNp linux-3.1.1/kernel/panic.c linux-3.1.1/kernel/panic.c
64482--- linux-3.1.1/kernel/panic.c 2011-11-11 15:19:27.000000000 -0500
64483+++ linux-3.1.1/kernel/panic.c 2011-11-16 18:40:44.000000000 -0500
64484@@ -371,7 +371,7 @@ static void warn_slowpath_common(const c
64485 const char *board;
64486
64487 printk(KERN_WARNING "------------[ cut here ]------------\n");
64488- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64489+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64490 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64491 if (board)
64492 printk(KERN_WARNING "Hardware name: %s\n", board);
64493@@ -426,7 +426,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64494 */
64495 void __stack_chk_fail(void)
64496 {
64497- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64498+ dump_stack();
64499+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64500 __builtin_return_address(0));
64501 }
64502 EXPORT_SYMBOL(__stack_chk_fail);
64503diff -urNp linux-3.1.1/kernel/pid.c linux-3.1.1/kernel/pid.c
64504--- linux-3.1.1/kernel/pid.c 2011-11-11 15:19:27.000000000 -0500
64505+++ linux-3.1.1/kernel/pid.c 2011-11-16 18:40:44.000000000 -0500
64506@@ -33,6 +33,7 @@
64507 #include <linux/rculist.h>
64508 #include <linux/bootmem.h>
64509 #include <linux/hash.h>
64510+#include <linux/security.h>
64511 #include <linux/pid_namespace.h>
64512 #include <linux/init_task.h>
64513 #include <linux/syscalls.h>
64514@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64515
64516 int pid_max = PID_MAX_DEFAULT;
64517
64518-#define RESERVED_PIDS 300
64519+#define RESERVED_PIDS 500
64520
64521 int pid_max_min = RESERVED_PIDS + 1;
64522 int pid_max_max = PID_MAX_LIMIT;
64523@@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task);
64524 */
64525 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64526 {
64527+ struct task_struct *task;
64528+
64529 rcu_lockdep_assert(rcu_read_lock_held());
64530- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64531+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64532+
64533+ if (gr_pid_is_chrooted(task))
64534+ return NULL;
64535+
64536+ return task;
64537 }
64538
64539 struct task_struct *find_task_by_vpid(pid_t vnr)
64540@@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pi
64541 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64542 }
64543
64544+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64545+{
64546+ rcu_lockdep_assert(rcu_read_lock_held());
64547+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64548+}
64549+
64550 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64551 {
64552 struct pid *pid;
64553diff -urNp linux-3.1.1/kernel/posix-cpu-timers.c linux-3.1.1/kernel/posix-cpu-timers.c
64554--- linux-3.1.1/kernel/posix-cpu-timers.c 2011-11-11 15:19:27.000000000 -0500
64555+++ linux-3.1.1/kernel/posix-cpu-timers.c 2011-11-16 18:40:44.000000000 -0500
64556@@ -6,6 +6,7 @@
64557 #include <linux/posix-timers.h>
64558 #include <linux/errno.h>
64559 #include <linux/math64.h>
64560+#include <linux/security.h>
64561 #include <asm/uaccess.h>
64562 #include <linux/kernel_stat.h>
64563 #include <trace/events/timer.h>
64564@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
64565
64566 static __init int init_posix_cpu_timers(void)
64567 {
64568- struct k_clock process = {
64569+ static struct k_clock process = {
64570 .clock_getres = process_cpu_clock_getres,
64571 .clock_get = process_cpu_clock_get,
64572 .timer_create = process_cpu_timer_create,
64573 .nsleep = process_cpu_nsleep,
64574 .nsleep_restart = process_cpu_nsleep_restart,
64575 };
64576- struct k_clock thread = {
64577+ static struct k_clock thread = {
64578 .clock_getres = thread_cpu_clock_getres,
64579 .clock_get = thread_cpu_clock_get,
64580 .timer_create = thread_cpu_timer_create,
64581diff -urNp linux-3.1.1/kernel/posix-timers.c linux-3.1.1/kernel/posix-timers.c
64582--- linux-3.1.1/kernel/posix-timers.c 2011-11-11 15:19:27.000000000 -0500
64583+++ linux-3.1.1/kernel/posix-timers.c 2011-11-16 18:40:44.000000000 -0500
64584@@ -43,6 +43,7 @@
64585 #include <linux/idr.h>
64586 #include <linux/posix-clock.h>
64587 #include <linux/posix-timers.h>
64588+#include <linux/grsecurity.h>
64589 #include <linux/syscalls.h>
64590 #include <linux/wait.h>
64591 #include <linux/workqueue.h>
64592@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64593 * which we beg off on and pass to do_sys_settimeofday().
64594 */
64595
64596-static struct k_clock posix_clocks[MAX_CLOCKS];
64597+static struct k_clock *posix_clocks[MAX_CLOCKS];
64598
64599 /*
64600 * These ones are defined below.
64601@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
64602 */
64603 static __init int init_posix_timers(void)
64604 {
64605- struct k_clock clock_realtime = {
64606+ static struct k_clock clock_realtime = {
64607 .clock_getres = hrtimer_get_res,
64608 .clock_get = posix_clock_realtime_get,
64609 .clock_set = posix_clock_realtime_set,
64610@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
64611 .timer_get = common_timer_get,
64612 .timer_del = common_timer_del,
64613 };
64614- struct k_clock clock_monotonic = {
64615+ static struct k_clock clock_monotonic = {
64616 .clock_getres = hrtimer_get_res,
64617 .clock_get = posix_ktime_get_ts,
64618 .nsleep = common_nsleep,
64619@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
64620 .timer_get = common_timer_get,
64621 .timer_del = common_timer_del,
64622 };
64623- struct k_clock clock_monotonic_raw = {
64624+ static struct k_clock clock_monotonic_raw = {
64625 .clock_getres = hrtimer_get_res,
64626 .clock_get = posix_get_monotonic_raw,
64627 };
64628- struct k_clock clock_realtime_coarse = {
64629+ static struct k_clock clock_realtime_coarse = {
64630 .clock_getres = posix_get_coarse_res,
64631 .clock_get = posix_get_realtime_coarse,
64632 };
64633- struct k_clock clock_monotonic_coarse = {
64634+ static struct k_clock clock_monotonic_coarse = {
64635 .clock_getres = posix_get_coarse_res,
64636 .clock_get = posix_get_monotonic_coarse,
64637 };
64638- struct k_clock clock_boottime = {
64639+ static struct k_clock clock_boottime = {
64640 .clock_getres = hrtimer_get_res,
64641 .clock_get = posix_get_boottime,
64642 .nsleep = common_nsleep,
64643@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
64644 .timer_del = common_timer_del,
64645 };
64646
64647+ pax_track_stack();
64648+
64649 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
64650 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
64651 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64652@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
64653 return;
64654 }
64655
64656- posix_clocks[clock_id] = *new_clock;
64657+ posix_clocks[clock_id] = new_clock;
64658 }
64659 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
64660
64661@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
64662 return (id & CLOCKFD_MASK) == CLOCKFD ?
64663 &clock_posix_dynamic : &clock_posix_cpu;
64664
64665- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
64666+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
64667 return NULL;
64668- return &posix_clocks[id];
64669+ return posix_clocks[id];
64670 }
64671
64672 static int common_timer_create(struct k_itimer *new_timer)
64673@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64674 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64675 return -EFAULT;
64676
63677+ /* only the CLOCK_REALTIME clock can be set; all other clocks
63678+ have their clock_set fptr set to a nosettime dummy function.
63679+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
63680+ call common_clock_set, which calls do_sys_settimeofday, which
63681+ we hook.
63682+ */
64683+
64684 return kc->clock_set(which_clock, &new_tp);
64685 }
64686
64687diff -urNp linux-3.1.1/kernel/power/poweroff.c linux-3.1.1/kernel/power/poweroff.c
64688--- linux-3.1.1/kernel/power/poweroff.c 2011-11-11 15:19:27.000000000 -0500
64689+++ linux-3.1.1/kernel/power/poweroff.c 2011-11-16 18:39:08.000000000 -0500
64690@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64691 .enable_mask = SYSRQ_ENABLE_BOOT,
64692 };
64693
64694-static int pm_sysrq_init(void)
64695+static int __init pm_sysrq_init(void)
64696 {
64697 register_sysrq_key('o', &sysrq_poweroff_op);
64698 return 0;
64699diff -urNp linux-3.1.1/kernel/power/process.c linux-3.1.1/kernel/power/process.c
64700--- linux-3.1.1/kernel/power/process.c 2011-11-11 15:19:27.000000000 -0500
64701+++ linux-3.1.1/kernel/power/process.c 2011-11-16 18:39:08.000000000 -0500
64702@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
64703 u64 elapsed_csecs64;
64704 unsigned int elapsed_csecs;
64705 bool wakeup = false;
64706+ bool timedout = false;
64707
64708 do_gettimeofday(&start);
64709
64710@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
64711
64712 while (true) {
64713 todo = 0;
64714+ if (time_after(jiffies, end_time))
64715+ timedout = true;
64716 read_lock(&tasklist_lock);
64717 do_each_thread(g, p) {
64718 if (frozen(p) || !freezable(p))
64719@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
64720 * try_to_stop() after schedule() in ptrace/signal
64721 * stop sees TIF_FREEZE.
64722 */
64723- if (!task_is_stopped_or_traced(p) &&
64724- !freezer_should_skip(p))
64725+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64726 todo++;
64727+ if (timedout) {
64728+ printk(KERN_ERR "Task refusing to freeze:\n");
64729+ sched_show_task(p);
64730+ }
64731+ }
64732 } while_each_thread(g, p);
64733 read_unlock(&tasklist_lock);
64734
64735@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
64736 todo += wq_busy;
64737 }
64738
64739- if (!todo || time_after(jiffies, end_time))
64740+ if (!todo || timedout)
64741 break;
64742
64743 if (pm_wakeup_pending()) {
64744diff -urNp linux-3.1.1/kernel/printk.c linux-3.1.1/kernel/printk.c
64745--- linux-3.1.1/kernel/printk.c 2011-11-11 15:19:27.000000000 -0500
64746+++ linux-3.1.1/kernel/printk.c 2011-11-16 19:38:11.000000000 -0500
64747@@ -313,6 +313,11 @@ static int check_syslog_permissions(int
64748 if (from_file && type != SYSLOG_ACTION_OPEN)
64749 return 0;
64750
64751+#ifdef CONFIG_GRKERNSEC_DMESG
64752+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
64753+ return -EPERM;
64754+#endif
64755+
64756 if (syslog_action_restricted(type)) {
64757 if (capable(CAP_SYSLOG))
64758 return 0;
64759diff -urNp linux-3.1.1/kernel/profile.c linux-3.1.1/kernel/profile.c
64760--- linux-3.1.1/kernel/profile.c 2011-11-11 15:19:27.000000000 -0500
64761+++ linux-3.1.1/kernel/profile.c 2011-11-16 18:39:08.000000000 -0500
64762@@ -39,7 +39,7 @@ struct profile_hit {
64763 /* Oprofile timer tick hook */
64764 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64765
64766-static atomic_t *prof_buffer;
64767+static atomic_unchecked_t *prof_buffer;
64768 static unsigned long prof_len, prof_shift;
64769
64770 int prof_on __read_mostly;
64771@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
64772 hits[i].pc = 0;
64773 continue;
64774 }
64775- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64776+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64777 hits[i].hits = hits[i].pc = 0;
64778 }
64779 }
64780@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
64781 * Add the current hit(s) and flush the write-queue out
64782 * to the global buffer:
64783 */
64784- atomic_add(nr_hits, &prof_buffer[pc]);
64785+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64786 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64787- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64788+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64789 hits[i].pc = hits[i].hits = 0;
64790 }
64791 out:
64792@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
64793 {
64794 unsigned long pc;
64795 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64796- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64797+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64798 }
64799 #endif /* !CONFIG_SMP */
64800
64801@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64802 return -EFAULT;
64803 buf++; p++; count--; read++;
64804 }
64805- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64806+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64807 if (copy_to_user(buf, (void *)pnt, count))
64808 return -EFAULT;
64809 read += count;
64810@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64811 }
64812 #endif
64813 profile_discard_flip_buffers();
64814- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64815+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64816 return count;
64817 }
64818
64819diff -urNp linux-3.1.1/kernel/ptrace.c linux-3.1.1/kernel/ptrace.c
64820--- linux-3.1.1/kernel/ptrace.c 2011-11-11 15:19:27.000000000 -0500
64821+++ linux-3.1.1/kernel/ptrace.c 2011-11-16 19:50:22.000000000 -0500
64822@@ -161,7 +161,8 @@ int ptrace_check_attach(struct task_stru
64823 return ret;
64824 }
64825
64826-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64827+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64828+ unsigned int log)
64829 {
64830 const struct cred *cred = current_cred(), *tcred;
64831
64832@@ -187,7 +188,8 @@ int __ptrace_may_access(struct task_stru
64833 cred->gid == tcred->sgid &&
64834 cred->gid == tcred->gid))
64835 goto ok;
64836- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
64837+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
64838+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
64839 goto ok;
64840 rcu_read_unlock();
64841 return -EPERM;
64842@@ -196,7 +198,9 @@ ok:
64843 smp_rmb();
64844 if (task->mm)
64845 dumpable = get_dumpable(task->mm);
64846- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
64847+ if (!dumpable &&
64848+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
64849+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
64850 return -EPERM;
64851
64852 return security_ptrace_access_check(task, mode);
64853@@ -206,7 +210,16 @@ bool ptrace_may_access(struct task_struc
64854 {
64855 int err;
64856 task_lock(task);
64857- err = __ptrace_may_access(task, mode);
64858+ err = __ptrace_may_access(task, mode, 0);
64859+ task_unlock(task);
64860+ return !err;
64861+}
64862+
64863+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64864+{
64865+ int err;
64866+ task_lock(task);
64867+ err = __ptrace_may_access(task, mode, 1);
64868 task_unlock(task);
64869 return !err;
64870 }
64871@@ -251,7 +264,7 @@ static int ptrace_attach(struct task_str
64872 goto out;
64873
64874 task_lock(task);
64875- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64876+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64877 task_unlock(task);
64878 if (retval)
64879 goto unlock_creds;
64880@@ -266,7 +279,7 @@ static int ptrace_attach(struct task_str
64881 task->ptrace = PT_PTRACED;
64882 if (seize)
64883 task->ptrace |= PT_SEIZED;
64884- if (task_ns_capable(task, CAP_SYS_PTRACE))
64885+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
64886 task->ptrace |= PT_PTRACE_CAP;
64887
64888 __ptrace_link(task, current);
64889@@ -461,6 +474,8 @@ int ptrace_readdata(struct task_struct *
64890 {
64891 int copied = 0;
64892
64893+ pax_track_stack();
64894+
64895 while (len > 0) {
64896 char buf[128];
64897 int this_len, retval;
64898@@ -472,7 +487,7 @@ int ptrace_readdata(struct task_struct *
64899 break;
64900 return -EIO;
64901 }
64902- if (copy_to_user(dst, buf, retval))
64903+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
64904 return -EFAULT;
64905 copied += retval;
64906 src += retval;
64907@@ -486,6 +501,8 @@ int ptrace_writedata(struct task_struct
64908 {
64909 int copied = 0;
64910
64911+ pax_track_stack();
64912+
64913 while (len > 0) {
64914 char buf[128];
64915 int this_len, retval;
64916@@ -669,10 +686,12 @@ int ptrace_request(struct task_struct *c
64917 bool seized = child->ptrace & PT_SEIZED;
64918 int ret = -EIO;
64919 siginfo_t siginfo, *si;
64920- void __user *datavp = (void __user *) data;
64921+ void __user *datavp = (__force void __user *) data;
64922 unsigned long __user *datalp = datavp;
64923 unsigned long flags;
64924
64925+ pax_track_stack();
64926+
64927 switch (request) {
64928 case PTRACE_PEEKTEXT:
64929 case PTRACE_PEEKDATA:
64930@@ -871,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64931 goto out;
64932 }
64933
64934+ if (gr_handle_ptrace(child, request)) {
64935+ ret = -EPERM;
64936+ goto out_put_task_struct;
64937+ }
64938+
64939 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
64940 ret = ptrace_attach(child, request, data);
64941 /*
64942 * Some architectures need to do book-keeping after
64943 * a ptrace attach.
64944 */
64945- if (!ret)
64946+ if (!ret) {
64947 arch_ptrace_attach(child);
64948+ gr_audit_ptrace(child);
64949+ }
64950 goto out_put_task_struct;
64951 }
64952
64953@@ -904,7 +930,7 @@ int generic_ptrace_peekdata(struct task_
64954 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
64955 if (copied != sizeof(tmp))
64956 return -EIO;
64957- return put_user(tmp, (unsigned long __user *)data);
64958+ return put_user(tmp, (__force unsigned long __user *)data);
64959 }
64960
64961 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
64962@@ -927,6 +953,8 @@ int compat_ptrace_request(struct task_st
64963 siginfo_t siginfo;
64964 int ret;
64965
64966+ pax_track_stack();
64967+
64968 switch (request) {
64969 case PTRACE_PEEKTEXT:
64970 case PTRACE_PEEKDATA:
64971@@ -1014,14 +1042,21 @@ asmlinkage long compat_sys_ptrace(compat
64972 goto out;
64973 }
64974
64975+ if (gr_handle_ptrace(child, request)) {
64976+ ret = -EPERM;
64977+ goto out_put_task_struct;
64978+ }
64979+
64980 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
64981 ret = ptrace_attach(child, request, data);
64982 /*
64983 * Some architectures need to do book-keeping after
64984 * a ptrace attach.
64985 */
64986- if (!ret)
64987+ if (!ret) {
64988 arch_ptrace_attach(child);
64989+ gr_audit_ptrace(child);
64990+ }
64991 goto out_put_task_struct;
64992 }
64993
64994diff -urNp linux-3.1.1/kernel/rcutorture.c linux-3.1.1/kernel/rcutorture.c
64995--- linux-3.1.1/kernel/rcutorture.c 2011-11-11 15:19:27.000000000 -0500
64996+++ linux-3.1.1/kernel/rcutorture.c 2011-11-16 18:39:08.000000000 -0500
64997@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
64998 { 0 };
64999 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65000 { 0 };
65001-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65002-static atomic_t n_rcu_torture_alloc;
65003-static atomic_t n_rcu_torture_alloc_fail;
65004-static atomic_t n_rcu_torture_free;
65005-static atomic_t n_rcu_torture_mberror;
65006-static atomic_t n_rcu_torture_error;
65007+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65008+static atomic_unchecked_t n_rcu_torture_alloc;
65009+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65010+static atomic_unchecked_t n_rcu_torture_free;
65011+static atomic_unchecked_t n_rcu_torture_mberror;
65012+static atomic_unchecked_t n_rcu_torture_error;
65013 static long n_rcu_torture_boost_ktrerror;
65014 static long n_rcu_torture_boost_rterror;
65015 static long n_rcu_torture_boost_failure;
65016@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65017
65018 spin_lock_bh(&rcu_torture_lock);
65019 if (list_empty(&rcu_torture_freelist)) {
65020- atomic_inc(&n_rcu_torture_alloc_fail);
65021+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65022 spin_unlock_bh(&rcu_torture_lock);
65023 return NULL;
65024 }
65025- atomic_inc(&n_rcu_torture_alloc);
65026+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65027 p = rcu_torture_freelist.next;
65028 list_del_init(p);
65029 spin_unlock_bh(&rcu_torture_lock);
65030@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65031 static void
65032 rcu_torture_free(struct rcu_torture *p)
65033 {
65034- atomic_inc(&n_rcu_torture_free);
65035+ atomic_inc_unchecked(&n_rcu_torture_free);
65036 spin_lock_bh(&rcu_torture_lock);
65037 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65038 spin_unlock_bh(&rcu_torture_lock);
65039@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65040 i = rp->rtort_pipe_count;
65041 if (i > RCU_TORTURE_PIPE_LEN)
65042 i = RCU_TORTURE_PIPE_LEN;
65043- atomic_inc(&rcu_torture_wcount[i]);
65044+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65045 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65046 rp->rtort_mbtest = 0;
65047 rcu_torture_free(rp);
65048@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
65049 i = rp->rtort_pipe_count;
65050 if (i > RCU_TORTURE_PIPE_LEN)
65051 i = RCU_TORTURE_PIPE_LEN;
65052- atomic_inc(&rcu_torture_wcount[i]);
65053+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65054 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65055 rp->rtort_mbtest = 0;
65056 list_del(&rp->rtort_free);
65057@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
65058 i = old_rp->rtort_pipe_count;
65059 if (i > RCU_TORTURE_PIPE_LEN)
65060 i = RCU_TORTURE_PIPE_LEN;
65061- atomic_inc(&rcu_torture_wcount[i]);
65062+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65063 old_rp->rtort_pipe_count++;
65064 cur_ops->deferred_free(old_rp);
65065 }
65066@@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned l
65067 return;
65068 }
65069 if (p->rtort_mbtest == 0)
65070- atomic_inc(&n_rcu_torture_mberror);
65071+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65072 spin_lock(&rand_lock);
65073 cur_ops->read_delay(&rand);
65074 n_rcu_torture_timers++;
65075@@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg)
65076 continue;
65077 }
65078 if (p->rtort_mbtest == 0)
65079- atomic_inc(&n_rcu_torture_mberror);
65080+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65081 cur_ops->read_delay(&rand);
65082 preempt_disable();
65083 pipe_count = p->rtort_pipe_count;
65084@@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page)
65085 rcu_torture_current,
65086 rcu_torture_current_version,
65087 list_empty(&rcu_torture_freelist),
65088- atomic_read(&n_rcu_torture_alloc),
65089- atomic_read(&n_rcu_torture_alloc_fail),
65090- atomic_read(&n_rcu_torture_free),
65091- atomic_read(&n_rcu_torture_mberror),
65092+ atomic_read_unchecked(&n_rcu_torture_alloc),
65093+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65094+ atomic_read_unchecked(&n_rcu_torture_free),
65095+ atomic_read_unchecked(&n_rcu_torture_mberror),
65096 n_rcu_torture_boost_ktrerror,
65097 n_rcu_torture_boost_rterror,
65098 n_rcu_torture_boost_failure,
65099 n_rcu_torture_boosts,
65100 n_rcu_torture_timers);
65101- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65102+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65103 n_rcu_torture_boost_ktrerror != 0 ||
65104 n_rcu_torture_boost_rterror != 0 ||
65105 n_rcu_torture_boost_failure != 0)
65106@@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page)
65107 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65108 if (i > 1) {
65109 cnt += sprintf(&page[cnt], "!!! ");
65110- atomic_inc(&n_rcu_torture_error);
65111+ atomic_inc_unchecked(&n_rcu_torture_error);
65112 WARN_ON_ONCE(1);
65113 }
65114 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65115@@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page)
65116 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65117 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65118 cnt += sprintf(&page[cnt], " %d",
65119- atomic_read(&rcu_torture_wcount[i]));
65120+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65121 }
65122 cnt += sprintf(&page[cnt], "\n");
65123 if (cur_ops->stats)
65124@@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void)
65125
65126 if (cur_ops->cleanup)
65127 cur_ops->cleanup();
65128- if (atomic_read(&n_rcu_torture_error))
65129+ if (atomic_read_unchecked(&n_rcu_torture_error))
65130 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65131 else
65132 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65133@@ -1474,17 +1474,17 @@ rcu_torture_init(void)
65134
65135 rcu_torture_current = NULL;
65136 rcu_torture_current_version = 0;
65137- atomic_set(&n_rcu_torture_alloc, 0);
65138- atomic_set(&n_rcu_torture_alloc_fail, 0);
65139- atomic_set(&n_rcu_torture_free, 0);
65140- atomic_set(&n_rcu_torture_mberror, 0);
65141- atomic_set(&n_rcu_torture_error, 0);
65142+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65143+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65144+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65145+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65146+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65147 n_rcu_torture_boost_ktrerror = 0;
65148 n_rcu_torture_boost_rterror = 0;
65149 n_rcu_torture_boost_failure = 0;
65150 n_rcu_torture_boosts = 0;
65151 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65152- atomic_set(&rcu_torture_wcount[i], 0);
65153+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65154 for_each_possible_cpu(cpu) {
65155 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65156 per_cpu(rcu_torture_count, cpu)[i] = 0;
65157diff -urNp linux-3.1.1/kernel/rcutree.c linux-3.1.1/kernel/rcutree.c
65158--- linux-3.1.1/kernel/rcutree.c 2011-11-11 15:19:27.000000000 -0500
65159+++ linux-3.1.1/kernel/rcutree.c 2011-11-16 18:39:08.000000000 -0500
65160@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
65161 }
65162 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65163 smp_mb__before_atomic_inc(); /* See above. */
65164- atomic_inc(&rdtp->dynticks);
65165+ atomic_inc_unchecked(&rdtp->dynticks);
65166 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65167- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65168+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65169 local_irq_restore(flags);
65170
65171 /* If the interrupt queued a callback, get out of dyntick mode. */
65172@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
65173 return;
65174 }
65175 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65176- atomic_inc(&rdtp->dynticks);
65177+ atomic_inc_unchecked(&rdtp->dynticks);
65178 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65179 smp_mb__after_atomic_inc(); /* See above. */
65180- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65181+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65182 local_irq_restore(flags);
65183 }
65184
65185@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
65186 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65187
65188 if (rdtp->dynticks_nmi_nesting == 0 &&
65189- (atomic_read(&rdtp->dynticks) & 0x1))
65190+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65191 return;
65192 rdtp->dynticks_nmi_nesting++;
65193 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65194- atomic_inc(&rdtp->dynticks);
65195+ atomic_inc_unchecked(&rdtp->dynticks);
65196 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65197 smp_mb__after_atomic_inc(); /* See above. */
65198- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65199+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65200 }
65201
65202 /**
65203@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
65204 return;
65205 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65206 smp_mb__before_atomic_inc(); /* See above. */
65207- atomic_inc(&rdtp->dynticks);
65208+ atomic_inc_unchecked(&rdtp->dynticks);
65209 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65210- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65211+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65212 }
65213
65214 /**
65215@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
65216 */
65217 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65218 {
65219- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65220+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65221 return 0;
65222 }
65223
65224@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
65225 unsigned long curr;
65226 unsigned long snap;
65227
65228- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
65229+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65230 snap = (unsigned long)rdp->dynticks_snap;
65231
65232 /*
65233@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
65234 /*
65235 * Do softirq processing for the current CPU.
65236 */
65237-static void rcu_process_callbacks(struct softirq_action *unused)
65238+static void rcu_process_callbacks(void)
65239 {
65240 __rcu_process_callbacks(&rcu_sched_state,
65241 &__get_cpu_var(rcu_sched_data));
65242diff -urNp linux-3.1.1/kernel/rcutree.h linux-3.1.1/kernel/rcutree.h
65243--- linux-3.1.1/kernel/rcutree.h 2011-11-11 15:19:27.000000000 -0500
65244+++ linux-3.1.1/kernel/rcutree.h 2011-11-16 18:39:08.000000000 -0500
65245@@ -86,7 +86,7 @@
65246 struct rcu_dynticks {
65247 int dynticks_nesting; /* Track irq/process nesting level. */
65248 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65249- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65250+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65251 };
65252
65253 /* RCU's kthread states for tracing. */
65254diff -urNp linux-3.1.1/kernel/rcutree_plugin.h linux-3.1.1/kernel/rcutree_plugin.h
65255--- linux-3.1.1/kernel/rcutree_plugin.h 2011-11-11 15:19:27.000000000 -0500
65256+++ linux-3.1.1/kernel/rcutree_plugin.h 2011-11-16 18:39:08.000000000 -0500
65257@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
65258
65259 /* Clean up and exit. */
65260 smp_mb(); /* ensure expedited GP seen before counter increment. */
65261- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65262+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65263 unlock_mb_ret:
65264 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65265 mb_ret:
65266@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
65267
65268 #else /* #ifndef CONFIG_SMP */
65269
65270-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65271-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65272+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65273+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65274
65275 static int synchronize_sched_expedited_cpu_stop(void *data)
65276 {
65277@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
65278 int firstsnap, s, snap, trycount = 0;
65279
65280 /* Note that atomic_inc_return() implies full memory barrier. */
65281- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65282+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65283 get_online_cpus();
65284
65285 /*
65286@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
65287 }
65288
65289 /* Check to see if someone else did our work for us. */
65290- s = atomic_read(&sync_sched_expedited_done);
65291+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65292 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65293 smp_mb(); /* ensure test happens before caller kfree */
65294 return;
65295@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
65296 * grace period works for us.
65297 */
65298 get_online_cpus();
65299- snap = atomic_read(&sync_sched_expedited_started) - 1;
65300+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65301 smp_mb(); /* ensure read is before try_stop_cpus(). */
65302 }
65303
65304@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
65305 * than we did beat us to the punch.
65306 */
65307 do {
65308- s = atomic_read(&sync_sched_expedited_done);
65309+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65310 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65311 smp_mb(); /* ensure test happens before caller kfree */
65312 break;
65313 }
65314- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65315+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65316
65317 put_online_cpus();
65318 }
65319diff -urNp linux-3.1.1/kernel/relay.c linux-3.1.1/kernel/relay.c
65320--- linux-3.1.1/kernel/relay.c 2011-11-11 15:19:27.000000000 -0500
65321+++ linux-3.1.1/kernel/relay.c 2011-11-16 18:40:44.000000000 -0500
65322@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
65323 };
65324 ssize_t ret;
65325
65326+ pax_track_stack();
65327+
65328 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65329 return 0;
65330 if (splice_grow_spd(pipe, &spd))
65331diff -urNp linux-3.1.1/kernel/resource.c linux-3.1.1/kernel/resource.c
65332--- linux-3.1.1/kernel/resource.c 2011-11-11 15:19:27.000000000 -0500
65333+++ linux-3.1.1/kernel/resource.c 2011-11-16 18:40:44.000000000 -0500
65334@@ -141,8 +141,18 @@ static const struct file_operations proc
65335
65336 static int __init ioresources_init(void)
65337 {
65338+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65339+#ifdef CONFIG_GRKERNSEC_PROC_USER
65340+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65341+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65342+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65343+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65344+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65345+#endif
65346+#else
65347 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65348 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65349+#endif
65350 return 0;
65351 }
65352 __initcall(ioresources_init);
65353diff -urNp linux-3.1.1/kernel/rtmutex-tester.c linux-3.1.1/kernel/rtmutex-tester.c
65354--- linux-3.1.1/kernel/rtmutex-tester.c 2011-11-11 15:19:27.000000000 -0500
65355+++ linux-3.1.1/kernel/rtmutex-tester.c 2011-11-16 18:39:08.000000000 -0500
65356@@ -20,7 +20,7 @@
65357 #define MAX_RT_TEST_MUTEXES 8
65358
65359 static spinlock_t rttest_lock;
65360-static atomic_t rttest_event;
65361+static atomic_unchecked_t rttest_event;
65362
65363 struct test_thread_data {
65364 int opcode;
65365@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
65366
65367 case RTTEST_LOCKCONT:
65368 td->mutexes[td->opdata] = 1;
65369- td->event = atomic_add_return(1, &rttest_event);
65370+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65371 return 0;
65372
65373 case RTTEST_RESET:
65374@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
65375 return 0;
65376
65377 case RTTEST_RESETEVENT:
65378- atomic_set(&rttest_event, 0);
65379+ atomic_set_unchecked(&rttest_event, 0);
65380 return 0;
65381
65382 default:
65383@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
65384 return ret;
65385
65386 td->mutexes[id] = 1;
65387- td->event = atomic_add_return(1, &rttest_event);
65388+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65389 rt_mutex_lock(&mutexes[id]);
65390- td->event = atomic_add_return(1, &rttest_event);
65391+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65392 td->mutexes[id] = 4;
65393 return 0;
65394
65395@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
65396 return ret;
65397
65398 td->mutexes[id] = 1;
65399- td->event = atomic_add_return(1, &rttest_event);
65400+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65401 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65402- td->event = atomic_add_return(1, &rttest_event);
65403+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65404 td->mutexes[id] = ret ? 0 : 4;
65405 return ret ? -EINTR : 0;
65406
65407@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
65408 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65409 return ret;
65410
65411- td->event = atomic_add_return(1, &rttest_event);
65412+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65413 rt_mutex_unlock(&mutexes[id]);
65414- td->event = atomic_add_return(1, &rttest_event);
65415+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65416 td->mutexes[id] = 0;
65417 return 0;
65418
65419@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
65420 break;
65421
65422 td->mutexes[dat] = 2;
65423- td->event = atomic_add_return(1, &rttest_event);
65424+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65425 break;
65426
65427 default:
65428@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
65429 return;
65430
65431 td->mutexes[dat] = 3;
65432- td->event = atomic_add_return(1, &rttest_event);
65433+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65434 break;
65435
65436 case RTTEST_LOCKNOWAIT:
65437@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
65438 return;
65439
65440 td->mutexes[dat] = 1;
65441- td->event = atomic_add_return(1, &rttest_event);
65442+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65443 return;
65444
65445 default:
65446diff -urNp linux-3.1.1/kernel/sched_autogroup.c linux-3.1.1/kernel/sched_autogroup.c
65447--- linux-3.1.1/kernel/sched_autogroup.c 2011-11-11 15:19:27.000000000 -0500
65448+++ linux-3.1.1/kernel/sched_autogroup.c 2011-11-16 18:39:08.000000000 -0500
65449@@ -7,7 +7,7 @@
65450
65451 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65452 static struct autogroup autogroup_default;
65453-static atomic_t autogroup_seq_nr;
65454+static atomic_unchecked_t autogroup_seq_nr;
65455
65456 static void __init autogroup_init(struct task_struct *init_task)
65457 {
65458@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
65459
65460 kref_init(&ag->kref);
65461 init_rwsem(&ag->lock);
65462- ag->id = atomic_inc_return(&autogroup_seq_nr);
65463+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65464 ag->tg = tg;
65465 #ifdef CONFIG_RT_GROUP_SCHED
65466 /*
65467diff -urNp linux-3.1.1/kernel/sched.c linux-3.1.1/kernel/sched.c
65468--- linux-3.1.1/kernel/sched.c 2011-11-11 15:19:27.000000000 -0500
65469+++ linux-3.1.1/kernel/sched.c 2011-11-16 18:40:44.000000000 -0500
65470@@ -4264,6 +4264,8 @@ static void __sched __schedule(void)
65471 struct rq *rq;
65472 int cpu;
65473
65474+ pax_track_stack();
65475+
65476 need_resched:
65477 preempt_disable();
65478 cpu = smp_processor_id();
65479@@ -4950,6 +4952,8 @@ int can_nice(const struct task_struct *p
65480 /* convert nice value [19,-20] to rlimit style value [1,40] */
65481 int nice_rlim = 20 - nice;
65482
65483+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65484+
65485 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65486 capable(CAP_SYS_NICE));
65487 }
65488@@ -4983,7 +4987,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65489 if (nice > 19)
65490 nice = 19;
65491
65492- if (increment < 0 && !can_nice(current, nice))
65493+ if (increment < 0 && (!can_nice(current, nice) ||
65494+ gr_handle_chroot_nice()))
65495 return -EPERM;
65496
65497 retval = security_task_setnice(current, nice);
65498@@ -5127,6 +5132,7 @@ recheck:
65499 unsigned long rlim_rtprio =
65500 task_rlimit(p, RLIMIT_RTPRIO);
65501
65502+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65503 /* can't set/change the rt policy */
65504 if (policy != p->policy && !rlim_rtprio)
65505 return -EPERM;
65506diff -urNp linux-3.1.1/kernel/sched_fair.c linux-3.1.1/kernel/sched_fair.c
65507--- linux-3.1.1/kernel/sched_fair.c 2011-11-11 15:19:27.000000000 -0500
65508+++ linux-3.1.1/kernel/sched_fair.c 2011-11-16 18:39:08.000000000 -0500
65509@@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_c
65510 * run_rebalance_domains is triggered when needed from the scheduler tick.
65511 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65512 */
65513-static void run_rebalance_domains(struct softirq_action *h)
65514+static void run_rebalance_domains(void)
65515 {
65516 int this_cpu = smp_processor_id();
65517 struct rq *this_rq = cpu_rq(this_cpu);
65518diff -urNp linux-3.1.1/kernel/signal.c linux-3.1.1/kernel/signal.c
65519--- linux-3.1.1/kernel/signal.c 2011-11-11 15:19:27.000000000 -0500
65520+++ linux-3.1.1/kernel/signal.c 2011-11-16 19:30:04.000000000 -0500
65521@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
65522
65523 int print_fatal_signals __read_mostly;
65524
65525-static void __user *sig_handler(struct task_struct *t, int sig)
65526+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65527 {
65528 return t->sighand->action[sig - 1].sa.sa_handler;
65529 }
65530
65531-static int sig_handler_ignored(void __user *handler, int sig)
65532+static int sig_handler_ignored(__sighandler_t handler, int sig)
65533 {
65534 /* Is it explicitly or implicitly ignored? */
65535 return handler == SIG_IGN ||
65536@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
65537 static int sig_task_ignored(struct task_struct *t, int sig,
65538 int from_ancestor_ns)
65539 {
65540- void __user *handler;
65541+ __sighandler_t handler;
65542
65543 handler = sig_handler(t, sig);
65544
65545@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_st
65546 atomic_inc(&user->sigpending);
65547 rcu_read_unlock();
65548
65549+ if (!override_rlimit)
65550+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65551+
65552 if (override_rlimit ||
65553 atomic_read(&user->sigpending) <=
65554 task_rlimit(t, RLIMIT_SIGPENDING)) {
65555@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct
65556
65557 int unhandled_signal(struct task_struct *tsk, int sig)
65558 {
65559- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65560+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65561 if (is_global_init(tsk))
65562 return 1;
65563 if (handler != SIG_IGN && handler != SIG_DFL)
65564@@ -815,6 +818,13 @@ static int check_kill_permission(int sig
65565 }
65566 }
65567
65568+ /* allow glibc communication via tgkill to other threads in our
65569+ thread group */
65570+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65571+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65572+ && gr_handle_signal(t, sig))
65573+ return -EPERM;
65574+
65575 return security_task_kill(t, info, sig, 0);
65576 }
65577
65578@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct si
65579 return send_signal(sig, info, p, 1);
65580 }
65581
65582-static int
65583+int
65584 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65585 {
65586 return send_signal(sig, info, t, 0);
65587@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *
65588 unsigned long int flags;
65589 int ret, blocked, ignored;
65590 struct k_sigaction *action;
65591+ int is_unhandled = 0;
65592
65593 spin_lock_irqsave(&t->sighand->siglock, flags);
65594 action = &t->sighand->action[sig-1];
65595@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *
65596 }
65597 if (action->sa.sa_handler == SIG_DFL)
65598 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65599+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65600+ is_unhandled = 1;
65601 ret = specific_send_sig_info(sig, info, t);
65602 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65603
65604+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65605+ normal operation */
65606+ if (is_unhandled) {
65607+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65608+ gr_handle_crash(t, sig);
65609+ }
65610+
65611 return ret;
65612 }
65613
65614@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct
65615 ret = check_kill_permission(sig, info, p);
65616 rcu_read_unlock();
65617
65618- if (!ret && sig)
65619+ if (!ret && sig) {
65620 ret = do_send_sig_info(sig, info, p, true);
65621+ if (!ret)
65622+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65623+ }
65624
65625 return ret;
65626 }
65627@@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr,
65628 {
65629 siginfo_t info;
65630
65631+ pax_track_stack();
65632+
65633 memset(&info, 0, sizeof info);
65634 info.si_signo = signr;
65635 info.si_code = exit_code;
65636@@ -2748,7 +2773,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65637 int error = -ESRCH;
65638
65639 rcu_read_lock();
65640- p = find_task_by_vpid(pid);
65641+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65642+ /* allow glibc communication via tgkill to other threads in our
65643+ thread group */
65644+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65645+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65646+ p = find_task_by_vpid_unrestricted(pid);
65647+ else
65648+#endif
65649+ p = find_task_by_vpid(pid);
65650 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65651 error = check_kill_permission(sig, info, p);
65652 /*
65653diff -urNp linux-3.1.1/kernel/smp.c linux-3.1.1/kernel/smp.c
65654--- linux-3.1.1/kernel/smp.c 2011-11-11 15:19:27.000000000 -0500
65655+++ linux-3.1.1/kernel/smp.c 2011-11-16 18:39:08.000000000 -0500
65656@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
65657 }
65658 EXPORT_SYMBOL(smp_call_function);
65659
65660-void ipi_call_lock(void)
65661+void ipi_call_lock(void) __acquires(call_function.lock)
65662 {
65663 raw_spin_lock(&call_function.lock);
65664 }
65665
65666-void ipi_call_unlock(void)
65667+void ipi_call_unlock(void) __releases(call_function.lock)
65668 {
65669 raw_spin_unlock(&call_function.lock);
65670 }
65671
65672-void ipi_call_lock_irq(void)
65673+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65674 {
65675 raw_spin_lock_irq(&call_function.lock);
65676 }
65677
65678-void ipi_call_unlock_irq(void)
65679+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65680 {
65681 raw_spin_unlock_irq(&call_function.lock);
65682 }
65683diff -urNp linux-3.1.1/kernel/softirq.c linux-3.1.1/kernel/softirq.c
65684--- linux-3.1.1/kernel/softirq.c 2011-11-11 15:19:27.000000000 -0500
65685+++ linux-3.1.1/kernel/softirq.c 2011-11-16 18:39:08.000000000 -0500
65686@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65687
65688 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65689
65690-char *softirq_to_name[NR_SOFTIRQS] = {
65691+const char * const softirq_to_name[NR_SOFTIRQS] = {
65692 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65693 "TASKLET", "SCHED", "HRTIMER", "RCU"
65694 };
65695@@ -235,7 +235,7 @@ restart:
65696 kstat_incr_softirqs_this_cpu(vec_nr);
65697
65698 trace_softirq_entry(vec_nr);
65699- h->action(h);
65700+ h->action();
65701 trace_softirq_exit(vec_nr);
65702 if (unlikely(prev_count != preempt_count())) {
65703 printk(KERN_ERR "huh, entered softirq %u %s %p"
65704@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
65705 local_irq_restore(flags);
65706 }
65707
65708-void open_softirq(int nr, void (*action)(struct softirq_action *))
65709+void open_softirq(int nr, void (*action)(void))
65710 {
65711- softirq_vec[nr].action = action;
65712+ pax_open_kernel();
65713+ *(void **)&softirq_vec[nr].action = action;
65714+ pax_close_kernel();
65715 }
65716
65717 /*
65718@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
65719
65720 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65721
65722-static void tasklet_action(struct softirq_action *a)
65723+static void tasklet_action(void)
65724 {
65725 struct tasklet_struct *list;
65726
65727@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
65728 }
65729 }
65730
65731-static void tasklet_hi_action(struct softirq_action *a)
65732+static void tasklet_hi_action(void)
65733 {
65734 struct tasklet_struct *list;
65735
65736diff -urNp linux-3.1.1/kernel/sys.c linux-3.1.1/kernel/sys.c
65737--- linux-3.1.1/kernel/sys.c 2011-11-11 15:19:27.000000000 -0500
65738+++ linux-3.1.1/kernel/sys.c 2011-11-16 18:40:44.000000000 -0500
65739@@ -157,6 +157,12 @@ static int set_one_prio(struct task_stru
65740 error = -EACCES;
65741 goto out;
65742 }
65743+
65744+ if (gr_handle_chroot_setpriority(p, niceval)) {
65745+ error = -EACCES;
65746+ goto out;
65747+ }
65748+
65749 no_nice = security_task_setnice(p, niceval);
65750 if (no_nice) {
65751 error = no_nice;
65752@@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65753 goto error;
65754 }
65755
65756+ if (gr_check_group_change(new->gid, new->egid, -1))
65757+ goto error;
65758+
65759 if (rgid != (gid_t) -1 ||
65760 (egid != (gid_t) -1 && egid != old->gid))
65761 new->sgid = new->egid;
65762@@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65763 old = current_cred();
65764
65765 retval = -EPERM;
65766+
65767+ if (gr_check_group_change(gid, gid, gid))
65768+ goto error;
65769+
65770 if (nsown_capable(CAP_SETGID))
65771 new->gid = new->egid = new->sgid = new->fsgid = gid;
65772 else if (gid == old->gid || gid == old->sgid)
65773@@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65774 goto error;
65775 }
65776
65777+ if (gr_check_user_change(new->uid, new->euid, -1))
65778+ goto error;
65779+
65780 if (new->uid != old->uid) {
65781 retval = set_user(new);
65782 if (retval < 0)
65783@@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65784 old = current_cred();
65785
65786 retval = -EPERM;
65787+
65788+ if (gr_check_crash_uid(uid))
65789+ goto error;
65790+ if (gr_check_user_change(uid, uid, uid))
65791+ goto error;
65792+
65793 if (nsown_capable(CAP_SETUID)) {
65794 new->suid = new->uid = uid;
65795 if (uid != old->uid) {
65796@@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65797 goto error;
65798 }
65799
65800+ if (gr_check_user_change(ruid, euid, -1))
65801+ goto error;
65802+
65803 if (ruid != (uid_t) -1) {
65804 new->uid = ruid;
65805 if (ruid != old->uid) {
65806@@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65807 goto error;
65808 }
65809
65810+ if (gr_check_group_change(rgid, egid, -1))
65811+ goto error;
65812+
65813 if (rgid != (gid_t) -1)
65814 new->gid = rgid;
65815 if (egid != (gid_t) -1)
65816@@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65817 old = current_cred();
65818 old_fsuid = old->fsuid;
65819
65820+ if (gr_check_user_change(-1, -1, uid))
65821+ goto error;
65822+
65823 if (uid == old->uid || uid == old->euid ||
65824 uid == old->suid || uid == old->fsuid ||
65825 nsown_capable(CAP_SETUID)) {
65826@@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65827 }
65828 }
65829
65830+error:
65831 abort_creds(new);
65832 return old_fsuid;
65833
65834@@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65835 if (gid == old->gid || gid == old->egid ||
65836 gid == old->sgid || gid == old->fsgid ||
65837 nsown_capable(CAP_SETGID)) {
65838+ if (gr_check_group_change(-1, -1, gid))
65839+ goto error;
65840+
65841 if (gid != old_fsgid) {
65842 new->fsgid = gid;
65843 goto change_okay;
65844 }
65845 }
65846
65847+error:
65848 abort_creds(new);
65849 return old_fsgid;
65850
65851@@ -1242,19 +1278,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
65852 return -EFAULT;
65853
65854 down_read(&uts_sem);
65855- error = __copy_to_user(&name->sysname, &utsname()->sysname,
65856+ error = __copy_to_user(name->sysname, &utsname()->sysname,
65857 __OLD_UTS_LEN);
65858 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
65859- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
65860+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
65861 __OLD_UTS_LEN);
65862 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
65863- error |= __copy_to_user(&name->release, &utsname()->release,
65864+ error |= __copy_to_user(name->release, &utsname()->release,
65865 __OLD_UTS_LEN);
65866 error |= __put_user(0, name->release + __OLD_UTS_LEN);
65867- error |= __copy_to_user(&name->version, &utsname()->version,
65868+ error |= __copy_to_user(name->version, &utsname()->version,
65869 __OLD_UTS_LEN);
65870 error |= __put_user(0, name->version + __OLD_UTS_LEN);
65871- error |= __copy_to_user(&name->machine, &utsname()->machine,
65872+ error |= __copy_to_user(name->machine, &utsname()->machine,
65873 __OLD_UTS_LEN);
65874 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
65875 up_read(&uts_sem);
65876@@ -1717,7 +1753,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65877 error = get_dumpable(me->mm);
65878 break;
65879 case PR_SET_DUMPABLE:
65880- if (arg2 < 0 || arg2 > 1) {
65881+ if (arg2 > 1) {
65882 error = -EINVAL;
65883 break;
65884 }
65885diff -urNp linux-3.1.1/kernel/sysctl_binary.c linux-3.1.1/kernel/sysctl_binary.c
65886--- linux-3.1.1/kernel/sysctl_binary.c 2011-11-11 15:19:27.000000000 -0500
65887+++ linux-3.1.1/kernel/sysctl_binary.c 2011-11-16 18:39:08.000000000 -0500
65888@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
65889 int i;
65890
65891 set_fs(KERNEL_DS);
65892- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65893+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65894 set_fs(old_fs);
65895 if (result < 0)
65896 goto out_kfree;
65897@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
65898 }
65899
65900 set_fs(KERNEL_DS);
65901- result = vfs_write(file, buffer, str - buffer, &pos);
65902+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65903 set_fs(old_fs);
65904 if (result < 0)
65905 goto out_kfree;
65906@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
65907 int i;
65908
65909 set_fs(KERNEL_DS);
65910- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65911+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65912 set_fs(old_fs);
65913 if (result < 0)
65914 goto out_kfree;
65915@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
65916 }
65917
65918 set_fs(KERNEL_DS);
65919- result = vfs_write(file, buffer, str - buffer, &pos);
65920+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65921 set_fs(old_fs);
65922 if (result < 0)
65923 goto out_kfree;
65924@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
65925 int i;
65926
65927 set_fs(KERNEL_DS);
65928- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
65929+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
65930 set_fs(old_fs);
65931 if (result < 0)
65932 goto out;
65933@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
65934 __le16 dnaddr;
65935
65936 set_fs(KERNEL_DS);
65937- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
65938+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
65939 set_fs(old_fs);
65940 if (result < 0)
65941 goto out;
65942@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
65943 le16_to_cpu(dnaddr) & 0x3ff);
65944
65945 set_fs(KERNEL_DS);
65946- result = vfs_write(file, buf, len, &pos);
65947+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
65948 set_fs(old_fs);
65949 if (result < 0)
65950 goto out;
65951diff -urNp linux-3.1.1/kernel/sysctl.c linux-3.1.1/kernel/sysctl.c
65952--- linux-3.1.1/kernel/sysctl.c 2011-11-11 15:19:27.000000000 -0500
65953+++ linux-3.1.1/kernel/sysctl.c 2011-11-16 18:40:44.000000000 -0500
65954@@ -85,6 +85,13 @@
65955
65956
65957 #if defined(CONFIG_SYSCTL)
65958+#include <linux/grsecurity.h>
65959+#include <linux/grinternal.h>
65960+
65961+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65962+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65963+ const int op);
65964+extern int gr_handle_chroot_sysctl(const int op);
65965
65966 /* External variables not in a header file. */
65967 extern int sysctl_overcommit_memory;
65968@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
65969 }
65970
65971 #endif
65972+extern struct ctl_table grsecurity_table[];
65973
65974 static struct ctl_table root_table[];
65975 static struct ctl_table_root sysctl_table_root;
65976@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
65977 int sysctl_legacy_va_layout;
65978 #endif
65979
65980+#ifdef CONFIG_PAX_SOFTMODE
65981+static ctl_table pax_table[] = {
65982+ {
65983+ .procname = "softmode",
65984+ .data = &pax_softmode,
65985+ .maxlen = sizeof(unsigned int),
65986+ .mode = 0600,
65987+ .proc_handler = &proc_dointvec,
65988+ },
65989+
65990+ { }
65991+};
65992+#endif
65993+
65994 /* The default sysctl tables: */
65995
65996 static struct ctl_table root_table[] = {
65997@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
65998 #endif
65999
66000 static struct ctl_table kern_table[] = {
66001+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66002+ {
66003+ .procname = "grsecurity",
66004+ .mode = 0500,
66005+ .child = grsecurity_table,
66006+ },
66007+#endif
66008+
66009+#ifdef CONFIG_PAX_SOFTMODE
66010+ {
66011+ .procname = "pax",
66012+ .mode = 0500,
66013+ .child = pax_table,
66014+ },
66015+#endif
66016+
66017 {
66018 .procname = "sched_child_runs_first",
66019 .data = &sysctl_sched_child_runs_first,
66020@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
66021 .data = &modprobe_path,
66022 .maxlen = KMOD_PATH_LEN,
66023 .mode = 0644,
66024- .proc_handler = proc_dostring,
66025+ .proc_handler = proc_dostring_modpriv,
66026 },
66027 {
66028 .procname = "modules_disabled",
66029@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
66030 .extra1 = &zero,
66031 .extra2 = &one,
66032 },
66033+#endif
66034 {
66035 .procname = "kptr_restrict",
66036 .data = &kptr_restrict,
66037 .maxlen = sizeof(int),
66038 .mode = 0644,
66039 .proc_handler = proc_dmesg_restrict,
66040+#ifdef CONFIG_GRKERNSEC_HIDESYM
66041+ .extra1 = &two,
66042+#else
66043 .extra1 = &zero,
66044+#endif
66045 .extra2 = &two,
66046 },
66047-#endif
66048 {
66049 .procname = "ngroups_max",
66050 .data = &ngroups_max,
66051@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
66052 .proc_handler = proc_dointvec_minmax,
66053 .extra1 = &zero,
66054 },
66055+ {
66056+ .procname = "heap_stack_gap",
66057+ .data = &sysctl_heap_stack_gap,
66058+ .maxlen = sizeof(sysctl_heap_stack_gap),
66059+ .mode = 0644,
66060+ .proc_handler = proc_doulongvec_minmax,
66061+ },
66062 #else
66063 {
66064 .procname = "nr_trim_pages",
66065@@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op)
66066 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66067 {
66068 int mode;
66069+ int error;
66070+
66071+ if (table->parent != NULL && table->parent->procname != NULL &&
66072+ table->procname != NULL &&
66073+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66074+ return -EACCES;
66075+ if (gr_handle_chroot_sysctl(op))
66076+ return -EACCES;
66077+ error = gr_handle_sysctl(table, op);
66078+ if (error)
66079+ return error;
66080
66081 if (root->permissions)
66082 mode = root->permissions(root, current->nsproxy, table);
66083@@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *tabl
66084 buffer, lenp, ppos);
66085 }
66086
66087+int proc_dostring_modpriv(struct ctl_table *table, int write,
66088+ void __user *buffer, size_t *lenp, loff_t *ppos)
66089+{
66090+ if (write && !capable(CAP_SYS_MODULE))
66091+ return -EPERM;
66092+
66093+ return _proc_do_string(table->data, table->maxlen, write,
66094+ buffer, lenp, ppos);
66095+}
66096+
66097 static size_t proc_skip_spaces(char **buf)
66098 {
66099 size_t ret;
66100@@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **b
66101 len = strlen(tmp);
66102 if (len > *size)
66103 len = *size;
66104+ if (len > sizeof(tmp))
66105+ len = sizeof(tmp);
66106 if (copy_to_user(*buf, tmp, len))
66107 return -EFAULT;
66108 *size -= len;
66109@@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(v
66110 *i = val;
66111 } else {
66112 val = convdiv * (*i) / convmul;
66113- if (!first)
66114+ if (!first) {
66115 err = proc_put_char(&buffer, &left, '\t');
66116+ if (err)
66117+ break;
66118+ }
66119 err = proc_put_long(&buffer, &left, val, false);
66120 if (err)
66121 break;
66122@@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *tabl
66123 return -ENOSYS;
66124 }
66125
66126+int proc_dostring_modpriv(struct ctl_table *table, int write,
66127+ void __user *buffer, size_t *lenp, loff_t *ppos)
66128+{
66129+ return -ENOSYS;
66130+}
66131+
66132 int proc_dointvec(struct ctl_table *table, int write,
66133 void __user *buffer, size_t *lenp, loff_t *ppos)
66134 {
66135@@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66136 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66137 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66138 EXPORT_SYMBOL(proc_dostring);
66139+EXPORT_SYMBOL(proc_dostring_modpriv);
66140 EXPORT_SYMBOL(proc_doulongvec_minmax);
66141 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66142 EXPORT_SYMBOL(register_sysctl_table);
66143diff -urNp linux-3.1.1/kernel/sysctl_check.c linux-3.1.1/kernel/sysctl_check.c
66144--- linux-3.1.1/kernel/sysctl_check.c 2011-11-11 15:19:27.000000000 -0500
66145+++ linux-3.1.1/kernel/sysctl_check.c 2011-11-16 18:40:44.000000000 -0500
66146@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
66147 set_fail(&fail, table, "Directory with extra2");
66148 } else {
66149 if ((table->proc_handler == proc_dostring) ||
66150+ (table->proc_handler == proc_dostring_modpriv) ||
66151 (table->proc_handler == proc_dointvec) ||
66152 (table->proc_handler == proc_dointvec_minmax) ||
66153 (table->proc_handler == proc_dointvec_jiffies) ||
66154diff -urNp linux-3.1.1/kernel/taskstats.c linux-3.1.1/kernel/taskstats.c
66155--- linux-3.1.1/kernel/taskstats.c 2011-11-11 15:19:27.000000000 -0500
66156+++ linux-3.1.1/kernel/taskstats.c 2011-11-16 19:35:09.000000000 -0500
66157@@ -27,9 +27,12 @@
66158 #include <linux/cgroup.h>
66159 #include <linux/fs.h>
66160 #include <linux/file.h>
66161+#include <linux/grsecurity.h>
66162 #include <net/genetlink.h>
66163 #include <linux/atomic.h>
66164
66165+extern int gr_is_taskstats_denied(int pid);
66166+
66167 /*
66168 * Maximum length of a cpumask that can be specified in
66169 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66170@@ -556,6 +559,9 @@ err:
66171
66172 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66173 {
66174+ if (gr_is_taskstats_denied(current->pid))
66175+ return -EACCES;
66176+
66177 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66178 return cmd_attr_register_cpumask(info);
66179 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66180diff -urNp linux-3.1.1/kernel/time/alarmtimer.c linux-3.1.1/kernel/time/alarmtimer.c
66181--- linux-3.1.1/kernel/time/alarmtimer.c 2011-11-11 15:19:27.000000000 -0500
66182+++ linux-3.1.1/kernel/time/alarmtimer.c 2011-11-16 18:39:08.000000000 -0500
66183@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
66184 {
66185 int error = 0;
66186 int i;
66187- struct k_clock alarm_clock = {
66188+ static struct k_clock alarm_clock = {
66189 .clock_getres = alarm_clock_getres,
66190 .clock_get = alarm_clock_get,
66191 .timer_create = alarm_timer_create,
66192diff -urNp linux-3.1.1/kernel/time/tick-broadcast.c linux-3.1.1/kernel/time/tick-broadcast.c
66193--- linux-3.1.1/kernel/time/tick-broadcast.c 2011-11-11 15:19:27.000000000 -0500
66194+++ linux-3.1.1/kernel/time/tick-broadcast.c 2011-11-16 18:39:08.000000000 -0500
66195@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
66196 * then clear the broadcast bit.
66197 */
66198 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66199- int cpu = smp_processor_id();
66200+ cpu = smp_processor_id();
66201
66202 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66203 tick_broadcast_clear_oneshot(cpu);
66204diff -urNp linux-3.1.1/kernel/time/timekeeping.c linux-3.1.1/kernel/time/timekeeping.c
66205--- linux-3.1.1/kernel/time/timekeeping.c 2011-11-11 15:19:27.000000000 -0500
66206+++ linux-3.1.1/kernel/time/timekeeping.c 2011-11-16 18:40:44.000000000 -0500
66207@@ -14,6 +14,7 @@
66208 #include <linux/init.h>
66209 #include <linux/mm.h>
66210 #include <linux/sched.h>
66211+#include <linux/grsecurity.h>
66212 #include <linux/syscore_ops.h>
66213 #include <linux/clocksource.h>
66214 #include <linux/jiffies.h>
66215@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
66216 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66217 return -EINVAL;
66218
66219+ gr_log_timechange();
66220+
66221 write_seqlock_irqsave(&xtime_lock, flags);
66222
66223 timekeeping_forward_now();
66224diff -urNp linux-3.1.1/kernel/time/timer_list.c linux-3.1.1/kernel/time/timer_list.c
66225--- linux-3.1.1/kernel/time/timer_list.c 2011-11-11 15:19:27.000000000 -0500
66226+++ linux-3.1.1/kernel/time/timer_list.c 2011-11-16 18:40:44.000000000 -0500
66227@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66228
66229 static void print_name_offset(struct seq_file *m, void *sym)
66230 {
66231+#ifdef CONFIG_GRKERNSEC_HIDESYM
66232+ SEQ_printf(m, "<%p>", NULL);
66233+#else
66234 char symname[KSYM_NAME_LEN];
66235
66236 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66237 SEQ_printf(m, "<%pK>", sym);
66238 else
66239 SEQ_printf(m, "%s", symname);
66240+#endif
66241 }
66242
66243 static void
66244@@ -112,7 +116,11 @@ next_one:
66245 static void
66246 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66247 {
66248+#ifdef CONFIG_GRKERNSEC_HIDESYM
66249+ SEQ_printf(m, " .base: %p\n", NULL);
66250+#else
66251 SEQ_printf(m, " .base: %pK\n", base);
66252+#endif
66253 SEQ_printf(m, " .index: %d\n",
66254 base->index);
66255 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66256@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
66257 {
66258 struct proc_dir_entry *pe;
66259
66260+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66261+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66262+#else
66263 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66264+#endif
66265 if (!pe)
66266 return -ENOMEM;
66267 return 0;
66268diff -urNp linux-3.1.1/kernel/time/timer_stats.c linux-3.1.1/kernel/time/timer_stats.c
66269--- linux-3.1.1/kernel/time/timer_stats.c 2011-11-11 15:19:27.000000000 -0500
66270+++ linux-3.1.1/kernel/time/timer_stats.c 2011-11-16 18:40:44.000000000 -0500
66271@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66272 static unsigned long nr_entries;
66273 static struct entry entries[MAX_ENTRIES];
66274
66275-static atomic_t overflow_count;
66276+static atomic_unchecked_t overflow_count;
66277
66278 /*
66279 * The entries are in a hash-table, for fast lookup:
66280@@ -140,7 +140,7 @@ static void reset_entries(void)
66281 nr_entries = 0;
66282 memset(entries, 0, sizeof(entries));
66283 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66284- atomic_set(&overflow_count, 0);
66285+ atomic_set_unchecked(&overflow_count, 0);
66286 }
66287
66288 static struct entry *alloc_entry(void)
66289@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66290 if (likely(entry))
66291 entry->count++;
66292 else
66293- atomic_inc(&overflow_count);
66294+ atomic_inc_unchecked(&overflow_count);
66295
66296 out_unlock:
66297 raw_spin_unlock_irqrestore(lock, flags);
66298@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66299
66300 static void print_name_offset(struct seq_file *m, unsigned long addr)
66301 {
66302+#ifdef CONFIG_GRKERNSEC_HIDESYM
66303+ seq_printf(m, "<%p>", NULL);
66304+#else
66305 char symname[KSYM_NAME_LEN];
66306
66307 if (lookup_symbol_name(addr, symname) < 0)
66308 seq_printf(m, "<%p>", (void *)addr);
66309 else
66310 seq_printf(m, "%s", symname);
66311+#endif
66312 }
66313
66314 static int tstats_show(struct seq_file *m, void *v)
66315@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66316
66317 seq_puts(m, "Timer Stats Version: v0.2\n");
66318 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66319- if (atomic_read(&overflow_count))
66320+ if (atomic_read_unchecked(&overflow_count))
66321 seq_printf(m, "Overflow: %d entries\n",
66322- atomic_read(&overflow_count));
66323+ atomic_read_unchecked(&overflow_count));
66324
66325 for (i = 0; i < nr_entries; i++) {
66326 entry = entries + i;
66327@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
66328 {
66329 struct proc_dir_entry *pe;
66330
66331+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66332+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66333+#else
66334 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66335+#endif
66336 if (!pe)
66337 return -ENOMEM;
66338 return 0;
66339diff -urNp linux-3.1.1/kernel/time.c linux-3.1.1/kernel/time.c
66340--- linux-3.1.1/kernel/time.c 2011-11-11 15:19:27.000000000 -0500
66341+++ linux-3.1.1/kernel/time.c 2011-11-16 18:40:44.000000000 -0500
66342@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
66343 return error;
66344
66345 if (tz) {
66346+ /* we log in do_settimeofday called below, so don't log twice
66347+ */
66348+ if (!tv)
66349+ gr_log_timechange();
66350+
66351 /* SMP safe, global irq locking makes it work. */
66352 sys_tz = *tz;
66353 update_vsyscall_tz();
66354diff -urNp linux-3.1.1/kernel/timer.c linux-3.1.1/kernel/timer.c
66355--- linux-3.1.1/kernel/timer.c 2011-11-11 15:19:27.000000000 -0500
66356+++ linux-3.1.1/kernel/timer.c 2011-11-16 18:39:08.000000000 -0500
66357@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66358 /*
66359 * This function runs timers and the timer-tq in bottom half context.
66360 */
66361-static void run_timer_softirq(struct softirq_action *h)
66362+static void run_timer_softirq(void)
66363 {
66364 struct tvec_base *base = __this_cpu_read(tvec_bases);
66365
66366diff -urNp linux-3.1.1/kernel/trace/blktrace.c linux-3.1.1/kernel/trace/blktrace.c
66367--- linux-3.1.1/kernel/trace/blktrace.c 2011-11-11 15:19:27.000000000 -0500
66368+++ linux-3.1.1/kernel/trace/blktrace.c 2011-11-16 18:39:08.000000000 -0500
66369@@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct f
66370 struct blk_trace *bt = filp->private_data;
66371 char buf[16];
66372
66373- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66374+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66375
66376 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66377 }
66378@@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(str
66379 return 1;
66380
66381 bt = buf->chan->private_data;
66382- atomic_inc(&bt->dropped);
66383+ atomic_inc_unchecked(&bt->dropped);
66384 return 0;
66385 }
66386
66387@@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_qu
66388
66389 bt->dir = dir;
66390 bt->dev = dev;
66391- atomic_set(&bt->dropped, 0);
66392+ atomic_set_unchecked(&bt->dropped, 0);
66393
66394 ret = -EIO;
66395 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66396diff -urNp linux-3.1.1/kernel/trace/ftrace.c linux-3.1.1/kernel/trace/ftrace.c
66397--- linux-3.1.1/kernel/trace/ftrace.c 2011-11-11 15:19:27.000000000 -0500
66398+++ linux-3.1.1/kernel/trace/ftrace.c 2011-11-16 18:39:08.000000000 -0500
66399@@ -1585,12 +1585,17 @@ ftrace_code_disable(struct module *mod,
66400 if (unlikely(ftrace_disabled))
66401 return 0;
66402
66403+ ret = ftrace_arch_code_modify_prepare();
66404+ FTRACE_WARN_ON(ret);
66405+ if (ret)
66406+ return 0;
66407+
66408 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66409+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66410 if (ret) {
66411 ftrace_bug(ret, ip);
66412- return 0;
66413 }
66414- return 1;
66415+ return ret ? 0 : 1;
66416 }
66417
66418 /*
66419@@ -2607,7 +2612,7 @@ static void ftrace_free_entry_rcu(struct
66420
66421 int
66422 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66423- void *data)
66424+ void *data)
66425 {
66426 struct ftrace_func_probe *entry;
66427 struct ftrace_page *pg;
66428diff -urNp linux-3.1.1/kernel/trace/trace.c linux-3.1.1/kernel/trace/trace.c
66429--- linux-3.1.1/kernel/trace/trace.c 2011-11-11 15:19:27.000000000 -0500
66430+++ linux-3.1.1/kernel/trace/trace.c 2011-11-16 18:40:44.000000000 -0500
66431@@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(
66432 size_t rem;
66433 unsigned int i;
66434
66435+ pax_track_stack();
66436+
66437 if (splice_grow_spd(pipe, &spd))
66438 return -ENOMEM;
66439
66440@@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file
66441 int entries, size, i;
66442 size_t ret;
66443
66444+ pax_track_stack();
66445+
66446 if (splice_grow_spd(pipe, &spd))
66447 return -ENOMEM;
66448
66449@@ -4093,10 +4097,9 @@ static const struct file_operations trac
66450 };
66451 #endif
66452
66453-static struct dentry *d_tracer;
66454-
66455 struct dentry *tracing_init_dentry(void)
66456 {
66457+ static struct dentry *d_tracer;
66458 static int once;
66459
66460 if (d_tracer)
66461@@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void)
66462 return d_tracer;
66463 }
66464
66465-static struct dentry *d_percpu;
66466-
66467 struct dentry *tracing_dentry_percpu(void)
66468 {
66469+ static struct dentry *d_percpu;
66470 static int once;
66471 struct dentry *d_tracer;
66472
66473diff -urNp linux-3.1.1/kernel/trace/trace_events.c linux-3.1.1/kernel/trace/trace_events.c
66474--- linux-3.1.1/kernel/trace/trace_events.c 2011-11-11 15:19:27.000000000 -0500
66475+++ linux-3.1.1/kernel/trace/trace_events.c 2011-11-16 18:39:08.000000000 -0500
66476@@ -1300,10 +1300,6 @@ static LIST_HEAD(ftrace_module_file_list
66477 struct ftrace_module_file_ops {
66478 struct list_head list;
66479 struct module *mod;
66480- struct file_operations id;
66481- struct file_operations enable;
66482- struct file_operations format;
66483- struct file_operations filter;
66484 };
66485
66486 static struct ftrace_module_file_ops *
66487@@ -1324,17 +1320,12 @@ trace_create_file_ops(struct module *mod
66488
66489 file_ops->mod = mod;
66490
66491- file_ops->id = ftrace_event_id_fops;
66492- file_ops->id.owner = mod;
66493-
66494- file_ops->enable = ftrace_enable_fops;
66495- file_ops->enable.owner = mod;
66496-
66497- file_ops->filter = ftrace_event_filter_fops;
66498- file_ops->filter.owner = mod;
66499-
66500- file_ops->format = ftrace_event_format_fops;
66501- file_ops->format.owner = mod;
66502+ pax_open_kernel();
66503+ *(void **)&mod->trace_id.owner = mod;
66504+ *(void **)&mod->trace_enable.owner = mod;
66505+ *(void **)&mod->trace_filter.owner = mod;
66506+ *(void **)&mod->trace_format.owner = mod;
66507+ pax_close_kernel();
66508
66509 list_add(&file_ops->list, &ftrace_module_file_list);
66510
66511@@ -1358,8 +1349,8 @@ static void trace_module_add_events(stru
66512
66513 for_each_event(call, start, end) {
66514 __trace_add_event_call(*call, mod,
66515- &file_ops->id, &file_ops->enable,
66516- &file_ops->filter, &file_ops->format);
66517+ &mod->trace_id, &mod->trace_enable,
66518+ &mod->trace_filter, &mod->trace_format);
66519 }
66520 }
66521
66522diff -urNp linux-3.1.1/kernel/trace/trace_kprobe.c linux-3.1.1/kernel/trace/trace_kprobe.c
66523--- linux-3.1.1/kernel/trace/trace_kprobe.c 2011-11-11 15:19:27.000000000 -0500
66524+++ linux-3.1.1/kernel/trace/trace_kprobe.c 2011-11-16 18:39:08.000000000 -0500
66525@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66526 long ret;
66527 int maxlen = get_rloc_len(*(u32 *)dest);
66528 u8 *dst = get_rloc_data(dest);
66529- u8 *src = addr;
66530+ const u8 __user *src = (const u8 __force_user *)addr;
66531 mm_segment_t old_fs = get_fs();
66532 if (!maxlen)
66533 return;
66534@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66535 pagefault_disable();
66536 do
66537 ret = __copy_from_user_inatomic(dst++, src++, 1);
66538- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66539+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66540 dst[-1] = '\0';
66541 pagefault_enable();
66542 set_fs(old_fs);
66543@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66544 ((u8 *)get_rloc_data(dest))[0] = '\0';
66545 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66546 } else
66547- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
66548+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
66549 get_rloc_offs(*(u32 *)dest));
66550 }
66551 /* Return the length of string -- including null terminal byte */
66552@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66553 set_fs(KERNEL_DS);
66554 pagefault_disable();
66555 do {
66556- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
66557+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
66558 len++;
66559 } while (c && ret == 0 && len < MAX_STRING_SIZE);
66560 pagefault_enable();
66561diff -urNp linux-3.1.1/kernel/trace/trace_mmiotrace.c linux-3.1.1/kernel/trace/trace_mmiotrace.c
66562--- linux-3.1.1/kernel/trace/trace_mmiotrace.c 2011-11-11 15:19:27.000000000 -0500
66563+++ linux-3.1.1/kernel/trace/trace_mmiotrace.c 2011-11-16 18:39:08.000000000 -0500
66564@@ -24,7 +24,7 @@ struct header_iter {
66565 static struct trace_array *mmio_trace_array;
66566 static bool overrun_detected;
66567 static unsigned long prev_overruns;
66568-static atomic_t dropped_count;
66569+static atomic_unchecked_t dropped_count;
66570
66571 static void mmio_reset_data(struct trace_array *tr)
66572 {
66573@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
66574
66575 static unsigned long count_overruns(struct trace_iterator *iter)
66576 {
66577- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66578+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66579 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66580
66581 if (over > prev_overruns)
66582@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
66583 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66584 sizeof(*entry), 0, pc);
66585 if (!event) {
66586- atomic_inc(&dropped_count);
66587+ atomic_inc_unchecked(&dropped_count);
66588 return;
66589 }
66590 entry = ring_buffer_event_data(event);
66591@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
66592 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66593 sizeof(*entry), 0, pc);
66594 if (!event) {
66595- atomic_inc(&dropped_count);
66596+ atomic_inc_unchecked(&dropped_count);
66597 return;
66598 }
66599 entry = ring_buffer_event_data(event);
66600diff -urNp linux-3.1.1/kernel/trace/trace_output.c linux-3.1.1/kernel/trace/trace_output.c
66601--- linux-3.1.1/kernel/trace/trace_output.c 2011-11-11 15:19:27.000000000 -0500
66602+++ linux-3.1.1/kernel/trace/trace_output.c 2011-11-16 18:39:08.000000000 -0500
66603@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
66604
66605 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66606 if (!IS_ERR(p)) {
66607- p = mangle_path(s->buffer + s->len, p, "\n");
66608+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66609 if (p) {
66610 s->len = p - s->buffer;
66611 return 1;
66612diff -urNp linux-3.1.1/kernel/trace/trace_stack.c linux-3.1.1/kernel/trace/trace_stack.c
66613--- linux-3.1.1/kernel/trace/trace_stack.c 2011-11-11 15:19:27.000000000 -0500
66614+++ linux-3.1.1/kernel/trace/trace_stack.c 2011-11-16 18:39:08.000000000 -0500
66615@@ -50,7 +50,7 @@ static inline void check_stack(void)
66616 return;
66617
66618 /* we do not handle interrupt stacks yet */
66619- if (!object_is_on_stack(&this_size))
66620+ if (!object_starts_on_stack(&this_size))
66621 return;
66622
66623 local_irq_save(flags);
66624diff -urNp linux-3.1.1/kernel/trace/trace_workqueue.c linux-3.1.1/kernel/trace/trace_workqueue.c
66625--- linux-3.1.1/kernel/trace/trace_workqueue.c 2011-11-11 15:19:27.000000000 -0500
66626+++ linux-3.1.1/kernel/trace/trace_workqueue.c 2011-11-16 18:39:08.000000000 -0500
66627@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
66628 int cpu;
66629 pid_t pid;
66630 /* Can be inserted from interrupt or user context, need to be atomic */
66631- atomic_t inserted;
66632+ atomic_unchecked_t inserted;
66633 /*
66634 * Don't need to be atomic, works are serialized in a single workqueue thread
66635 * on a single CPU.
66636@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
66637 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66638 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66639 if (node->pid == wq_thread->pid) {
66640- atomic_inc(&node->inserted);
66641+ atomic_inc_unchecked(&node->inserted);
66642 goto found;
66643 }
66644 }
66645@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
66646 tsk = get_pid_task(pid, PIDTYPE_PID);
66647 if (tsk) {
66648 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66649- atomic_read(&cws->inserted), cws->executed,
66650+ atomic_read_unchecked(&cws->inserted), cws->executed,
66651 tsk->comm);
66652 put_task_struct(tsk);
66653 }
66654diff -urNp linux-3.1.1/lib/bitmap.c linux-3.1.1/lib/bitmap.c
66655--- linux-3.1.1/lib/bitmap.c 2011-11-11 15:19:27.000000000 -0500
66656+++ linux-3.1.1/lib/bitmap.c 2011-11-16 18:39:08.000000000 -0500
66657@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsi
66658 {
66659 int c, old_c, totaldigits, ndigits, nchunks, nbits;
66660 u32 chunk;
66661- const char __user *ubuf = buf;
66662+ const char __user *ubuf = (const char __force_user *)buf;
66663
66664 bitmap_zero(maskp, nmaskbits);
66665
66666@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user
66667 {
66668 if (!access_ok(VERIFY_READ, ubuf, ulen))
66669 return -EFAULT;
66670- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
66671+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
66672 }
66673 EXPORT_SYMBOL(bitmap_parse_user);
66674
66675@@ -594,7 +594,7 @@ static int __bitmap_parselist(const char
66676 {
66677 unsigned a, b;
66678 int c, old_c, totaldigits;
66679- const char __user *ubuf = buf;
66680+ const char __user *ubuf = (const char __force_user *)buf;
66681 int exp_digit, in_range;
66682
66683 totaldigits = c = 0;
66684@@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __u
66685 {
66686 if (!access_ok(VERIFY_READ, ubuf, ulen))
66687 return -EFAULT;
66688- return __bitmap_parselist((const char *)ubuf,
66689+ return __bitmap_parselist((const char __force_kernel *)ubuf,
66690 ulen, 1, maskp, nmaskbits);
66691 }
66692 EXPORT_SYMBOL(bitmap_parselist_user);
66693diff -urNp linux-3.1.1/lib/bug.c linux-3.1.1/lib/bug.c
66694--- linux-3.1.1/lib/bug.c 2011-11-11 15:19:27.000000000 -0500
66695+++ linux-3.1.1/lib/bug.c 2011-11-16 18:39:08.000000000 -0500
66696@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
66697 return BUG_TRAP_TYPE_NONE;
66698
66699 bug = find_bug(bugaddr);
66700+ if (!bug)
66701+ return BUG_TRAP_TYPE_NONE;
66702
66703 file = NULL;
66704 line = 0;
66705diff -urNp linux-3.1.1/lib/debugobjects.c linux-3.1.1/lib/debugobjects.c
66706--- linux-3.1.1/lib/debugobjects.c 2011-11-11 15:19:27.000000000 -0500
66707+++ linux-3.1.1/lib/debugobjects.c 2011-11-16 18:39:08.000000000 -0500
66708@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
66709 if (limit > 4)
66710 return;
66711
66712- is_on_stack = object_is_on_stack(addr);
66713+ is_on_stack = object_starts_on_stack(addr);
66714 if (is_on_stack == onstack)
66715 return;
66716
66717diff -urNp linux-3.1.1/lib/devres.c linux-3.1.1/lib/devres.c
66718--- linux-3.1.1/lib/devres.c 2011-11-11 15:19:27.000000000 -0500
66719+++ linux-3.1.1/lib/devres.c 2011-11-16 18:39:08.000000000 -0500
66720@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
66721 void devm_iounmap(struct device *dev, void __iomem *addr)
66722 {
66723 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
66724- (void *)addr));
66725+ (void __force *)addr));
66726 iounmap(addr);
66727 }
66728 EXPORT_SYMBOL(devm_iounmap);
66729@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
66730 {
66731 ioport_unmap(addr);
66732 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
66733- devm_ioport_map_match, (void *)addr));
66734+ devm_ioport_map_match, (void __force *)addr));
66735 }
66736 EXPORT_SYMBOL(devm_ioport_unmap);
66737
66738diff -urNp linux-3.1.1/lib/dma-debug.c linux-3.1.1/lib/dma-debug.c
66739--- linux-3.1.1/lib/dma-debug.c 2011-11-11 15:19:27.000000000 -0500
66740+++ linux-3.1.1/lib/dma-debug.c 2011-11-16 18:39:08.000000000 -0500
66741@@ -870,7 +870,7 @@ out:
66742
66743 static void check_for_stack(struct device *dev, void *addr)
66744 {
66745- if (object_is_on_stack(addr))
66746+ if (object_starts_on_stack(addr))
66747 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66748 "stack [addr=%p]\n", addr);
66749 }
66750diff -urNp linux-3.1.1/lib/extable.c linux-3.1.1/lib/extable.c
66751--- linux-3.1.1/lib/extable.c 2011-11-11 15:19:27.000000000 -0500
66752+++ linux-3.1.1/lib/extable.c 2011-11-16 18:39:08.000000000 -0500
66753@@ -13,6 +13,7 @@
66754 #include <linux/init.h>
66755 #include <linux/sort.h>
66756 #include <asm/uaccess.h>
66757+#include <asm/pgtable.h>
66758
66759 #ifndef ARCH_HAS_SORT_EXTABLE
66760 /*
66761@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
66762 void sort_extable(struct exception_table_entry *start,
66763 struct exception_table_entry *finish)
66764 {
66765+ pax_open_kernel();
66766 sort(start, finish - start, sizeof(struct exception_table_entry),
66767 cmp_ex, NULL);
66768+ pax_close_kernel();
66769 }
66770
66771 #ifdef CONFIG_MODULES
66772diff -urNp linux-3.1.1/lib/inflate.c linux-3.1.1/lib/inflate.c
66773--- linux-3.1.1/lib/inflate.c 2011-11-11 15:19:27.000000000 -0500
66774+++ linux-3.1.1/lib/inflate.c 2011-11-16 18:39:08.000000000 -0500
66775@@ -269,7 +269,7 @@ static void free(void *where)
66776 malloc_ptr = free_mem_ptr;
66777 }
66778 #else
66779-#define malloc(a) kmalloc(a, GFP_KERNEL)
66780+#define malloc(a) kmalloc((a), GFP_KERNEL)
66781 #define free(a) kfree(a)
66782 #endif
66783
66784diff -urNp linux-3.1.1/lib/Kconfig.debug linux-3.1.1/lib/Kconfig.debug
66785--- linux-3.1.1/lib/Kconfig.debug 2011-11-11 15:19:27.000000000 -0500
66786+++ linux-3.1.1/lib/Kconfig.debug 2011-11-16 18:40:44.000000000 -0500
66787@@ -1091,6 +1091,7 @@ config LATENCYTOP
66788 depends on DEBUG_KERNEL
66789 depends on STACKTRACE_SUPPORT
66790 depends on PROC_FS
66791+ depends on !GRKERNSEC_HIDESYM
66792 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
66793 select KALLSYMS
66794 select KALLSYMS_ALL
66795diff -urNp linux-3.1.1/lib/kref.c linux-3.1.1/lib/kref.c
66796--- linux-3.1.1/lib/kref.c 2011-11-11 15:19:27.000000000 -0500
66797+++ linux-3.1.1/lib/kref.c 2011-11-16 18:39:08.000000000 -0500
66798@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
66799 */
66800 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66801 {
66802- WARN_ON(release == NULL);
66803+ BUG_ON(release == NULL);
66804 WARN_ON(release == (void (*)(struct kref *))kfree);
66805
66806 if (atomic_dec_and_test(&kref->refcount)) {
66807diff -urNp linux-3.1.1/lib/radix-tree.c linux-3.1.1/lib/radix-tree.c
66808--- linux-3.1.1/lib/radix-tree.c 2011-11-11 15:19:27.000000000 -0500
66809+++ linux-3.1.1/lib/radix-tree.c 2011-11-16 18:39:08.000000000 -0500
66810@@ -80,7 +80,7 @@ struct radix_tree_preload {
66811 int nr;
66812 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66813 };
66814-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66815+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66816
66817 static inline void *ptr_to_indirect(void *ptr)
66818 {
66819diff -urNp linux-3.1.1/lib/vsprintf.c linux-3.1.1/lib/vsprintf.c
66820--- linux-3.1.1/lib/vsprintf.c 2011-11-11 15:19:27.000000000 -0500
66821+++ linux-3.1.1/lib/vsprintf.c 2011-11-16 18:40:44.000000000 -0500
66822@@ -16,6 +16,9 @@
66823 * - scnprintf and vscnprintf
66824 */
66825
66826+#ifdef CONFIG_GRKERNSEC_HIDESYM
66827+#define __INCLUDED_BY_HIDESYM 1
66828+#endif
66829 #include <stdarg.h>
66830 #include <linux/module.h>
66831 #include <linux/types.h>
66832@@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end
66833 char sym[KSYM_SYMBOL_LEN];
66834 if (ext == 'B')
66835 sprint_backtrace(sym, value);
66836- else if (ext != 'f' && ext != 's')
66837+ else if (ext != 'f' && ext != 's' && ext != 'a')
66838 sprint_symbol(sym, value);
66839 else
66840 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66841@@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end,
66842 return string(buf, end, uuid, spec);
66843 }
66844
66845+#ifdef CONFIG_GRKERNSEC_HIDESYM
66846+int kptr_restrict __read_mostly = 2;
66847+#else
66848 int kptr_restrict __read_mostly;
66849+#endif
66850
66851 /*
66852 * Show a '%p' thing. A kernel extension is that the '%p' is followed
66853@@ -810,6 +817,8 @@ int kptr_restrict __read_mostly;
66854 * - 'S' For symbolic direct pointers with offset
66855 * - 's' For symbolic direct pointers without offset
66856 * - 'B' For backtraced symbolic direct pointers with offset
66857+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66858+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66859 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
66860 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
66861 * - 'M' For a 6-byte MAC address, it prints the address in the
66862@@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf
66863 {
66864 if (!ptr && *fmt != 'K') {
66865 /*
66866- * Print (null) with the same width as a pointer so it makes
66867+ * Print (nil) with the same width as a pointer so it makes
66868 * tabular output look nice.
66869 */
66870 if (spec.field_width == -1)
66871 spec.field_width = 2 * sizeof(void *);
66872- return string(buf, end, "(null)", spec);
66873+ return string(buf, end, "(nil)", spec);
66874 }
66875
66876 switch (*fmt) {
66877@@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf
66878 /* Fallthrough */
66879 case 'S':
66880 case 's':
66881+#ifdef CONFIG_GRKERNSEC_HIDESYM
66882+ break;
66883+#else
66884+ return symbol_string(buf, end, ptr, spec, *fmt);
66885+#endif
66886+ case 'A':
66887+ case 'a':
66888 case 'B':
66889 return symbol_string(buf, end, ptr, spec, *fmt);
66890 case 'R':
66891@@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size,
66892 typeof(type) value; \
66893 if (sizeof(type) == 8) { \
66894 args = PTR_ALIGN(args, sizeof(u32)); \
66895- *(u32 *)&value = *(u32 *)args; \
66896- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66897+ *(u32 *)&value = *(const u32 *)args; \
66898+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66899 } else { \
66900 args = PTR_ALIGN(args, sizeof(type)); \
66901- value = *(typeof(type) *)args; \
66902+ value = *(const typeof(type) *)args; \
66903 } \
66904 args += sizeof(type); \
66905 value; \
66906@@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size,
66907 case FORMAT_TYPE_STR: {
66908 const char *str_arg = args;
66909 args += strlen(str_arg) + 1;
66910- str = string(str, end, (char *)str_arg, spec);
66911+ str = string(str, end, str_arg, spec);
66912 break;
66913 }
66914
66915diff -urNp linux-3.1.1/localversion-grsec linux-3.1.1/localversion-grsec
66916--- linux-3.1.1/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66917+++ linux-3.1.1/localversion-grsec 2011-11-16 18:40:44.000000000 -0500
66918@@ -0,0 +1 @@
66919+-grsec
66920diff -urNp linux-3.1.1/Makefile linux-3.1.1/Makefile
66921--- linux-3.1.1/Makefile 2011-11-11 15:19:27.000000000 -0500
66922+++ linux-3.1.1/Makefile 2011-11-17 18:56:01.000000000 -0500
66923@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66924
66925 HOSTCC = gcc
66926 HOSTCXX = g++
66927-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66928-HOSTCXXFLAGS = -O2
66929+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66930+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66931+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
66932
66933 # Decide whether to build built-in, modular, or both.
66934 # Normally, just do built-in.
66935@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
66936 # Rules shared between *config targets and build targets
66937
66938 # Basic helpers built in scripts/
66939-PHONY += scripts_basic
66940-scripts_basic:
66941+PHONY += scripts_basic gcc-plugins
66942+scripts_basic: gcc-plugins
66943 $(Q)$(MAKE) $(build)=scripts/basic
66944 $(Q)rm -f .tmp_quiet_recordmcount
66945
66946@@ -564,6 +565,37 @@ else
66947 KBUILD_CFLAGS += -O2
66948 endif
66949
66950+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
66951+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
66952+ifdef CONFIG_PAX_MEMORY_STACKLEAK
66953+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
66954+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66955+endif
66956+ifdef CONFIG_KALLOCSTAT_PLUGIN
66957+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
66958+endif
66959+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
66960+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
66961+endif
66962+ifdef CONFIG_CHECKER_PLUGIN
66963+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
66964+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
66965+endif
66966+endif
66967+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
66968+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
66969+gcc-plugins:
66970+ $(Q)$(MAKE) $(build)=tools/gcc
66971+else
66972+gcc-plugins:
66973+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66974+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.)
66975+else
66976+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
66977+endif
66978+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
66979+endif
66980+
66981 include $(srctree)/arch/$(SRCARCH)/Makefile
66982
66983 ifneq ($(CONFIG_FRAME_WARN),0)
66984@@ -708,7 +740,7 @@ export mod_strip_cmd
66985
66986
66987 ifeq ($(KBUILD_EXTMOD),)
66988-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
66989+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
66990
66991 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
66992 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
66993@@ -932,6 +964,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
66994
66995 # The actual objects are generated when descending,
66996 # make sure no implicit rule kicks in
66997+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
66998 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
66999
67000 # Handle descending into subdirectories listed in $(vmlinux-dirs)
67001@@ -941,7 +974,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
67002 # Error messages still appears in the original language
67003
67004 PHONY += $(vmlinux-dirs)
67005-$(vmlinux-dirs): prepare scripts
67006+$(vmlinux-dirs): gcc-plugins prepare scripts
67007 $(Q)$(MAKE) $(build)=$@
67008
67009 # Store (new) KERNELRELASE string in include/config/kernel.release
67010@@ -986,6 +1019,7 @@ prepare0: archprepare FORCE
67011 $(Q)$(MAKE) $(build)=. missing-syscalls
67012
67013 # All the preparing..
67014+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
67015 prepare: prepare0
67016
67017 # Generate some files
67018@@ -1087,6 +1121,7 @@ all: modules
67019 # using awk while concatenating to the final file.
67020
67021 PHONY += modules
67022+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
67023 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
67024 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
67025 @$(kecho) ' Building modules, stage 2.';
67026@@ -1102,7 +1137,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
67027
67028 # Target to prepare building external modules
67029 PHONY += modules_prepare
67030-modules_prepare: prepare scripts
67031+modules_prepare: gcc-plugins prepare scripts
67032
67033 # Target to install modules
67034 PHONY += modules_install
67035@@ -1198,7 +1233,7 @@ distclean: mrproper
67036 @find $(srctree) $(RCS_FIND_IGNORE) \
67037 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
67038 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
67039- -o -name '.*.rej' -o -size 0 \
67040+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
67041 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
67042 -type f -print | xargs rm -f
67043
67044@@ -1360,6 +1395,7 @@ PHONY += $(module-dirs) modules
67045 $(module-dirs): crmodverdir $(objtree)/Module.symvers
67046 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
67047
67048+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
67049 modules: $(module-dirs)
67050 @$(kecho) ' Building modules, stage 2.';
67051 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
67052@@ -1486,17 +1522,19 @@ else
67053 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
67054 endif
67055
67056-%.s: %.c prepare scripts FORCE
67057+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
67058+%.s: %.c gcc-plugins prepare scripts FORCE
67059 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67060 %.i: %.c prepare scripts FORCE
67061 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67062-%.o: %.c prepare scripts FORCE
67063+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
67064+%.o: %.c gcc-plugins prepare scripts FORCE
67065 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67066 %.lst: %.c prepare scripts FORCE
67067 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67068-%.s: %.S prepare scripts FORCE
67069+%.s: %.S gcc-plugins prepare scripts FORCE
67070 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67071-%.o: %.S prepare scripts FORCE
67072+%.o: %.S gcc-plugins prepare scripts FORCE
67073 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67074 %.symtypes: %.c prepare scripts FORCE
67075 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67076@@ -1506,11 +1544,13 @@ endif
67077 $(cmd_crmodverdir)
67078 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67079 $(build)=$(build-dir)
67080-%/: prepare scripts FORCE
67081+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
67082+%/: gcc-plugins prepare scripts FORCE
67083 $(cmd_crmodverdir)
67084 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67085 $(build)=$(build-dir)
67086-%.ko: prepare scripts FORCE
67087+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
67088+%.ko: gcc-plugins prepare scripts FORCE
67089 $(cmd_crmodverdir)
67090 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67091 $(build)=$(build-dir) $(@:.ko=.o)
67092diff -urNp linux-3.1.1/mm/filemap.c linux-3.1.1/mm/filemap.c
67093--- linux-3.1.1/mm/filemap.c 2011-11-11 15:19:27.000000000 -0500
67094+++ linux-3.1.1/mm/filemap.c 2011-11-16 18:40:44.000000000 -0500
67095@@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file
67096 struct address_space *mapping = file->f_mapping;
67097
67098 if (!mapping->a_ops->readpage)
67099- return -ENOEXEC;
67100+ return -ENODEV;
67101 file_accessed(file);
67102 vma->vm_ops = &generic_file_vm_ops;
67103 vma->vm_flags |= VM_CAN_NONLINEAR;
67104@@ -2190,6 +2190,7 @@ inline int generic_write_checks(struct f
67105 *pos = i_size_read(inode);
67106
67107 if (limit != RLIM_INFINITY) {
67108+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67109 if (*pos >= limit) {
67110 send_sig(SIGXFSZ, current, 0);
67111 return -EFBIG;
67112diff -urNp linux-3.1.1/mm/fremap.c linux-3.1.1/mm/fremap.c
67113--- linux-3.1.1/mm/fremap.c 2011-11-11 15:19:27.000000000 -0500
67114+++ linux-3.1.1/mm/fremap.c 2011-11-16 18:39:08.000000000 -0500
67115@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67116 retry:
67117 vma = find_vma(mm, start);
67118
67119+#ifdef CONFIG_PAX_SEGMEXEC
67120+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67121+ goto out;
67122+#endif
67123+
67124 /*
67125 * Make sure the vma is shared, that it supports prefaulting,
67126 * and that the remapped range is valid and fully within
67127diff -urNp linux-3.1.1/mm/highmem.c linux-3.1.1/mm/highmem.c
67128--- linux-3.1.1/mm/highmem.c 2011-11-11 15:19:27.000000000 -0500
67129+++ linux-3.1.1/mm/highmem.c 2011-11-16 18:39:08.000000000 -0500
67130@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67131 * So no dangers, even with speculative execution.
67132 */
67133 page = pte_page(pkmap_page_table[i]);
67134+ pax_open_kernel();
67135 pte_clear(&init_mm, (unsigned long)page_address(page),
67136 &pkmap_page_table[i]);
67137-
67138+ pax_close_kernel();
67139 set_page_address(page, NULL);
67140 need_flush = 1;
67141 }
67142@@ -186,9 +187,11 @@ start:
67143 }
67144 }
67145 vaddr = PKMAP_ADDR(last_pkmap_nr);
67146+
67147+ pax_open_kernel();
67148 set_pte_at(&init_mm, vaddr,
67149 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67150-
67151+ pax_close_kernel();
67152 pkmap_count[last_pkmap_nr] = 1;
67153 set_page_address(page, (void *)vaddr);
67154
67155diff -urNp linux-3.1.1/mm/huge_memory.c linux-3.1.1/mm/huge_memory.c
67156--- linux-3.1.1/mm/huge_memory.c 2011-11-11 15:19:27.000000000 -0500
67157+++ linux-3.1.1/mm/huge_memory.c 2011-11-16 18:39:08.000000000 -0500
67158@@ -702,7 +702,7 @@ out:
67159 * run pte_offset_map on the pmd, if an huge pmd could
67160 * materialize from under us from a different thread.
67161 */
67162- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67163+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67164 return VM_FAULT_OOM;
67165 /* if an huge pmd materialized from under us just retry later */
67166 if (unlikely(pmd_trans_huge(*pmd)))
67167diff -urNp linux-3.1.1/mm/hugetlb.c linux-3.1.1/mm/hugetlb.c
67168--- linux-3.1.1/mm/hugetlb.c 2011-11-11 15:19:27.000000000 -0500
67169+++ linux-3.1.1/mm/hugetlb.c 2011-11-16 18:39:08.000000000 -0500
67170@@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_s
67171 return 1;
67172 }
67173
67174+#ifdef CONFIG_PAX_SEGMEXEC
67175+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67176+{
67177+ struct mm_struct *mm = vma->vm_mm;
67178+ struct vm_area_struct *vma_m;
67179+ unsigned long address_m;
67180+ pte_t *ptep_m;
67181+
67182+ vma_m = pax_find_mirror_vma(vma);
67183+ if (!vma_m)
67184+ return;
67185+
67186+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67187+ address_m = address + SEGMEXEC_TASK_SIZE;
67188+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67189+ get_page(page_m);
67190+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
67191+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67192+}
67193+#endif
67194+
67195 /*
67196 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67197 */
67198@@ -2447,6 +2468,11 @@ retry_avoidcopy:
67199 make_huge_pte(vma, new_page, 1));
67200 page_remove_rmap(old_page);
67201 hugepage_add_new_anon_rmap(new_page, vma, address);
67202+
67203+#ifdef CONFIG_PAX_SEGMEXEC
67204+ pax_mirror_huge_pte(vma, address, new_page);
67205+#endif
67206+
67207 /* Make the old page be freed below */
67208 new_page = old_page;
67209 mmu_notifier_invalidate_range_end(mm,
67210@@ -2598,6 +2624,10 @@ retry:
67211 && (vma->vm_flags & VM_SHARED)));
67212 set_huge_pte_at(mm, address, ptep, new_pte);
67213
67214+#ifdef CONFIG_PAX_SEGMEXEC
67215+ pax_mirror_huge_pte(vma, address, page);
67216+#endif
67217+
67218 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67219 /* Optimization, do the COW without a second fault */
67220 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67221@@ -2627,6 +2657,10 @@ int hugetlb_fault(struct mm_struct *mm,
67222 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67223 struct hstate *h = hstate_vma(vma);
67224
67225+#ifdef CONFIG_PAX_SEGMEXEC
67226+ struct vm_area_struct *vma_m;
67227+#endif
67228+
67229 ptep = huge_pte_offset(mm, address);
67230 if (ptep) {
67231 entry = huge_ptep_get(ptep);
67232@@ -2638,6 +2672,26 @@ int hugetlb_fault(struct mm_struct *mm,
67233 VM_FAULT_SET_HINDEX(h - hstates);
67234 }
67235
67236+#ifdef CONFIG_PAX_SEGMEXEC
67237+ vma_m = pax_find_mirror_vma(vma);
67238+ if (vma_m) {
67239+ unsigned long address_m;
67240+
67241+ if (vma->vm_start > vma_m->vm_start) {
67242+ address_m = address;
67243+ address -= SEGMEXEC_TASK_SIZE;
67244+ vma = vma_m;
67245+ h = hstate_vma(vma);
67246+ } else
67247+ address_m = address + SEGMEXEC_TASK_SIZE;
67248+
67249+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67250+ return VM_FAULT_OOM;
67251+ address_m &= HPAGE_MASK;
67252+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67253+ }
67254+#endif
67255+
67256 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67257 if (!ptep)
67258 return VM_FAULT_OOM;
67259diff -urNp linux-3.1.1/mm/internal.h linux-3.1.1/mm/internal.h
67260--- linux-3.1.1/mm/internal.h 2011-11-11 15:19:27.000000000 -0500
67261+++ linux-3.1.1/mm/internal.h 2011-11-16 18:39:08.000000000 -0500
67262@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page
67263 * in mm/page_alloc.c
67264 */
67265 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67266+extern void free_compound_page(struct page *page);
67267 extern void prep_compound_page(struct page *page, unsigned long order);
67268 #ifdef CONFIG_MEMORY_FAILURE
67269 extern bool is_free_buddy_page(struct page *page);
67270diff -urNp linux-3.1.1/mm/Kconfig linux-3.1.1/mm/Kconfig
67271--- linux-3.1.1/mm/Kconfig 2011-11-11 15:19:27.000000000 -0500
67272+++ linux-3.1.1/mm/Kconfig 2011-11-17 18:57:00.000000000 -0500
67273@@ -238,10 +238,10 @@ config KSM
67274 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67275
67276 config DEFAULT_MMAP_MIN_ADDR
67277- int "Low address space to protect from user allocation"
67278+ int "Low address space to protect from user allocation"
67279 depends on MMU
67280- default 4096
67281- help
67282+ default 65536
67283+ help
67284 This is the portion of low virtual memory which should be protected
67285 from userspace allocation. Keeping a user from writing to low pages
67286 can help reduce the impact of kernel NULL pointer bugs.
67287diff -urNp linux-3.1.1/mm/kmemleak.c linux-3.1.1/mm/kmemleak.c
67288--- linux-3.1.1/mm/kmemleak.c 2011-11-11 15:19:27.000000000 -0500
67289+++ linux-3.1.1/mm/kmemleak.c 2011-11-16 18:40:44.000000000 -0500
67290@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
67291
67292 for (i = 0; i < object->trace_len; i++) {
67293 void *ptr = (void *)object->trace[i];
67294- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67295+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67296 }
67297 }
67298
67299diff -urNp linux-3.1.1/mm/maccess.c linux-3.1.1/mm/maccess.c
67300--- linux-3.1.1/mm/maccess.c 2011-11-11 15:19:27.000000000 -0500
67301+++ linux-3.1.1/mm/maccess.c 2011-11-16 18:39:08.000000000 -0500
67302@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
67303 set_fs(KERNEL_DS);
67304 pagefault_disable();
67305 ret = __copy_from_user_inatomic(dst,
67306- (__force const void __user *)src, size);
67307+ (const void __force_user *)src, size);
67308 pagefault_enable();
67309 set_fs(old_fs);
67310
67311@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
67312
67313 set_fs(KERNEL_DS);
67314 pagefault_disable();
67315- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67316+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67317 pagefault_enable();
67318 set_fs(old_fs);
67319
67320diff -urNp linux-3.1.1/mm/madvise.c linux-3.1.1/mm/madvise.c
67321--- linux-3.1.1/mm/madvise.c 2011-11-11 15:19:27.000000000 -0500
67322+++ linux-3.1.1/mm/madvise.c 2011-11-16 18:39:08.000000000 -0500
67323@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
67324 pgoff_t pgoff;
67325 unsigned long new_flags = vma->vm_flags;
67326
67327+#ifdef CONFIG_PAX_SEGMEXEC
67328+ struct vm_area_struct *vma_m;
67329+#endif
67330+
67331 switch (behavior) {
67332 case MADV_NORMAL:
67333 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67334@@ -110,6 +114,13 @@ success:
67335 /*
67336 * vm_flags is protected by the mmap_sem held in write mode.
67337 */
67338+
67339+#ifdef CONFIG_PAX_SEGMEXEC
67340+ vma_m = pax_find_mirror_vma(vma);
67341+ if (vma_m)
67342+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67343+#endif
67344+
67345 vma->vm_flags = new_flags;
67346
67347 out:
67348@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
67349 struct vm_area_struct ** prev,
67350 unsigned long start, unsigned long end)
67351 {
67352+
67353+#ifdef CONFIG_PAX_SEGMEXEC
67354+ struct vm_area_struct *vma_m;
67355+#endif
67356+
67357 *prev = vma;
67358 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67359 return -EINVAL;
67360@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
67361 zap_page_range(vma, start, end - start, &details);
67362 } else
67363 zap_page_range(vma, start, end - start, NULL);
67364+
67365+#ifdef CONFIG_PAX_SEGMEXEC
67366+ vma_m = pax_find_mirror_vma(vma);
67367+ if (vma_m) {
67368+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67369+ struct zap_details details = {
67370+ .nonlinear_vma = vma_m,
67371+ .last_index = ULONG_MAX,
67372+ };
67373+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67374+ } else
67375+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67376+ }
67377+#endif
67378+
67379 return 0;
67380 }
67381
67382@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67383 if (end < start)
67384 goto out;
67385
67386+#ifdef CONFIG_PAX_SEGMEXEC
67387+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67388+ if (end > SEGMEXEC_TASK_SIZE)
67389+ goto out;
67390+ } else
67391+#endif
67392+
67393+ if (end > TASK_SIZE)
67394+ goto out;
67395+
67396 error = 0;
67397 if (end == start)
67398 goto out;
67399diff -urNp linux-3.1.1/mm/memory.c linux-3.1.1/mm/memory.c
67400--- linux-3.1.1/mm/memory.c 2011-11-11 15:19:27.000000000 -0500
67401+++ linux-3.1.1/mm/memory.c 2011-11-16 18:39:08.000000000 -0500
67402@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
67403 return;
67404
67405 pmd = pmd_offset(pud, start);
67406+
67407+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67408 pud_clear(pud);
67409 pmd_free_tlb(tlb, pmd, start);
67410+#endif
67411+
67412 }
67413
67414 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67415@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
67416 if (end - 1 > ceiling - 1)
67417 return;
67418
67419+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67420 pud = pud_offset(pgd, start);
67421 pgd_clear(pgd);
67422 pud_free_tlb(tlb, pud, start);
67423+#endif
67424+
67425 }
67426
67427 /*
67428@@ -1566,12 +1573,6 @@ no_page_table:
67429 return page;
67430 }
67431
67432-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67433-{
67434- return stack_guard_page_start(vma, addr) ||
67435- stack_guard_page_end(vma, addr+PAGE_SIZE);
67436-}
67437-
67438 /**
67439 * __get_user_pages() - pin user pages in memory
67440 * @tsk: task_struct of target task
67441@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct
67442 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67443 i = 0;
67444
67445- do {
67446+ while (nr_pages) {
67447 struct vm_area_struct *vma;
67448
67449- vma = find_extend_vma(mm, start);
67450+ vma = find_vma(mm, start);
67451 if (!vma && in_gate_area(mm, start)) {
67452 unsigned long pg = start & PAGE_MASK;
67453 pgd_t *pgd;
67454@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct
67455 goto next_page;
67456 }
67457
67458- if (!vma ||
67459+ if (!vma || start < vma->vm_start ||
67460 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67461 !(vm_flags & vma->vm_flags))
67462 return i ? : -EFAULT;
67463@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct
67464 int ret;
67465 unsigned int fault_flags = 0;
67466
67467- /* For mlock, just skip the stack guard page. */
67468- if (foll_flags & FOLL_MLOCK) {
67469- if (stack_guard_page(vma, start))
67470- goto next_page;
67471- }
67472 if (foll_flags & FOLL_WRITE)
67473 fault_flags |= FAULT_FLAG_WRITE;
67474 if (nonblocking)
67475@@ -1800,7 +1796,7 @@ next_page:
67476 start += PAGE_SIZE;
67477 nr_pages--;
67478 } while (nr_pages && start < vma->vm_end);
67479- } while (nr_pages);
67480+ }
67481 return i;
67482 }
67483 EXPORT_SYMBOL(__get_user_pages);
67484@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_st
67485 page_add_file_rmap(page);
67486 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67487
67488+#ifdef CONFIG_PAX_SEGMEXEC
67489+ pax_mirror_file_pte(vma, addr, page, ptl);
67490+#endif
67491+
67492 retval = 0;
67493 pte_unmap_unlock(pte, ptl);
67494 return retval;
67495@@ -2041,10 +2041,22 @@ out:
67496 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67497 struct page *page)
67498 {
67499+
67500+#ifdef CONFIG_PAX_SEGMEXEC
67501+ struct vm_area_struct *vma_m;
67502+#endif
67503+
67504 if (addr < vma->vm_start || addr >= vma->vm_end)
67505 return -EFAULT;
67506 if (!page_count(page))
67507 return -EINVAL;
67508+
67509+#ifdef CONFIG_PAX_SEGMEXEC
67510+ vma_m = pax_find_mirror_vma(vma);
67511+ if (vma_m)
67512+ vma_m->vm_flags |= VM_INSERTPAGE;
67513+#endif
67514+
67515 vma->vm_flags |= VM_INSERTPAGE;
67516 return insert_page(vma, addr, page, vma->vm_page_prot);
67517 }
67518@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struc
67519 unsigned long pfn)
67520 {
67521 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67522+ BUG_ON(vma->vm_mirror);
67523
67524 if (addr < vma->vm_start || addr >= vma->vm_end)
67525 return -EFAULT;
67526@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct
67527 copy_user_highpage(dst, src, va, vma);
67528 }
67529
67530+#ifdef CONFIG_PAX_SEGMEXEC
67531+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67532+{
67533+ struct mm_struct *mm = vma->vm_mm;
67534+ spinlock_t *ptl;
67535+ pte_t *pte, entry;
67536+
67537+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67538+ entry = *pte;
67539+ if (!pte_present(entry)) {
67540+ if (!pte_none(entry)) {
67541+ BUG_ON(pte_file(entry));
67542+ free_swap_and_cache(pte_to_swp_entry(entry));
67543+ pte_clear_not_present_full(mm, address, pte, 0);
67544+ }
67545+ } else {
67546+ struct page *page;
67547+
67548+ flush_cache_page(vma, address, pte_pfn(entry));
67549+ entry = ptep_clear_flush(vma, address, pte);
67550+ BUG_ON(pte_dirty(entry));
67551+ page = vm_normal_page(vma, address, entry);
67552+ if (page) {
67553+ update_hiwater_rss(mm);
67554+ if (PageAnon(page))
67555+ dec_mm_counter_fast(mm, MM_ANONPAGES);
67556+ else
67557+ dec_mm_counter_fast(mm, MM_FILEPAGES);
67558+ page_remove_rmap(page);
67559+ page_cache_release(page);
67560+ }
67561+ }
67562+ pte_unmap_unlock(pte, ptl);
67563+}
67564+
67565+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67566+ *
67567+ * the ptl of the lower mapped page is held on entry and is not released on exit
67568+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67569+ */
67570+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67571+{
67572+ struct mm_struct *mm = vma->vm_mm;
67573+ unsigned long address_m;
67574+ spinlock_t *ptl_m;
67575+ struct vm_area_struct *vma_m;
67576+ pmd_t *pmd_m;
67577+ pte_t *pte_m, entry_m;
67578+
67579+ BUG_ON(!page_m || !PageAnon(page_m));
67580+
67581+ vma_m = pax_find_mirror_vma(vma);
67582+ if (!vma_m)
67583+ return;
67584+
67585+ BUG_ON(!PageLocked(page_m));
67586+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67587+ address_m = address + SEGMEXEC_TASK_SIZE;
67588+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67589+ pte_m = pte_offset_map(pmd_m, address_m);
67590+ ptl_m = pte_lockptr(mm, pmd_m);
67591+ if (ptl != ptl_m) {
67592+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67593+ if (!pte_none(*pte_m))
67594+ goto out;
67595+ }
67596+
67597+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67598+ page_cache_get(page_m);
67599+ page_add_anon_rmap(page_m, vma_m, address_m);
67600+ inc_mm_counter_fast(mm, MM_ANONPAGES);
67601+ set_pte_at(mm, address_m, pte_m, entry_m);
67602+ update_mmu_cache(vma_m, address_m, entry_m);
67603+out:
67604+ if (ptl != ptl_m)
67605+ spin_unlock(ptl_m);
67606+ pte_unmap(pte_m);
67607+ unlock_page(page_m);
67608+}
67609+
67610+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67611+{
67612+ struct mm_struct *mm = vma->vm_mm;
67613+ unsigned long address_m;
67614+ spinlock_t *ptl_m;
67615+ struct vm_area_struct *vma_m;
67616+ pmd_t *pmd_m;
67617+ pte_t *pte_m, entry_m;
67618+
67619+ BUG_ON(!page_m || PageAnon(page_m));
67620+
67621+ vma_m = pax_find_mirror_vma(vma);
67622+ if (!vma_m)
67623+ return;
67624+
67625+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67626+ address_m = address + SEGMEXEC_TASK_SIZE;
67627+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67628+ pte_m = pte_offset_map(pmd_m, address_m);
67629+ ptl_m = pte_lockptr(mm, pmd_m);
67630+ if (ptl != ptl_m) {
67631+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67632+ if (!pte_none(*pte_m))
67633+ goto out;
67634+ }
67635+
67636+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67637+ page_cache_get(page_m);
67638+ page_add_file_rmap(page_m);
67639+ inc_mm_counter_fast(mm, MM_FILEPAGES);
67640+ set_pte_at(mm, address_m, pte_m, entry_m);
67641+ update_mmu_cache(vma_m, address_m, entry_m);
67642+out:
67643+ if (ptl != ptl_m)
67644+ spin_unlock(ptl_m);
67645+ pte_unmap(pte_m);
67646+}
67647+
67648+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67649+{
67650+ struct mm_struct *mm = vma->vm_mm;
67651+ unsigned long address_m;
67652+ spinlock_t *ptl_m;
67653+ struct vm_area_struct *vma_m;
67654+ pmd_t *pmd_m;
67655+ pte_t *pte_m, entry_m;
67656+
67657+ vma_m = pax_find_mirror_vma(vma);
67658+ if (!vma_m)
67659+ return;
67660+
67661+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67662+ address_m = address + SEGMEXEC_TASK_SIZE;
67663+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67664+ pte_m = pte_offset_map(pmd_m, address_m);
67665+ ptl_m = pte_lockptr(mm, pmd_m);
67666+ if (ptl != ptl_m) {
67667+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67668+ if (!pte_none(*pte_m))
67669+ goto out;
67670+ }
67671+
67672+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67673+ set_pte_at(mm, address_m, pte_m, entry_m);
67674+out:
67675+ if (ptl != ptl_m)
67676+ spin_unlock(ptl_m);
67677+ pte_unmap(pte_m);
67678+}
67679+
67680+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67681+{
67682+ struct page *page_m;
67683+ pte_t entry;
67684+
67685+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67686+ goto out;
67687+
67688+ entry = *pte;
67689+ page_m = vm_normal_page(vma, address, entry);
67690+ if (!page_m)
67691+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67692+ else if (PageAnon(page_m)) {
67693+ if (pax_find_mirror_vma(vma)) {
67694+ pte_unmap_unlock(pte, ptl);
67695+ lock_page(page_m);
67696+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67697+ if (pte_same(entry, *pte))
67698+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67699+ else
67700+ unlock_page(page_m);
67701+ }
67702+ } else
67703+ pax_mirror_file_pte(vma, address, page_m, ptl);
67704+
67705+out:
67706+ pte_unmap_unlock(pte, ptl);
67707+}
67708+#endif
67709+
67710 /*
67711 * This routine handles present pages, when users try to write
67712 * to a shared page. It is done by copying the page to a new address
67713@@ -2656,6 +2849,12 @@ gotten:
67714 */
67715 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67716 if (likely(pte_same(*page_table, orig_pte))) {
67717+
67718+#ifdef CONFIG_PAX_SEGMEXEC
67719+ if (pax_find_mirror_vma(vma))
67720+ BUG_ON(!trylock_page(new_page));
67721+#endif
67722+
67723 if (old_page) {
67724 if (!PageAnon(old_page)) {
67725 dec_mm_counter_fast(mm, MM_FILEPAGES);
67726@@ -2707,6 +2906,10 @@ gotten:
67727 page_remove_rmap(old_page);
67728 }
67729
67730+#ifdef CONFIG_PAX_SEGMEXEC
67731+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67732+#endif
67733+
67734 /* Free the old page.. */
67735 new_page = old_page;
67736 ret |= VM_FAULT_WRITE;
67737@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct
67738 swap_free(entry);
67739 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67740 try_to_free_swap(page);
67741+
67742+#ifdef CONFIG_PAX_SEGMEXEC
67743+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67744+#endif
67745+
67746 unlock_page(page);
67747 if (swapcache) {
67748 /*
67749@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct
67750
67751 /* No need to invalidate - it was non-present before */
67752 update_mmu_cache(vma, address, page_table);
67753+
67754+#ifdef CONFIG_PAX_SEGMEXEC
67755+ pax_mirror_anon_pte(vma, address, page, ptl);
67756+#endif
67757+
67758 unlock:
67759 pte_unmap_unlock(page_table, ptl);
67760 out:
67761@@ -3028,40 +3241,6 @@ out_release:
67762 }
67763
67764 /*
67765- * This is like a special single-page "expand_{down|up}wards()",
67766- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67767- * doesn't hit another vma.
67768- */
67769-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67770-{
67771- address &= PAGE_MASK;
67772- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67773- struct vm_area_struct *prev = vma->vm_prev;
67774-
67775- /*
67776- * Is there a mapping abutting this one below?
67777- *
67778- * That's only ok if it's the same stack mapping
67779- * that has gotten split..
67780- */
67781- if (prev && prev->vm_end == address)
67782- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67783-
67784- expand_downwards(vma, address - PAGE_SIZE);
67785- }
67786- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67787- struct vm_area_struct *next = vma->vm_next;
67788-
67789- /* As VM_GROWSDOWN but s/below/above/ */
67790- if (next && next->vm_start == address + PAGE_SIZE)
67791- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67792-
67793- expand_upwards(vma, address + PAGE_SIZE);
67794- }
67795- return 0;
67796-}
67797-
67798-/*
67799 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67800 * but allow concurrent faults), and pte mapped but not yet locked.
67801 * We return with mmap_sem still held, but pte unmapped and unlocked.
67802@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_s
67803 unsigned long address, pte_t *page_table, pmd_t *pmd,
67804 unsigned int flags)
67805 {
67806- struct page *page;
67807+ struct page *page = NULL;
67808 spinlock_t *ptl;
67809 pte_t entry;
67810
67811- pte_unmap(page_table);
67812-
67813- /* Check if we need to add a guard page to the stack */
67814- if (check_stack_guard_page(vma, address) < 0)
67815- return VM_FAULT_SIGBUS;
67816-
67817- /* Use the zero-page for reads */
67818 if (!(flags & FAULT_FLAG_WRITE)) {
67819 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67820 vma->vm_page_prot));
67821- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67822+ ptl = pte_lockptr(mm, pmd);
67823+ spin_lock(ptl);
67824 if (!pte_none(*page_table))
67825 goto unlock;
67826 goto setpte;
67827 }
67828
67829 /* Allocate our own private page. */
67830+ pte_unmap(page_table);
67831+
67832 if (unlikely(anon_vma_prepare(vma)))
67833 goto oom;
67834 page = alloc_zeroed_user_highpage_movable(vma, address);
67835@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_s
67836 if (!pte_none(*page_table))
67837 goto release;
67838
67839+#ifdef CONFIG_PAX_SEGMEXEC
67840+ if (pax_find_mirror_vma(vma))
67841+ BUG_ON(!trylock_page(page));
67842+#endif
67843+
67844 inc_mm_counter_fast(mm, MM_ANONPAGES);
67845 page_add_new_anon_rmap(page, vma, address);
67846 setpte:
67847@@ -3116,6 +3296,12 @@ setpte:
67848
67849 /* No need to invalidate - it was non-present before */
67850 update_mmu_cache(vma, address, page_table);
67851+
67852+#ifdef CONFIG_PAX_SEGMEXEC
67853+ if (page)
67854+ pax_mirror_anon_pte(vma, address, page, ptl);
67855+#endif
67856+
67857 unlock:
67858 pte_unmap_unlock(page_table, ptl);
67859 return 0;
67860@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *
67861 */
67862 /* Only go through if we didn't race with anybody else... */
67863 if (likely(pte_same(*page_table, orig_pte))) {
67864+
67865+#ifdef CONFIG_PAX_SEGMEXEC
67866+ if (anon && pax_find_mirror_vma(vma))
67867+ BUG_ON(!trylock_page(page));
67868+#endif
67869+
67870 flush_icache_page(vma, page);
67871 entry = mk_pte(page, vma->vm_page_prot);
67872 if (flags & FAULT_FLAG_WRITE)
67873@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *
67874
67875 /* no need to invalidate: a not-present page won't be cached */
67876 update_mmu_cache(vma, address, page_table);
67877+
67878+#ifdef CONFIG_PAX_SEGMEXEC
67879+ if (anon)
67880+ pax_mirror_anon_pte(vma, address, page, ptl);
67881+ else
67882+ pax_mirror_file_pte(vma, address, page, ptl);
67883+#endif
67884+
67885 } else {
67886 if (cow_page)
67887 mem_cgroup_uncharge_page(cow_page);
67888@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *m
67889 if (flags & FAULT_FLAG_WRITE)
67890 flush_tlb_fix_spurious_fault(vma, address);
67891 }
67892+
67893+#ifdef CONFIG_PAX_SEGMEXEC
67894+ pax_mirror_pte(vma, address, pte, pmd, ptl);
67895+ return 0;
67896+#endif
67897+
67898 unlock:
67899 pte_unmap_unlock(pte, ptl);
67900 return 0;
67901@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm
67902 pmd_t *pmd;
67903 pte_t *pte;
67904
67905+#ifdef CONFIG_PAX_SEGMEXEC
67906+ struct vm_area_struct *vma_m;
67907+#endif
67908+
67909 __set_current_state(TASK_RUNNING);
67910
67911 count_vm_event(PGFAULT);
67912@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm
67913 if (unlikely(is_vm_hugetlb_page(vma)))
67914 return hugetlb_fault(mm, vma, address, flags);
67915
67916+#ifdef CONFIG_PAX_SEGMEXEC
67917+ vma_m = pax_find_mirror_vma(vma);
67918+ if (vma_m) {
67919+ unsigned long address_m;
67920+ pgd_t *pgd_m;
67921+ pud_t *pud_m;
67922+ pmd_t *pmd_m;
67923+
67924+ if (vma->vm_start > vma_m->vm_start) {
67925+ address_m = address;
67926+ address -= SEGMEXEC_TASK_SIZE;
67927+ vma = vma_m;
67928+ } else
67929+ address_m = address + SEGMEXEC_TASK_SIZE;
67930+
67931+ pgd_m = pgd_offset(mm, address_m);
67932+ pud_m = pud_alloc(mm, pgd_m, address_m);
67933+ if (!pud_m)
67934+ return VM_FAULT_OOM;
67935+ pmd_m = pmd_alloc(mm, pud_m, address_m);
67936+ if (!pmd_m)
67937+ return VM_FAULT_OOM;
67938+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
67939+ return VM_FAULT_OOM;
67940+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67941+ }
67942+#endif
67943+
67944 pgd = pgd_offset(mm, address);
67945 pud = pud_alloc(mm, pgd, address);
67946 if (!pud)
67947@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm
67948 * run pte_offset_map on the pmd, if an huge pmd could
67949 * materialize from under us from a different thread.
67950 */
67951- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
67952+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67953 return VM_FAULT_OOM;
67954 /* if an huge pmd materialized from under us just retry later */
67955 if (unlikely(pmd_trans_huge(*pmd)))
67956@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
67957 gate_vma.vm_start = FIXADDR_USER_START;
67958 gate_vma.vm_end = FIXADDR_USER_END;
67959 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67960- gate_vma.vm_page_prot = __P101;
67961+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67962 /*
67963 * Make sure the vDSO gets into every core dump.
67964 * Dumping its contents makes post-mortem fully interpretable later
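
The handle_mm_fault() hunk above pre-allocates page tables for the mirror of the faulting address on the other side of the SEGMEXEC split, so that the pax_mirror_*_pte() calls can later duplicate the mapping there. A standalone sketch of just that address arithmetic follows; it is not part of the patch, and SEGMEXEC_SPLIT is an illustrative stand-in for SEGMEXEC_TASK_SIZE (defined elsewhere in the patch, conventionally half of the i386 user address space).

/*
 * Illustrative sketch only -- models the SEGMEXEC address mirroring used
 * by the mm/memory.c hunks above: a fault address in the lower half of
 * the split address space mirrors to address + split, and vice versa.
 */
#include <stdio.h>

#define SEGMEXEC_SPLIT 0x60000000UL	/* stand-in for SEGMEXEC_TASK_SIZE */

static unsigned long mirror_of(unsigned long address)
{
	return address < SEGMEXEC_SPLIT ? address + SEGMEXEC_SPLIT
					: address - SEGMEXEC_SPLIT;
}

int main(void)
{
	unsigned long addr = 0x08048000UL;	/* a typical low user address */
	unsigned long mirror = mirror_of(addr);

	printf("%#lx <-> %#lx\n", addr, mirror);
	printf("round trip ok: %d\n", mirror_of(mirror) == addr);
	return 0;
}
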
67965diff -urNp linux-3.1.1/mm/memory-failure.c linux-3.1.1/mm/memory-failure.c
67966--- linux-3.1.1/mm/memory-failure.c 2011-11-11 15:19:27.000000000 -0500
67967+++ linux-3.1.1/mm/memory-failure.c 2011-11-16 18:39:08.000000000 -0500
67968@@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __r
67969
67970 int sysctl_memory_failure_recovery __read_mostly = 1;
67971
67972-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67973+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67974
67975 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67976
67977@@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_stru
67978 si.si_signo = SIGBUS;
67979 si.si_errno = 0;
67980 si.si_code = BUS_MCEERR_AO;
67981- si.si_addr = (void *)addr;
67982+ si.si_addr = (void __user *)addr;
67983 #ifdef __ARCH_SI_TRAPNO
67984 si.si_trapno = trapno;
67985 #endif
67986@@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn,
67987 }
67988
67989 nr_pages = 1 << compound_trans_order(hpage);
67990- atomic_long_add(nr_pages, &mce_bad_pages);
67991+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67992
67993 /*
67994 * We need/can do nothing about count=0 pages.
67995@@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn,
67996 if (!PageHWPoison(hpage)
67997 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67998 || (p != hpage && TestSetPageHWPoison(hpage))) {
67999- atomic_long_sub(nr_pages, &mce_bad_pages);
68000+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68001 return 0;
68002 }
68003 set_page_hwpoison_huge_page(hpage);
68004@@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn,
68005 }
68006 if (hwpoison_filter(p)) {
68007 if (TestClearPageHWPoison(p))
68008- atomic_long_sub(nr_pages, &mce_bad_pages);
68009+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68010 unlock_page(hpage);
68011 put_page(hpage);
68012 return 0;
68013@@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn)
68014 return 0;
68015 }
68016 if (TestClearPageHWPoison(p))
68017- atomic_long_sub(nr_pages, &mce_bad_pages);
68018+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68019 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
68020 return 0;
68021 }
68022@@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn)
68023 */
68024 if (TestClearPageHWPoison(page)) {
68025 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
68026- atomic_long_sub(nr_pages, &mce_bad_pages);
68027+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68028 freeit = 1;
68029 if (PageHuge(page))
68030 clear_page_hwpoison_huge_page(page);
68031@@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct
68032 }
68033 done:
68034 if (!PageHWPoison(hpage))
68035- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
68036+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
68037 set_page_hwpoison_huge_page(hpage);
68038 dequeue_hwpoisoned_huge_page(hpage);
68039 /* keep elevated page count for bad page */
68040@@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page,
68041 return ret;
68042
68043 done:
68044- atomic_long_add(1, &mce_bad_pages);
68045+ atomic_long_add_unchecked(1, &mce_bad_pages);
68046 SetPageHWPoison(page);
68047 /* keep elevated page count for bad page */
68048 return ret;
68049diff -urNp linux-3.1.1/mm/mempolicy.c linux-3.1.1/mm/mempolicy.c
68050--- linux-3.1.1/mm/mempolicy.c 2011-11-11 15:19:27.000000000 -0500
68051+++ linux-3.1.1/mm/mempolicy.c 2011-11-16 18:40:44.000000000 -0500
68052@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
68053 unsigned long vmstart;
68054 unsigned long vmend;
68055
68056+#ifdef CONFIG_PAX_SEGMEXEC
68057+ struct vm_area_struct *vma_m;
68058+#endif
68059+
68060 vma = find_vma_prev(mm, start, &prev);
68061 if (!vma || vma->vm_start > start)
68062 return -EFAULT;
68063@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
68064 err = policy_vma(vma, new_pol);
68065 if (err)
68066 goto out;
68067+
68068+#ifdef CONFIG_PAX_SEGMEXEC
68069+ vma_m = pax_find_mirror_vma(vma);
68070+ if (vma_m) {
68071+ err = policy_vma(vma_m, new_pol);
68072+ if (err)
68073+ goto out;
68074+ }
68075+#endif
68076+
68077 }
68078
68079 out:
68080@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
68081
68082 if (end < start)
68083 return -EINVAL;
68084+
68085+#ifdef CONFIG_PAX_SEGMEXEC
68086+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68087+ if (end > SEGMEXEC_TASK_SIZE)
68088+ return -EINVAL;
68089+ } else
68090+#endif
68091+
68092+ if (end > TASK_SIZE)
68093+ return -EINVAL;
68094+
68095 if (end == start)
68096 return 0;
68097
68098@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68099 if (!mm)
68100 goto out;
68101
68102+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68103+ if (mm != current->mm &&
68104+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68105+ err = -EPERM;
68106+ goto out;
68107+ }
68108+#endif
68109+
68110 /*
68111 * Check if this process has the right to modify the specified
68112 * process. The right exists if the process has administrative
68113@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68114 rcu_read_lock();
68115 tcred = __task_cred(task);
68116 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68117- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68118- !capable(CAP_SYS_NICE)) {
68119+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68120 rcu_read_unlock();
68121 err = -EPERM;
68122 goto out;
68123diff -urNp linux-3.1.1/mm/migrate.c linux-3.1.1/mm/migrate.c
68124--- linux-3.1.1/mm/migrate.c 2011-11-11 15:19:27.000000000 -0500
68125+++ linux-3.1.1/mm/migrate.c 2011-11-16 18:40:44.000000000 -0500
68126@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
68127 unsigned long chunk_start;
68128 int err;
68129
68130+ pax_track_stack();
68131+
68132 task_nodes = cpuset_mems_allowed(task);
68133
68134 err = -ENOMEM;
68135@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68136 if (!mm)
68137 return -EINVAL;
68138
68139+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68140+ if (mm != current->mm &&
68141+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68142+ err = -EPERM;
68143+ goto out;
68144+ }
68145+#endif
68146+
68147 /*
68148 * Check if this process has the right to modify the specified
68149 * process. The right exists if the process has administrative
68150@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68151 rcu_read_lock();
68152 tcred = __task_cred(task);
68153 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68154- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68155- !capable(CAP_SYS_NICE)) {
68156+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68157 rcu_read_unlock();
68158 err = -EPERM;
68159 goto out;
68160diff -urNp linux-3.1.1/mm/mlock.c linux-3.1.1/mm/mlock.c
68161--- linux-3.1.1/mm/mlock.c 2011-11-11 15:19:27.000000000 -0500
68162+++ linux-3.1.1/mm/mlock.c 2011-11-16 18:40:44.000000000 -0500
68163@@ -13,6 +13,7 @@
68164 #include <linux/pagemap.h>
68165 #include <linux/mempolicy.h>
68166 #include <linux/syscalls.h>
68167+#include <linux/security.h>
68168 #include <linux/sched.h>
68169 #include <linux/module.h>
68170 #include <linux/rmap.h>
68171@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
68172 return -EINVAL;
68173 if (end == start)
68174 return 0;
68175+ if (end > TASK_SIZE)
68176+ return -EINVAL;
68177+
68178 vma = find_vma_prev(current->mm, start, &prev);
68179 if (!vma || vma->vm_start > start)
68180 return -ENOMEM;
68181@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
68182 for (nstart = start ; ; ) {
68183 vm_flags_t newflags;
68184
68185+#ifdef CONFIG_PAX_SEGMEXEC
68186+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68187+ break;
68188+#endif
68189+
68190 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68191
68192 newflags = vma->vm_flags | VM_LOCKED;
68193@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68194 lock_limit >>= PAGE_SHIFT;
68195
68196 /* check against resource limits */
68197+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68198 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68199 error = do_mlock(start, len, 1);
68200 up_write(&current->mm->mmap_sem);
68201@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68202 static int do_mlockall(int flags)
68203 {
68204 struct vm_area_struct * vma, * prev = NULL;
68205- unsigned int def_flags = 0;
68206
68207 if (flags & MCL_FUTURE)
68208- def_flags = VM_LOCKED;
68209- current->mm->def_flags = def_flags;
68210+ current->mm->def_flags |= VM_LOCKED;
68211+ else
68212+ current->mm->def_flags &= ~VM_LOCKED;
68213 if (flags == MCL_FUTURE)
68214 goto out;
68215
68216 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68217 vm_flags_t newflags;
68218
68219+#ifdef CONFIG_PAX_SEGMEXEC
68220+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68221+ break;
68222+#endif
68223+
68224+ BUG_ON(vma->vm_end > TASK_SIZE);
68225 newflags = vma->vm_flags | VM_LOCKED;
68226 if (!(flags & MCL_CURRENT))
68227 newflags &= ~VM_LOCKED;
68228@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68229 lock_limit >>= PAGE_SHIFT;
68230
68231 ret = -ENOMEM;
68232+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68233 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68234 capable(CAP_IPC_LOCK))
68235 ret = do_mlockall(flags);
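
The do_mlockall() hunk above replaces the unconditional assignment of mm->def_flags with a targeted set/clear of VM_LOCKED, so that any other bits already present in the default flags survive an mlockall()/munlockall() cycle. A minimal sketch of that difference (flag values are illustrative, not the kernel's):

/*
 * Illustrative sketch only -- shows why the patch ORs/clears VM_LOCKED in
 * mm->def_flags instead of assigning it: assignment would wipe out any
 * other default flag already set.
 */
#include <stdio.h>

#define VM_LOCKED 0x00002000UL
#define VM_OTHER  0x00100000UL	/* stand-in for some other default flag */

int main(void)
{
	unsigned long def_flags = VM_OTHER;	/* already set before mlockall() */

	unsigned long assigned = VM_LOCKED;		/* old: def_flags = VM_LOCKED  */
	unsigned long or_ed    = def_flags | VM_LOCKED;	/* new: def_flags |= VM_LOCKED */

	printf("assign: %#lx  or: %#lx\n", assigned, or_ed);
	printf("other flag survives: %d\n", (or_ed & VM_OTHER) != 0);
	return 0;
}
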
68236diff -urNp linux-3.1.1/mm/mmap.c linux-3.1.1/mm/mmap.c
68237--- linux-3.1.1/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
68238+++ linux-3.1.1/mm/mmap.c 2011-11-16 18:40:44.000000000 -0500
68239@@ -46,6 +46,16 @@
68240 #define arch_rebalance_pgtables(addr, len) (addr)
68241 #endif
68242
68243+static inline void verify_mm_writelocked(struct mm_struct *mm)
68244+{
68245+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68246+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68247+ up_read(&mm->mmap_sem);
68248+ BUG();
68249+ }
68250+#endif
68251+}
68252+
68253 static void unmap_region(struct mm_struct *mm,
68254 struct vm_area_struct *vma, struct vm_area_struct *prev,
68255 unsigned long start, unsigned long end);
68256@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
68257 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68258 *
68259 */
68260-pgprot_t protection_map[16] = {
68261+pgprot_t protection_map[16] __read_only = {
68262 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68263 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68264 };
68265
68266-pgprot_t vm_get_page_prot(unsigned long vm_flags)
68267+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68268 {
68269- return __pgprot(pgprot_val(protection_map[vm_flags &
68270+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68271 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68272 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68273+
68274+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68275+ if (!(__supported_pte_mask & _PAGE_NX) &&
68276+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68277+ (vm_flags & (VM_READ | VM_WRITE)))
68278+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68279+#endif
68280+
68281+ return prot;
68282 }
68283 EXPORT_SYMBOL(vm_get_page_prot);
68284
68285 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68286 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68287 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68288+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68289 /*
68290 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68291 * other variables. It can be updated by several CPUs frequently.
68292@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma
68293 struct vm_area_struct *next = vma->vm_next;
68294
68295 might_sleep();
68296+ BUG_ON(vma->vm_mirror);
68297 if (vma->vm_ops && vma->vm_ops->close)
68298 vma->vm_ops->close(vma);
68299 if (vma->vm_file) {
68300@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68301 * not page aligned -Ram Gupta
68302 */
68303 rlim = rlimit(RLIMIT_DATA);
68304+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68305 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68306 (mm->end_data - mm->start_data) > rlim)
68307 goto out;
68308@@ -689,6 +711,12 @@ static int
68309 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68310 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68311 {
68312+
68313+#ifdef CONFIG_PAX_SEGMEXEC
68314+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68315+ return 0;
68316+#endif
68317+
68318 if (is_mergeable_vma(vma, file, vm_flags) &&
68319 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68320 if (vma->vm_pgoff == vm_pgoff)
68321@@ -708,6 +736,12 @@ static int
68322 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68323 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68324 {
68325+
68326+#ifdef CONFIG_PAX_SEGMEXEC
68327+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68328+ return 0;
68329+#endif
68330+
68331 if (is_mergeable_vma(vma, file, vm_flags) &&
68332 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68333 pgoff_t vm_pglen;
68334@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struc
68335 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68336 struct vm_area_struct *prev, unsigned long addr,
68337 unsigned long end, unsigned long vm_flags,
68338- struct anon_vma *anon_vma, struct file *file,
68339+ struct anon_vma *anon_vma, struct file *file,
68340 pgoff_t pgoff, struct mempolicy *policy)
68341 {
68342 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68343 struct vm_area_struct *area, *next;
68344 int err;
68345
68346+#ifdef CONFIG_PAX_SEGMEXEC
68347+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68348+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68349+
68350+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68351+#endif
68352+
68353 /*
68354 * We later require that vma->vm_flags == vm_flags,
68355 * so this tests vma->vm_flags & VM_SPECIAL, too.
68356@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct
68357 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68358 next = next->vm_next;
68359
68360+#ifdef CONFIG_PAX_SEGMEXEC
68361+ if (prev)
68362+ prev_m = pax_find_mirror_vma(prev);
68363+ if (area)
68364+ area_m = pax_find_mirror_vma(area);
68365+ if (next)
68366+ next_m = pax_find_mirror_vma(next);
68367+#endif
68368+
68369 /*
68370 * Can it merge with the predecessor?
68371 */
68372@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct
68373 /* cases 1, 6 */
68374 err = vma_adjust(prev, prev->vm_start,
68375 next->vm_end, prev->vm_pgoff, NULL);
68376- } else /* cases 2, 5, 7 */
68377+
68378+#ifdef CONFIG_PAX_SEGMEXEC
68379+ if (!err && prev_m)
68380+ err = vma_adjust(prev_m, prev_m->vm_start,
68381+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68382+#endif
68383+
68384+ } else { /* cases 2, 5, 7 */
68385 err = vma_adjust(prev, prev->vm_start,
68386 end, prev->vm_pgoff, NULL);
68387+
68388+#ifdef CONFIG_PAX_SEGMEXEC
68389+ if (!err && prev_m)
68390+ err = vma_adjust(prev_m, prev_m->vm_start,
68391+ end_m, prev_m->vm_pgoff, NULL);
68392+#endif
68393+
68394+ }
68395 if (err)
68396 return NULL;
68397 khugepaged_enter_vma_merge(prev);
68398@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct
68399 mpol_equal(policy, vma_policy(next)) &&
68400 can_vma_merge_before(next, vm_flags,
68401 anon_vma, file, pgoff+pglen)) {
68402- if (prev && addr < prev->vm_end) /* case 4 */
68403+ if (prev && addr < prev->vm_end) { /* case 4 */
68404 err = vma_adjust(prev, prev->vm_start,
68405 addr, prev->vm_pgoff, NULL);
68406- else /* cases 3, 8 */
68407+
68408+#ifdef CONFIG_PAX_SEGMEXEC
68409+ if (!err && prev_m)
68410+ err = vma_adjust(prev_m, prev_m->vm_start,
68411+ addr_m, prev_m->vm_pgoff, NULL);
68412+#endif
68413+
68414+ } else { /* cases 3, 8 */
68415 err = vma_adjust(area, addr, next->vm_end,
68416 next->vm_pgoff - pglen, NULL);
68417+
68418+#ifdef CONFIG_PAX_SEGMEXEC
68419+ if (!err && area_m)
68420+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68421+ next_m->vm_pgoff - pglen, NULL);
68422+#endif
68423+
68424+ }
68425 if (err)
68426 return NULL;
68427 khugepaged_enter_vma_merge(area);
68428@@ -921,14 +1001,11 @@ none:
68429 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68430 struct file *file, long pages)
68431 {
68432- const unsigned long stack_flags
68433- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68434-
68435 if (file) {
68436 mm->shared_vm += pages;
68437 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68438 mm->exec_vm += pages;
68439- } else if (flags & stack_flags)
68440+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68441 mm->stack_vm += pages;
68442 if (flags & (VM_RESERVED|VM_IO))
68443 mm->reserved_vm += pages;
68444@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file
68445 * (the exception is when the underlying filesystem is noexec
68446 * mounted, in which case we dont add PROT_EXEC.)
68447 */
68448- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68449+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68450 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68451 prot |= PROT_EXEC;
68452
68453@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file
68454 /* Obtain the address to map to. we verify (or select) it and ensure
68455 * that it represents a valid section of the address space.
68456 */
68457- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68458+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68459 if (addr & ~PAGE_MASK)
68460 return addr;
68461
68462@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file
68463 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68464 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68465
68466+#ifdef CONFIG_PAX_MPROTECT
68467+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68468+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68469+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68470+ gr_log_rwxmmap(file);
68471+
68472+#ifdef CONFIG_PAX_EMUPLT
68473+ vm_flags &= ~VM_EXEC;
68474+#else
68475+ return -EPERM;
68476+#endif
68477+
68478+ }
68479+
68480+ if (!(vm_flags & VM_EXEC))
68481+ vm_flags &= ~VM_MAYEXEC;
68482+#else
68483+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68484+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68485+#endif
68486+ else
68487+ vm_flags &= ~VM_MAYWRITE;
68488+ }
68489+#endif
68490+
68491+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68492+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68493+ vm_flags &= ~VM_PAGEEXEC;
68494+#endif
68495+
68496 if (flags & MAP_LOCKED)
68497 if (!can_do_mlock())
68498 return -EPERM;
68499@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file
68500 locked += mm->locked_vm;
68501 lock_limit = rlimit(RLIMIT_MEMLOCK);
68502 lock_limit >>= PAGE_SHIFT;
68503+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68504 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68505 return -EAGAIN;
68506 }
68507@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file
68508 if (error)
68509 return error;
68510
68511+ if (!gr_acl_handle_mmap(file, prot))
68512+ return -EACCES;
68513+
68514 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68515 }
68516 EXPORT_SYMBOL(do_mmap_pgoff);
68517@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area
68518 vm_flags_t vm_flags = vma->vm_flags;
68519
68520 /* If it was private or non-writable, the write bit is already clear */
68521- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68522+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68523 return 0;
68524
68525 /* The backer wishes to know when pages are first written to? */
68526@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *f
68527 unsigned long charged = 0;
68528 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68529
68530+#ifdef CONFIG_PAX_SEGMEXEC
68531+ struct vm_area_struct *vma_m = NULL;
68532+#endif
68533+
68534+ /*
68535+ * mm->mmap_sem is required to protect against another thread
68536+ * changing the mappings in case we sleep.
68537+ */
68538+ verify_mm_writelocked(mm);
68539+
68540 /* Clear old maps */
68541 error = -ENOMEM;
68542-munmap_back:
68543 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68544 if (vma && vma->vm_start < addr + len) {
68545 if (do_munmap(mm, addr, len))
68546 return -ENOMEM;
68547- goto munmap_back;
68548+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68549+ BUG_ON(vma && vma->vm_start < addr + len);
68550 }
68551
68552 /* Check against address space limit. */
68553@@ -1258,6 +1379,16 @@ munmap_back:
68554 goto unacct_error;
68555 }
68556
68557+#ifdef CONFIG_PAX_SEGMEXEC
68558+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68559+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68560+ if (!vma_m) {
68561+ error = -ENOMEM;
68562+ goto free_vma;
68563+ }
68564+ }
68565+#endif
68566+
68567 vma->vm_mm = mm;
68568 vma->vm_start = addr;
68569 vma->vm_end = addr + len;
68570@@ -1281,6 +1412,19 @@ munmap_back:
68571 error = file->f_op->mmap(file, vma);
68572 if (error)
68573 goto unmap_and_free_vma;
68574+
68575+#ifdef CONFIG_PAX_SEGMEXEC
68576+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68577+ added_exe_file_vma(mm);
68578+#endif
68579+
68580+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68581+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68582+ vma->vm_flags |= VM_PAGEEXEC;
68583+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68584+ }
68585+#endif
68586+
68587 if (vm_flags & VM_EXECUTABLE)
68588 added_exe_file_vma(mm);
68589
68590@@ -1316,6 +1460,11 @@ munmap_back:
68591 vma_link(mm, vma, prev, rb_link, rb_parent);
68592 file = vma->vm_file;
68593
68594+#ifdef CONFIG_PAX_SEGMEXEC
68595+ if (vma_m)
68596+ BUG_ON(pax_mirror_vma(vma_m, vma));
68597+#endif
68598+
68599 /* Once vma denies write, undo our temporary denial count */
68600 if (correct_wcount)
68601 atomic_inc(&inode->i_writecount);
68602@@ -1324,6 +1473,7 @@ out:
68603
68604 mm->total_vm += len >> PAGE_SHIFT;
68605 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68606+ track_exec_limit(mm, addr, addr + len, vm_flags);
68607 if (vm_flags & VM_LOCKED) {
68608 if (!mlock_vma_pages_range(vma, addr, addr + len))
68609 mm->locked_vm += (len >> PAGE_SHIFT);
68610@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
68611 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68612 charged = 0;
68613 free_vma:
68614+
68615+#ifdef CONFIG_PAX_SEGMEXEC
68616+ if (vma_m)
68617+ kmem_cache_free(vm_area_cachep, vma_m);
68618+#endif
68619+
68620 kmem_cache_free(vm_area_cachep, vma);
68621 unacct_error:
68622 if (charged)
68623@@ -1348,6 +1504,44 @@ unacct_error:
68624 return error;
68625 }
68626
68627+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68628+{
68629+ if (!vma) {
68630+#ifdef CONFIG_STACK_GROWSUP
68631+ if (addr > sysctl_heap_stack_gap)
68632+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68633+ else
68634+ vma = find_vma(current->mm, 0);
68635+ if (vma && (vma->vm_flags & VM_GROWSUP))
68636+ return false;
68637+#endif
68638+ return true;
68639+ }
68640+
68641+ if (addr + len > vma->vm_start)
68642+ return false;
68643+
68644+ if (vma->vm_flags & VM_GROWSDOWN)
68645+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68646+#ifdef CONFIG_STACK_GROWSUP
68647+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68648+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68649+#endif
68650+
68651+ return true;
68652+}
68653+
68654+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68655+{
68656+ if (vma->vm_start < len)
68657+ return -ENOMEM;
68658+ if (!(vma->vm_flags & VM_GROWSDOWN))
68659+ return vma->vm_start - len;
68660+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68661+ return vma->vm_start - len - sysctl_heap_stack_gap;
68662+ return -ENOMEM;
68663+}
68664+
68665 /* Get an address range which is currently unmapped.
68666 * For shmat() with addr=0.
68667 *
68668@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp
68669 if (flags & MAP_FIXED)
68670 return addr;
68671
68672+#ifdef CONFIG_PAX_RANDMMAP
68673+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68674+#endif
68675+
68676 if (addr) {
68677 addr = PAGE_ALIGN(addr);
68678- vma = find_vma(mm, addr);
68679- if (TASK_SIZE - len >= addr &&
68680- (!vma || addr + len <= vma->vm_start))
68681- return addr;
68682+ if (TASK_SIZE - len >= addr) {
68683+ vma = find_vma(mm, addr);
68684+ if (check_heap_stack_gap(vma, addr, len))
68685+ return addr;
68686+ }
68687 }
68688 if (len > mm->cached_hole_size) {
68689- start_addr = addr = mm->free_area_cache;
68690+ start_addr = addr = mm->free_area_cache;
68691 } else {
68692- start_addr = addr = TASK_UNMAPPED_BASE;
68693- mm->cached_hole_size = 0;
68694+ start_addr = addr = mm->mmap_base;
68695+ mm->cached_hole_size = 0;
68696 }
68697
68698 full_search:
68699@@ -1396,34 +1595,40 @@ full_search:
68700 * Start a new search - just in case we missed
68701 * some holes.
68702 */
68703- if (start_addr != TASK_UNMAPPED_BASE) {
68704- addr = TASK_UNMAPPED_BASE;
68705- start_addr = addr;
68706+ if (start_addr != mm->mmap_base) {
68707+ start_addr = addr = mm->mmap_base;
68708 mm->cached_hole_size = 0;
68709 goto full_search;
68710 }
68711 return -ENOMEM;
68712 }
68713- if (!vma || addr + len <= vma->vm_start) {
68714- /*
68715- * Remember the place where we stopped the search:
68716- */
68717- mm->free_area_cache = addr + len;
68718- return addr;
68719- }
68720+ if (check_heap_stack_gap(vma, addr, len))
68721+ break;
68722 if (addr + mm->cached_hole_size < vma->vm_start)
68723 mm->cached_hole_size = vma->vm_start - addr;
68724 addr = vma->vm_end;
68725 }
68726+
68727+ /*
68728+ * Remember the place where we stopped the search:
68729+ */
68730+ mm->free_area_cache = addr + len;
68731+ return addr;
68732 }
68733 #endif
68734
68735 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68736 {
68737+
68738+#ifdef CONFIG_PAX_SEGMEXEC
68739+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68740+ return;
68741+#endif
68742+
68743 /*
68744 * Is this a new hole at the lowest possible address?
68745 */
68746- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68747+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68748 mm->free_area_cache = addr;
68749 mm->cached_hole_size = ~0UL;
68750 }
68751@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct fi
68752 {
68753 struct vm_area_struct *vma;
68754 struct mm_struct *mm = current->mm;
68755- unsigned long addr = addr0;
68756+ unsigned long base = mm->mmap_base, addr = addr0;
68757
68758 /* requested length too big for entire address space */
68759 if (len > TASK_SIZE)
68760@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct fi
68761 if (flags & MAP_FIXED)
68762 return addr;
68763
68764+#ifdef CONFIG_PAX_RANDMMAP
68765+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68766+#endif
68767+
68768 /* requesting a specific address */
68769 if (addr) {
68770 addr = PAGE_ALIGN(addr);
68771- vma = find_vma(mm, addr);
68772- if (TASK_SIZE - len >= addr &&
68773- (!vma || addr + len <= vma->vm_start))
68774- return addr;
68775+ if (TASK_SIZE - len >= addr) {
68776+ vma = find_vma(mm, addr);
68777+ if (check_heap_stack_gap(vma, addr, len))
68778+ return addr;
68779+ }
68780 }
68781
68782 /* check if free_area_cache is useful for us */
68783@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct fi
68784 /* make sure it can fit in the remaining address space */
68785 if (addr > len) {
68786 vma = find_vma(mm, addr-len);
68787- if (!vma || addr <= vma->vm_start)
68788+ if (check_heap_stack_gap(vma, addr - len, len))
68789 /* remember the address as a hint for next time */
68790 return (mm->free_area_cache = addr-len);
68791 }
68792@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct fi
68793 * return with success:
68794 */
68795 vma = find_vma(mm, addr);
68796- if (!vma || addr+len <= vma->vm_start)
68797+ if (check_heap_stack_gap(vma, addr, len))
68798 /* remember the address as a hint for next time */
68799 return (mm->free_area_cache = addr);
68800
68801@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct fi
68802 mm->cached_hole_size = vma->vm_start - addr;
68803
68804 /* try just below the current vma->vm_start */
68805- addr = vma->vm_start-len;
68806- } while (len < vma->vm_start);
68807+ addr = skip_heap_stack_gap(vma, len);
68808+ } while (!IS_ERR_VALUE(addr));
68809
68810 bottomup:
68811 /*
68812@@ -1507,13 +1717,21 @@ bottomup:
68813 * can happen with large stack limits and large mmap()
68814 * allocations.
68815 */
68816+ mm->mmap_base = TASK_UNMAPPED_BASE;
68817+
68818+#ifdef CONFIG_PAX_RANDMMAP
68819+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68820+ mm->mmap_base += mm->delta_mmap;
68821+#endif
68822+
68823+ mm->free_area_cache = mm->mmap_base;
68824 mm->cached_hole_size = ~0UL;
68825- mm->free_area_cache = TASK_UNMAPPED_BASE;
68826 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68827 /*
68828 * Restore the topdown base:
68829 */
68830- mm->free_area_cache = mm->mmap_base;
68831+ mm->mmap_base = base;
68832+ mm->free_area_cache = base;
68833 mm->cached_hole_size = ~0UL;
68834
68835 return addr;
68836@@ -1522,6 +1740,12 @@ bottomup:
68837
68838 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68839 {
68840+
68841+#ifdef CONFIG_PAX_SEGMEXEC
68842+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68843+ return;
68844+#endif
68845+
68846 /*
68847 * Is this a new hole at the highest possible address?
68848 */
68849@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_s
68850 mm->free_area_cache = addr;
68851
68852 /* dont allow allocations above current base */
68853- if (mm->free_area_cache > mm->mmap_base)
68854+ if (mm->free_area_cache > mm->mmap_base) {
68855 mm->free_area_cache = mm->mmap_base;
68856+ mm->cached_hole_size = ~0UL;
68857+ }
68858 }
68859
68860 unsigned long
68861@@ -1638,6 +1864,28 @@ out:
68862 return prev ? prev->vm_next : vma;
68863 }
68864
68865+#ifdef CONFIG_PAX_SEGMEXEC
68866+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68867+{
68868+ struct vm_area_struct *vma_m;
68869+
68870+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68871+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68872+ BUG_ON(vma->vm_mirror);
68873+ return NULL;
68874+ }
68875+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68876+ vma_m = vma->vm_mirror;
68877+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68878+ BUG_ON(vma->vm_file != vma_m->vm_file);
68879+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68880+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
68881+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
68882+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68883+ return vma_m;
68884+}
68885+#endif
68886+
68887 /*
68888 * Verify that the stack growth is acceptable and
68889 * update accounting. This is shared with both the
68890@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_a
68891 return -ENOMEM;
68892
68893 /* Stack limit test */
68894+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
68895 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
68896 return -ENOMEM;
68897
68898@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_a
68899 locked = mm->locked_vm + grow;
68900 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
68901 limit >>= PAGE_SHIFT;
68902+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68903 if (locked > limit && !capable(CAP_IPC_LOCK))
68904 return -ENOMEM;
68905 }
68906@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_a
68907 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68908 * vma is the last one with address > vma->vm_end. Have to extend vma.
68909 */
68910+#ifndef CONFIG_IA64
68911+static
68912+#endif
68913 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68914 {
68915 int error;
68916+ bool locknext;
68917
68918 if (!(vma->vm_flags & VM_GROWSUP))
68919 return -EFAULT;
68920
68921+ /* Also guard against wrapping around to address 0. */
68922+ if (address < PAGE_ALIGN(address+1))
68923+ address = PAGE_ALIGN(address+1);
68924+ else
68925+ return -ENOMEM;
68926+
68927 /*
68928 * We must make sure the anon_vma is allocated
68929 * so that the anon_vma locking is not a noop.
68930 */
68931 if (unlikely(anon_vma_prepare(vma)))
68932 return -ENOMEM;
68933+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68934+ if (locknext && anon_vma_prepare(vma->vm_next))
68935+ return -ENOMEM;
68936 vma_lock_anon_vma(vma);
68937+ if (locknext)
68938+ vma_lock_anon_vma(vma->vm_next);
68939
68940 /*
68941 * vma->vm_start/vm_end cannot change under us because the caller
68942 * is required to hold the mmap_sem in read mode. We need the
68943- * anon_vma lock to serialize against concurrent expand_stacks.
68944- * Also guard against wrapping around to address 0.
68945+ * anon_vma locks to serialize against concurrent expand_stacks
68946+ * and expand_upwards.
68947 */
68948- if (address < PAGE_ALIGN(address+4))
68949- address = PAGE_ALIGN(address+4);
68950- else {
68951- vma_unlock_anon_vma(vma);
68952- return -ENOMEM;
68953- }
68954 error = 0;
68955
68956 /* Somebody else might have raced and expanded it already */
68957- if (address > vma->vm_end) {
68958+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68959+ error = -ENOMEM;
68960+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68961 unsigned long size, grow;
68962
68963 size = address - vma->vm_start;
68964@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct
68965 }
68966 }
68967 }
68968+ if (locknext)
68969+ vma_unlock_anon_vma(vma->vm_next);
68970 vma_unlock_anon_vma(vma);
68971 khugepaged_enter_vma_merge(vma);
68972 return error;
68973@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_stru
68974 unsigned long address)
68975 {
68976 int error;
68977+ bool lockprev = false;
68978+ struct vm_area_struct *prev;
68979
68980 /*
68981 * We must make sure the anon_vma is allocated
68982@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_stru
68983 if (error)
68984 return error;
68985
68986+ prev = vma->vm_prev;
68987+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68988+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68989+#endif
68990+ if (lockprev && anon_vma_prepare(prev))
68991+ return -ENOMEM;
68992+ if (lockprev)
68993+ vma_lock_anon_vma(prev);
68994+
68995 vma_lock_anon_vma(vma);
68996
68997 /*
68998@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_stru
68999 */
69000
69001 /* Somebody else might have raced and expanded it already */
69002- if (address < vma->vm_start) {
69003+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69004+ error = -ENOMEM;
69005+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69006 unsigned long size, grow;
69007
69008+#ifdef CONFIG_PAX_SEGMEXEC
69009+ struct vm_area_struct *vma_m;
69010+
69011+ vma_m = pax_find_mirror_vma(vma);
69012+#endif
69013+
69014 size = vma->vm_end - address;
69015 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69016
69017@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_stru
69018 if (!error) {
69019 vma->vm_start = address;
69020 vma->vm_pgoff -= grow;
69021+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69022+
69023+#ifdef CONFIG_PAX_SEGMEXEC
69024+ if (vma_m) {
69025+ vma_m->vm_start -= grow << PAGE_SHIFT;
69026+ vma_m->vm_pgoff -= grow;
69027+ }
69028+#endif
69029+
69030 perf_event_mmap(vma);
69031 }
69032 }
69033 }
69034 vma_unlock_anon_vma(vma);
69035+ if (lockprev)
69036+ vma_unlock_anon_vma(prev);
69037 khugepaged_enter_vma_merge(vma);
69038 return error;
69039 }
69040@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_st
69041 do {
69042 long nrpages = vma_pages(vma);
69043
69044+#ifdef CONFIG_PAX_SEGMEXEC
69045+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69046+ vma = remove_vma(vma);
69047+ continue;
69048+ }
69049+#endif
69050+
69051 mm->total_vm -= nrpages;
69052 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69053 vma = remove_vma(vma);
69054@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_str
69055 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69056 vma->vm_prev = NULL;
69057 do {
69058+
69059+#ifdef CONFIG_PAX_SEGMEXEC
69060+ if (vma->vm_mirror) {
69061+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69062+ vma->vm_mirror->vm_mirror = NULL;
69063+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69064+ vma->vm_mirror = NULL;
69065+ }
69066+#endif
69067+
69068 rb_erase(&vma->vm_rb, &mm->mm_rb);
69069 mm->map_count--;
69070 tail_vma = vma;
69071@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct
69072 struct vm_area_struct *new;
69073 int err = -ENOMEM;
69074
69075+#ifdef CONFIG_PAX_SEGMEXEC
69076+ struct vm_area_struct *vma_m, *new_m = NULL;
69077+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69078+#endif
69079+
69080 if (is_vm_hugetlb_page(vma) && (addr &
69081 ~(huge_page_mask(hstate_vma(vma)))))
69082 return -EINVAL;
69083
69084+#ifdef CONFIG_PAX_SEGMEXEC
69085+ vma_m = pax_find_mirror_vma(vma);
69086+#endif
69087+
69088 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69089 if (!new)
69090 goto out_err;
69091
69092+#ifdef CONFIG_PAX_SEGMEXEC
69093+ if (vma_m) {
69094+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69095+ if (!new_m) {
69096+ kmem_cache_free(vm_area_cachep, new);
69097+ goto out_err;
69098+ }
69099+ }
69100+#endif
69101+
69102 /* most fields are the same, copy all, and then fixup */
69103 *new = *vma;
69104
69105@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct
69106 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69107 }
69108
69109+#ifdef CONFIG_PAX_SEGMEXEC
69110+ if (vma_m) {
69111+ *new_m = *vma_m;
69112+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
69113+ new_m->vm_mirror = new;
69114+ new->vm_mirror = new_m;
69115+
69116+ if (new_below)
69117+ new_m->vm_end = addr_m;
69118+ else {
69119+ new_m->vm_start = addr_m;
69120+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69121+ }
69122+ }
69123+#endif
69124+
69125 pol = mpol_dup(vma_policy(vma));
69126 if (IS_ERR(pol)) {
69127 err = PTR_ERR(pol);
69128@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct
69129 else
69130 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69131
69132+#ifdef CONFIG_PAX_SEGMEXEC
69133+ if (!err && vma_m) {
69134+ if (anon_vma_clone(new_m, vma_m))
69135+ goto out_free_mpol;
69136+
69137+ mpol_get(pol);
69138+ vma_set_policy(new_m, pol);
69139+
69140+ if (new_m->vm_file) {
69141+ get_file(new_m->vm_file);
69142+ if (vma_m->vm_flags & VM_EXECUTABLE)
69143+ added_exe_file_vma(mm);
69144+ }
69145+
69146+ if (new_m->vm_ops && new_m->vm_ops->open)
69147+ new_m->vm_ops->open(new_m);
69148+
69149+ if (new_below)
69150+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69151+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69152+ else
69153+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69154+
69155+ if (err) {
69156+ if (new_m->vm_ops && new_m->vm_ops->close)
69157+ new_m->vm_ops->close(new_m);
69158+ if (new_m->vm_file) {
69159+ if (vma_m->vm_flags & VM_EXECUTABLE)
69160+ removed_exe_file_vma(mm);
69161+ fput(new_m->vm_file);
69162+ }
69163+ mpol_put(pol);
69164+ }
69165+ }
69166+#endif
69167+
69168 /* Success. */
69169 if (!err)
69170 return 0;
69171@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct
69172 removed_exe_file_vma(mm);
69173 fput(new->vm_file);
69174 }
69175- unlink_anon_vmas(new);
69176 out_free_mpol:
69177 mpol_put(pol);
69178 out_free_vma:
69179+
69180+#ifdef CONFIG_PAX_SEGMEXEC
69181+ if (new_m) {
69182+ unlink_anon_vmas(new_m);
69183+ kmem_cache_free(vm_area_cachep, new_m);
69184+ }
69185+#endif
69186+
69187+ unlink_anon_vmas(new);
69188 kmem_cache_free(vm_area_cachep, new);
69189 out_err:
69190 return err;
69191@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct
69192 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69193 unsigned long addr, int new_below)
69194 {
69195+
69196+#ifdef CONFIG_PAX_SEGMEXEC
69197+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69198+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69199+ if (mm->map_count >= sysctl_max_map_count-1)
69200+ return -ENOMEM;
69201+ } else
69202+#endif
69203+
69204 if (mm->map_count >= sysctl_max_map_count)
69205 return -ENOMEM;
69206
69207@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, stru
69208 * work. This now handles partial unmappings.
69209 * Jeremy Fitzhardinge <jeremy@goop.org>
69210 */
69211+#ifdef CONFIG_PAX_SEGMEXEC
69212 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69213 {
69214+ int ret = __do_munmap(mm, start, len);
69215+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69216+ return ret;
69217+
69218+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69219+}
69220+
69221+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69222+#else
69223+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69224+#endif
69225+{
69226 unsigned long end;
69227 struct vm_area_struct *vma, *prev, *last;
69228
69229+ /*
69230+ * mm->mmap_sem is required to protect against another thread
69231+ * changing the mappings in case we sleep.
69232+ */
69233+ verify_mm_writelocked(mm);
69234+
69235 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69236 return -EINVAL;
69237
69238@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsi
69239 /* Fix up all other VM information */
69240 remove_vma_list(mm, vma);
69241
69242+ track_exec_limit(mm, start, end, 0UL);
69243+
69244 return 0;
69245 }
69246
69247@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69248
69249 profile_munmap(addr);
69250
69251+#ifdef CONFIG_PAX_SEGMEXEC
69252+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69253+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69254+ return -EINVAL;
69255+#endif
69256+
69257 down_write(&mm->mmap_sem);
69258 ret = do_munmap(mm, addr, len);
69259 up_write(&mm->mmap_sem);
69260 return ret;
69261 }
69262
69263-static inline void verify_mm_writelocked(struct mm_struct *mm)
69264-{
69265-#ifdef CONFIG_DEBUG_VM
69266- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69267- WARN_ON(1);
69268- up_read(&mm->mmap_sem);
69269- }
69270-#endif
69271-}
69272-
69273 /*
69274 * this is really a simplified "do_mmap". it only handles
69275 * anonymous maps. eventually we may be able to do some
69276@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr,
69277 struct rb_node ** rb_link, * rb_parent;
69278 pgoff_t pgoff = addr >> PAGE_SHIFT;
69279 int error;
69280+ unsigned long charged;
69281
69282 len = PAGE_ALIGN(len);
69283 if (!len)
69284@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr,
69285
69286 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69287
69288+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69289+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69290+ flags &= ~VM_EXEC;
69291+
69292+#ifdef CONFIG_PAX_MPROTECT
69293+ if (mm->pax_flags & MF_PAX_MPROTECT)
69294+ flags &= ~VM_MAYEXEC;
69295+#endif
69296+
69297+ }
69298+#endif
69299+
69300 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69301 if (error & ~PAGE_MASK)
69302 return error;
69303
69304+ charged = len >> PAGE_SHIFT;
69305+
69306 /*
69307 * mlock MCL_FUTURE?
69308 */
69309 if (mm->def_flags & VM_LOCKED) {
69310 unsigned long locked, lock_limit;
69311- locked = len >> PAGE_SHIFT;
69312+ locked = charged;
69313 locked += mm->locked_vm;
69314 lock_limit = rlimit(RLIMIT_MEMLOCK);
69315 lock_limit >>= PAGE_SHIFT;
69316@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr,
69317 /*
69318 * Clear old maps. this also does some error checking for us
69319 */
69320- munmap_back:
69321 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69322 if (vma && vma->vm_start < addr + len) {
69323 if (do_munmap(mm, addr, len))
69324 return -ENOMEM;
69325- goto munmap_back;
69326+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69327+ BUG_ON(vma && vma->vm_start < addr + len);
69328 }
69329
69330 /* Check against address space limits *after* clearing old maps... */
69331- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69332+ if (!may_expand_vm(mm, charged))
69333 return -ENOMEM;
69334
69335 if (mm->map_count > sysctl_max_map_count)
69336 return -ENOMEM;
69337
69338- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69339+ if (security_vm_enough_memory(charged))
69340 return -ENOMEM;
69341
69342 /* Can we just expand an old private anonymous mapping? */
69343@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr,
69344 */
69345 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69346 if (!vma) {
69347- vm_unacct_memory(len >> PAGE_SHIFT);
69348+ vm_unacct_memory(charged);
69349 return -ENOMEM;
69350 }
69351
69352@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr,
69353 vma_link(mm, vma, prev, rb_link, rb_parent);
69354 out:
69355 perf_event_mmap(vma);
69356- mm->total_vm += len >> PAGE_SHIFT;
69357+ mm->total_vm += charged;
69358 if (flags & VM_LOCKED) {
69359 if (!mlock_vma_pages_range(vma, addr, addr + len))
69360- mm->locked_vm += (len >> PAGE_SHIFT);
69361+ mm->locked_vm += charged;
69362 }
69363+ track_exec_limit(mm, addr, addr + len, flags);
69364 return addr;
69365 }
69366
69367@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
69368 * Walk the list again, actually closing and freeing it,
69369 * with preemption enabled, without holding any MM locks.
69370 */
69371- while (vma)
69372+ while (vma) {
69373+ vma->vm_mirror = NULL;
69374 vma = remove_vma(vma);
69375+ }
69376
69377 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69378 }
69379@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct *
69380 struct vm_area_struct * __vma, * prev;
69381 struct rb_node ** rb_link, * rb_parent;
69382
69383+#ifdef CONFIG_PAX_SEGMEXEC
69384+ struct vm_area_struct *vma_m = NULL;
69385+#endif
69386+
69387+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69388+ return -EPERM;
69389+
69390 /*
69391 * The vm_pgoff of a purely anonymous vma should be irrelevant
69392 * until its first write fault, when page's anon_vma and index
69393@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct *
69394 if ((vma->vm_flags & VM_ACCOUNT) &&
69395 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69396 return -ENOMEM;
69397+
69398+#ifdef CONFIG_PAX_SEGMEXEC
69399+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69400+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69401+ if (!vma_m)
69402+ return -ENOMEM;
69403+ }
69404+#endif
69405+
69406 vma_link(mm, vma, prev, rb_link, rb_parent);
69407+
69408+#ifdef CONFIG_PAX_SEGMEXEC
69409+ if (vma_m)
69410+ BUG_ON(pax_mirror_vma(vma_m, vma));
69411+#endif
69412+
69413 return 0;
69414 }
69415
69416@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct v
69417 struct rb_node **rb_link, *rb_parent;
69418 struct mempolicy *pol;
69419
69420+ BUG_ON(vma->vm_mirror);
69421+
69422 /*
69423 * If anonymous vma has not yet been faulted, update new pgoff
69424 * to match new location, to increase its chance of merging.
69425@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct v
69426 return NULL;
69427 }
69428
69429+#ifdef CONFIG_PAX_SEGMEXEC
69430+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69431+{
69432+ struct vm_area_struct *prev_m;
69433+ struct rb_node **rb_link_m, *rb_parent_m;
69434+ struct mempolicy *pol_m;
69435+
69436+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69437+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69438+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69439+ *vma_m = *vma;
69440+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69441+ if (anon_vma_clone(vma_m, vma))
69442+ return -ENOMEM;
69443+ pol_m = vma_policy(vma_m);
69444+ mpol_get(pol_m);
69445+ vma_set_policy(vma_m, pol_m);
69446+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69447+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69448+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69449+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69450+ if (vma_m->vm_file)
69451+ get_file(vma_m->vm_file);
69452+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69453+ vma_m->vm_ops->open(vma_m);
69454+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69455+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69456+ vma_m->vm_mirror = vma;
69457+ vma->vm_mirror = vma_m;
69458+ return 0;
69459+}
69460+#endif
69461+
69462 /*
69463 * Return true if the calling process may expand its vm space by the passed
69464 * number of pages
69465@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm,
69466 unsigned long lim;
69467
69468 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69469-
69470+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69471 if (cur + npages > lim)
69472 return 0;
69473 return 1;
69474@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_st
69475 vma->vm_start = addr;
69476 vma->vm_end = addr + len;
69477
69478+#ifdef CONFIG_PAX_MPROTECT
69479+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69480+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69481+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69482+ return -EPERM;
69483+ if (!(vm_flags & VM_EXEC))
69484+ vm_flags &= ~VM_MAYEXEC;
69485+#else
69486+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69487+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69488+#endif
69489+ else
69490+ vm_flags &= ~VM_MAYWRITE;
69491+ }
69492+#endif
69493+
69494 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69495 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69496
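
The check_heap_stack_gap()/skip_heap_stack_gap() helpers added to mm/mmap.c above gate every candidate range returned by the unmapped-area searches: the range must not overlap the next vma, and if that vma is a downward-growing stack it must additionally stay sysctl_heap_stack_gap bytes below it. The sketch below reimplements only that acceptance test outside the kernel (types are simplified, the STACK_GROWSUP branches are omitted, and all names are stand-ins):

/*
 * Illustrative sketch only -- the gap acceptance test from the mm/mmap.c
 * hunks above, reduced to the VM_GROWSDOWN case with simplified types.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x1UL
#define GAP (64UL * 1024)	/* default sysctl_heap_stack_gap */

struct vma { unsigned long start, end, flags; };

static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
	if (!next)			/* nothing above the candidate */
		return true;
	if (addr + len > next->start)	/* would overlap the next vma */
		return false;
	if (next->flags & VM_GROWSDOWN)	/* keep the gap below a stack */
		return GAP <= next->start - addr - len;
	return true;
}

int main(void)
{
	struct vma stack = { 0xbf000000UL, 0xbf100000UL, VM_GROWSDOWN };

	/* ends exactly one gap below the stack: accepted */
	printf("%d\n", gap_ok(&stack, stack.start - GAP - 4096, 4096));
	/* one page closer: rejected, the gap would shrink */
	printf("%d\n", gap_ok(&stack, stack.start - GAP, 4096));
	return 0;
}
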
69497diff -urNp linux-3.1.1/mm/mprotect.c linux-3.1.1/mm/mprotect.c
69498--- linux-3.1.1/mm/mprotect.c 2011-11-11 15:19:27.000000000 -0500
69499+++ linux-3.1.1/mm/mprotect.c 2011-11-16 18:40:44.000000000 -0500
69500@@ -23,10 +23,16 @@
69501 #include <linux/mmu_notifier.h>
69502 #include <linux/migrate.h>
69503 #include <linux/perf_event.h>
69504+
69505+#ifdef CONFIG_PAX_MPROTECT
69506+#include <linux/elf.h>
69507+#endif
69508+
69509 #include <asm/uaccess.h>
69510 #include <asm/pgtable.h>
69511 #include <asm/cacheflush.h>
69512 #include <asm/tlbflush.h>
69513+#include <asm/mmu_context.h>
69514
69515 #ifndef pgprot_modify
69516 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69517@@ -141,6 +147,48 @@ static void change_protection(struct vm_
69518 flush_tlb_range(vma, start, end);
69519 }
69520
69521+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69522+/* called while holding the mmap semaphore for writing, except for stack expansion */
69523+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69524+{
69525+ unsigned long oldlimit, newlimit = 0UL;
69526+
69527+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69528+ return;
69529+
69530+ spin_lock(&mm->page_table_lock);
69531+ oldlimit = mm->context.user_cs_limit;
69532+ if ((prot & VM_EXEC) && oldlimit < end)
69533+ /* USER_CS limit moved up */
69534+ newlimit = end;
69535+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69536+ /* USER_CS limit moved down */
69537+ newlimit = start;
69538+
69539+ if (newlimit) {
69540+ mm->context.user_cs_limit = newlimit;
69541+
69542+#ifdef CONFIG_SMP
69543+ wmb();
69544+ cpus_clear(mm->context.cpu_user_cs_mask);
69545+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69546+#endif
69547+
69548+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69549+ }
69550+ spin_unlock(&mm->page_table_lock);
69551+ if (newlimit == end) {
69552+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69553+
69554+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69555+ if (is_vm_hugetlb_page(vma))
69556+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69557+ else
69558+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69559+ }
69560+}
69561+#endif
69562+
69563 int
69564 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69565 unsigned long start, unsigned long end, unsigned long newflags)
69566@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
69567 int error;
69568 int dirty_accountable = 0;
69569
69570+#ifdef CONFIG_PAX_SEGMEXEC
69571+ struct vm_area_struct *vma_m = NULL;
69572+ unsigned long start_m, end_m;
69573+
69574+ start_m = start + SEGMEXEC_TASK_SIZE;
69575+ end_m = end + SEGMEXEC_TASK_SIZE;
69576+#endif
69577+
69578 if (newflags == oldflags) {
69579 *pprev = vma;
69580 return 0;
69581 }
69582
69583+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69584+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69585+
69586+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69587+ return -ENOMEM;
69588+
69589+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69590+ return -ENOMEM;
69591+ }
69592+
69593 /*
69594 * If we make a private mapping writable we increase our commit;
69595 * but (without finer accounting) cannot reduce our commit if we
69596@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
69597 }
69598 }
69599
69600+#ifdef CONFIG_PAX_SEGMEXEC
69601+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69602+ if (start != vma->vm_start) {
69603+ error = split_vma(mm, vma, start, 1);
69604+ if (error)
69605+ goto fail;
69606+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69607+ *pprev = (*pprev)->vm_next;
69608+ }
69609+
69610+ if (end != vma->vm_end) {
69611+ error = split_vma(mm, vma, end, 0);
69612+ if (error)
69613+ goto fail;
69614+ }
69615+
69616+ if (pax_find_mirror_vma(vma)) {
69617+ error = __do_munmap(mm, start_m, end_m - start_m);
69618+ if (error)
69619+ goto fail;
69620+ } else {
69621+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69622+ if (!vma_m) {
69623+ error = -ENOMEM;
69624+ goto fail;
69625+ }
69626+ vma->vm_flags = newflags;
69627+ error = pax_mirror_vma(vma_m, vma);
69628+ if (error) {
69629+ vma->vm_flags = oldflags;
69630+ goto fail;
69631+ }
69632+ }
69633+ }
69634+#endif
69635+
69636 /*
69637 * First try to merge with previous and/or next vma.
69638 */
69639@@ -204,9 +306,21 @@ success:
69640 * vm_flags and vm_page_prot are protected by the mmap_sem
69641 * held in write mode.
69642 */
69643+
69644+#ifdef CONFIG_PAX_SEGMEXEC
69645+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69646+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69647+#endif
69648+
69649 vma->vm_flags = newflags;
69650+
69651+#ifdef CONFIG_PAX_MPROTECT
69652+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69653+ mm->binfmt->handle_mprotect(vma, newflags);
69654+#endif
69655+
69656 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69657- vm_get_page_prot(newflags));
69658+ vm_get_page_prot(vma->vm_flags));
69659
69660 if (vma_wants_writenotify(vma)) {
69661 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69662@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69663 end = start + len;
69664 if (end <= start)
69665 return -ENOMEM;
69666+
69667+#ifdef CONFIG_PAX_SEGMEXEC
69668+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69669+ if (end > SEGMEXEC_TASK_SIZE)
69670+ return -EINVAL;
69671+ } else
69672+#endif
69673+
69674+ if (end > TASK_SIZE)
69675+ return -EINVAL;
69676+
69677 if (!arch_validate_prot(prot))
69678 return -EINVAL;
69679
69680@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69681 /*
69682 * Does the application expect PROT_READ to imply PROT_EXEC:
69683 */
69684- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69685+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69686 prot |= PROT_EXEC;
69687
69688 vm_flags = calc_vm_prot_bits(prot);
69689@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69690 if (start > vma->vm_start)
69691 prev = vma;
69692
69693+#ifdef CONFIG_PAX_MPROTECT
69694+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69695+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69696+#endif
69697+
69698 for (nstart = start ; ; ) {
69699 unsigned long newflags;
69700
69701@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69702
69703 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69704 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69705+ if (prot & (PROT_WRITE | PROT_EXEC))
69706+ gr_log_rwxmprotect(vma->vm_file);
69707+
69708+ error = -EACCES;
69709+ goto out;
69710+ }
69711+
69712+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69713 error = -EACCES;
69714 goto out;
69715 }
69716@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69717 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69718 if (error)
69719 goto out;
69720+
69721+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69722+
69723 nstart = tmp;
69724
69725 if (nstart < prev->vm_end)
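A minimal standalone sketch (assumed types and a made-up gap value, not the kernel code) of the heap/stack-gap rule the mprotect_fixup() hunk above adds: making a region accessible is refused when it would leave less than sysctl_heap_stack_gap bytes before a downward-growing stack VMA (the patch applies the symmetric test to an upward-growing predecessor as well).

#include <stdbool.h>

struct fake_vma {
	unsigned long vm_start, vm_end;
	bool grows_down;			/* stands in for VM_GROWSDOWN */
};

/* stand-in for sysctl_heap_stack_gap (value is illustrative only) */
static const unsigned long heap_stack_gap = 64UL * 1024;

/* refuse to make [start, end) accessible if that would leave less than
   heap_stack_gap bytes before a downward-growing neighbour */
static bool gap_to_next_ok(const struct fake_vma *next, unsigned long end)
{
	if (!next || !next->grows_down)
		return true;
	return next->vm_start - end >= heap_stack_gap;
}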
69726diff -urNp linux-3.1.1/mm/mremap.c linux-3.1.1/mm/mremap.c
69727--- linux-3.1.1/mm/mremap.c 2011-11-11 15:19:27.000000000 -0500
69728+++ linux-3.1.1/mm/mremap.c 2011-11-16 18:39:08.000000000 -0500
69729@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
69730 continue;
69731 pte = ptep_clear_flush(vma, old_addr, old_pte);
69732 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69733+
69734+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69735+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69736+ pte = pte_exprotect(pte);
69737+#endif
69738+
69739 set_pte_at(mm, new_addr, new_pte, pte);
69740 }
69741
69742@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
69743 if (is_vm_hugetlb_page(vma))
69744 goto Einval;
69745
69746+#ifdef CONFIG_PAX_SEGMEXEC
69747+ if (pax_find_mirror_vma(vma))
69748+ goto Einval;
69749+#endif
69750+
69751 /* We can't remap across vm area boundaries */
69752 if (old_len > vma->vm_end - addr)
69753 goto Efault;
69754@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
69755 unsigned long ret = -EINVAL;
69756 unsigned long charged = 0;
69757 unsigned long map_flags;
69758+ unsigned long pax_task_size = TASK_SIZE;
69759
69760 if (new_addr & ~PAGE_MASK)
69761 goto out;
69762
69763- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69764+#ifdef CONFIG_PAX_SEGMEXEC
69765+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69766+ pax_task_size = SEGMEXEC_TASK_SIZE;
69767+#endif
69768+
69769+ pax_task_size -= PAGE_SIZE;
69770+
69771+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69772 goto out;
69773
69774 /* Check if the location we're moving into overlaps the
69775 * old location at all, and fail if it does.
69776 */
69777- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69778- goto out;
69779-
69780- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69781+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69782 goto out;
69783
69784 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69785@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
69786 struct vm_area_struct *vma;
69787 unsigned long ret = -EINVAL;
69788 unsigned long charged = 0;
69789+ unsigned long pax_task_size = TASK_SIZE;
69790
69791 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69792 goto out;
69793@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
69794 if (!new_len)
69795 goto out;
69796
69797+#ifdef CONFIG_PAX_SEGMEXEC
69798+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69799+ pax_task_size = SEGMEXEC_TASK_SIZE;
69800+#endif
69801+
69802+ pax_task_size -= PAGE_SIZE;
69803+
69804+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69805+ old_len > pax_task_size || addr > pax_task_size-old_len)
69806+ goto out;
69807+
69808 if (flags & MREMAP_FIXED) {
69809 if (flags & MREMAP_MAYMOVE)
69810 ret = mremap_to(addr, old_len, new_addr, new_len);
69811@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
69812 addr + new_len);
69813 }
69814 ret = addr;
69815+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69816 goto out;
69817 }
69818 }
69819@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
69820 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69821 if (ret)
69822 goto out;
69823+
69824+ map_flags = vma->vm_flags;
69825 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69826+ if (!(ret & ~PAGE_MASK)) {
69827+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69828+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69829+ }
69830 }
69831 out:
69832 if (ret & ~PAGE_MASK)
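The condition the mremap_to() hunk above collapses the two separate checks into is the standard half-open interval overlap test; a tiny sketch of the idea:

/* two half-open ranges [a, a + la) and [b, b + lb) overlap
   iff each range starts before the other one ends */
static int ranges_overlap(unsigned long a, unsigned long la,
			  unsigned long b, unsigned long lb)
{
	return a + la > b && b + lb > a;
}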
69833diff -urNp linux-3.1.1/mm/nobootmem.c linux-3.1.1/mm/nobootmem.c
69834--- linux-3.1.1/mm/nobootmem.c 2011-11-11 15:19:27.000000000 -0500
69835+++ linux-3.1.1/mm/nobootmem.c 2011-11-16 18:39:08.000000000 -0500
69836@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
69837 unsigned long __init free_all_memory_core_early(int nodeid)
69838 {
69839 int i;
69840- u64 start, end;
69841+ u64 start, end, startrange, endrange;
69842 unsigned long count = 0;
69843- struct range *range = NULL;
69844+ struct range *range = NULL, rangerange = { 0, 0 };
69845 int nr_range;
69846
69847 nr_range = get_free_all_memory_range(&range, nodeid);
69848+ startrange = __pa(range) >> PAGE_SHIFT;
69849+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
69850
69851 for (i = 0; i < nr_range; i++) {
69852 start = range[i].start;
69853 end = range[i].end;
69854+ if (start <= endrange && startrange < end) {
69855+ BUG_ON(rangerange.start | rangerange.end);
69856+ rangerange = range[i];
69857+ continue;
69858+ }
69859 count += end - start;
69860 __free_pages_memory(start, end);
69861 }
69862+ start = rangerange.start;
69863+ end = rangerange.end;
69864+ count += end - start;
69865+ __free_pages_memory(start, end);
69866
69867 return count;
69868 }
69869diff -urNp linux-3.1.1/mm/nommu.c linux-3.1.1/mm/nommu.c
69870--- linux-3.1.1/mm/nommu.c 2011-11-11 15:19:27.000000000 -0500
69871+++ linux-3.1.1/mm/nommu.c 2011-11-16 18:39:08.000000000 -0500
69872@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69873 int sysctl_overcommit_ratio = 50; /* default is 50% */
69874 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69875 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69876-int heap_stack_gap = 0;
69877
69878 atomic_long_t mmap_pages_allocated;
69879
69880@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct m
69881 EXPORT_SYMBOL(find_vma);
69882
69883 /*
69884- * find a VMA
69885- * - we don't extend stack VMAs under NOMMU conditions
69886- */
69887-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69888-{
69889- return find_vma(mm, addr);
69890-}
69891-
69892-/*
69893 * expand a stack to a given address
69894 * - not supported under NOMMU conditions
69895 */
69896@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, stru
69897
69898 /* most fields are the same, copy all, and then fixup */
69899 *new = *vma;
69900+ INIT_LIST_HEAD(&new->anon_vma_chain);
69901 *region = *vma->vm_region;
69902 new->vm_region = region;
69903
69904diff -urNp linux-3.1.1/mm/page_alloc.c linux-3.1.1/mm/page_alloc.c
69905--- linux-3.1.1/mm/page_alloc.c 2011-11-11 15:19:27.000000000 -0500
69906+++ linux-3.1.1/mm/page_alloc.c 2011-11-16 18:40:44.000000000 -0500
69907@@ -340,7 +340,7 @@ out:
69908 * This usage means that zero-order pages may not be compound.
69909 */
69910
69911-static void free_compound_page(struct page *page)
69912+void free_compound_page(struct page *page)
69913 {
69914 __free_pages_ok(page, compound_order(page));
69915 }
69916@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
69917 int i;
69918 int bad = 0;
69919
69920+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69921+ unsigned long index = 1UL << order;
69922+#endif
69923+
69924 trace_mm_page_free_direct(page, order);
69925 kmemcheck_free_shadow(page, order);
69926
69927@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
69928 debug_check_no_obj_freed(page_address(page),
69929 PAGE_SIZE << order);
69930 }
69931+
69932+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69933+ for (; index; --index)
69934+ sanitize_highpage(page + index - 1);
69935+#endif
69936+
69937 arch_free_page(page, order);
69938 kernel_map_pages(page, 1 << order, 0);
69939
69940@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
69941 arch_alloc_page(page, order);
69942 kernel_map_pages(page, 1 << order, 1);
69943
69944+#ifndef CONFIG_PAX_MEMORY_SANITIZE
69945 if (gfp_flags & __GFP_ZERO)
69946 prep_zero_page(page, order, gfp_flags);
69947+#endif
69948
69949 if (order && (gfp_flags & __GFP_COMP))
69950 prep_compound_page(page, order);
69951@@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter
69952 int cpu;
69953 struct zone *zone;
69954
69955+ pax_track_stack();
69956+
69957 for_each_populated_zone(zone) {
69958 if (skip_free_areas_node(filter, zone_to_nid(zone)))
69959 continue;
69960@@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigne
69961 unsigned long pfn;
69962
69963 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
69964+#ifdef CONFIG_X86_32
69965+	/* 32-bit vanilla kernels have seen boot failures in VMware 8
69966+	   since this change, so use the stricter pfn_valid() check */
69967+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
69968+#else
69969 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
69970+#endif
69971 return 1;
69972 }
69973 return 0;
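A simplified sketch (plain memset() and an assumed page size, not the highmem-aware sanitize_highpage() helper) of the sanitize-on-free idea in the free_pages_prepare()/prep_new_page() hunks above: every page of a freed allocation is wiped at free time, so __GFP_ZERO requests no longer need to be cleared again at allocation time.

#include <string.h>

#define FAKE_PAGE_SIZE 4096UL

/* wipe all 2^order pages of a freed allocation */
static void sanitize_on_free(void *first_page, unsigned int order)
{
	memset(first_page, 0, (1UL << order) * FAKE_PAGE_SIZE);
}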
69974diff -urNp linux-3.1.1/mm/percpu.c linux-3.1.1/mm/percpu.c
69975--- linux-3.1.1/mm/percpu.c 2011-11-11 15:19:27.000000000 -0500
69976+++ linux-3.1.1/mm/percpu.c 2011-11-16 18:39:08.000000000 -0500
69977@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
69978 static unsigned int pcpu_last_unit_cpu __read_mostly;
69979
69980 /* the address of the first chunk which starts with the kernel static area */
69981-void *pcpu_base_addr __read_mostly;
69982+void *pcpu_base_addr __read_only;
69983 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69984
69985 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69986diff -urNp linux-3.1.1/mm/rmap.c linux-3.1.1/mm/rmap.c
69987--- linux-3.1.1/mm/rmap.c 2011-11-11 15:19:27.000000000 -0500
69988+++ linux-3.1.1/mm/rmap.c 2011-11-16 18:39:08.000000000 -0500
69989@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_stru
69990 struct anon_vma *anon_vma = vma->anon_vma;
69991 struct anon_vma_chain *avc;
69992
69993+#ifdef CONFIG_PAX_SEGMEXEC
69994+ struct anon_vma_chain *avc_m = NULL;
69995+#endif
69996+
69997 might_sleep();
69998 if (unlikely(!anon_vma)) {
69999 struct mm_struct *mm = vma->vm_mm;
70000@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_stru
70001 if (!avc)
70002 goto out_enomem;
70003
70004+#ifdef CONFIG_PAX_SEGMEXEC
70005+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70006+ if (!avc_m)
70007+ goto out_enomem_free_avc;
70008+#endif
70009+
70010 anon_vma = find_mergeable_anon_vma(vma);
70011 allocated = NULL;
70012 if (!anon_vma) {
70013@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_stru
70014 /* page_table_lock to protect against threads */
70015 spin_lock(&mm->page_table_lock);
70016 if (likely(!vma->anon_vma)) {
70017+
70018+#ifdef CONFIG_PAX_SEGMEXEC
70019+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70020+
70021+ if (vma_m) {
70022+ BUG_ON(vma_m->anon_vma);
70023+ vma_m->anon_vma = anon_vma;
70024+ avc_m->anon_vma = anon_vma;
70025+ avc_m->vma = vma;
70026+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70027+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
70028+ avc_m = NULL;
70029+ }
70030+#endif
70031+
70032 vma->anon_vma = anon_vma;
70033 avc->anon_vma = anon_vma;
70034 avc->vma = vma;
70035@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_stru
70036
70037 if (unlikely(allocated))
70038 put_anon_vma(allocated);
70039+
70040+#ifdef CONFIG_PAX_SEGMEXEC
70041+ if (unlikely(avc_m))
70042+ anon_vma_chain_free(avc_m);
70043+#endif
70044+
70045 if (unlikely(avc))
70046 anon_vma_chain_free(avc);
70047 }
70048 return 0;
70049
70050 out_enomem_free_avc:
70051+
70052+#ifdef CONFIG_PAX_SEGMEXEC
70053+ if (avc_m)
70054+ anon_vma_chain_free(avc_m);
70055+#endif
70056+
70057 anon_vma_chain_free(avc);
70058 out_enomem:
70059 return -ENOMEM;
70060@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct v
70061 * Attach the anon_vmas from src to dst.
70062 * Returns 0 on success, -ENOMEM on failure.
70063 */
70064-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70065+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70066 {
70067 struct anon_vma_chain *avc, *pavc;
70068 struct anon_vma *root = NULL;
70069@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct
70070 * the corresponding VMA in the parent process is attached to.
70071 * Returns 0 on success, non-zero on failure.
70072 */
70073-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70074+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70075 {
70076 struct anon_vma_chain *avc;
70077 struct anon_vma *anon_vma;
70078diff -urNp linux-3.1.1/mm/shmem.c linux-3.1.1/mm/shmem.c
70079--- linux-3.1.1/mm/shmem.c 2011-11-11 15:19:27.000000000 -0500
70080+++ linux-3.1.1/mm/shmem.c 2011-11-16 19:28:28.000000000 -0500
70081@@ -31,7 +31,7 @@
70082 #include <linux/module.h>
70083 #include <linux/swap.h>
70084
70085-static struct vfsmount *shm_mnt;
70086+struct vfsmount *shm_mnt;
70087
70088 #ifdef CONFIG_SHMEM
70089 /*
70090@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70091 #define BOGO_DIRENT_SIZE 20
70092
70093 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70094-#define SHORT_SYMLINK_LEN 128
70095+#define SHORT_SYMLINK_LEN 64
70096
70097 struct shmem_xattr {
70098 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70099@@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_ent
70100 struct mempolicy mpol, *spol;
70101 struct vm_area_struct pvma;
70102
70103+ pax_track_stack();
70104+
70105 spol = mpol_cond_copy(&mpol,
70106 mpol_shared_policy_lookup(&info->policy, index));
70107
70108@@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block
70109 int err = -ENOMEM;
70110
70111 /* Round up to L1_CACHE_BYTES to resist false sharing */
70112- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70113- L1_CACHE_BYTES), GFP_KERNEL);
70114+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70115 if (!sbinfo)
70116 return -ENOMEM;
70117
70118diff -urNp linux-3.1.1/mm/slab.c linux-3.1.1/mm/slab.c
70119--- linux-3.1.1/mm/slab.c 2011-11-11 15:19:27.000000000 -0500
70120+++ linux-3.1.1/mm/slab.c 2011-11-16 18:40:44.000000000 -0500
70121@@ -151,7 +151,7 @@
70122
70123 /* Legal flag mask for kmem_cache_create(). */
70124 #if DEBUG
70125-# define CREATE_MASK (SLAB_RED_ZONE | \
70126+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70127 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70128 SLAB_CACHE_DMA | \
70129 SLAB_STORE_USER | \
70130@@ -159,7 +159,7 @@
70131 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70132 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70133 #else
70134-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70135+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70136 SLAB_CACHE_DMA | \
70137 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70138 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70139@@ -288,7 +288,7 @@ struct kmem_list3 {
70140 * Need this for bootstrapping a per node allocator.
70141 */
70142 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70143-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70144+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70145 #define CACHE_CACHE 0
70146 #define SIZE_AC MAX_NUMNODES
70147 #define SIZE_L3 (2 * MAX_NUMNODES)
70148@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
70149 if ((x)->max_freeable < i) \
70150 (x)->max_freeable = i; \
70151 } while (0)
70152-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70153-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70154-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70155-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70156+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70157+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70158+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70159+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70160 #else
70161 #define STATS_INC_ACTIVE(x) do { } while (0)
70162 #define STATS_DEC_ACTIVE(x) do { } while (0)
70163@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
70164 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70165 */
70166 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70167- const struct slab *slab, void *obj)
70168+ const struct slab *slab, const void *obj)
70169 {
70170 u32 offset = (obj - slab->s_mem);
70171 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70172@@ -564,7 +564,7 @@ struct cache_names {
70173 static struct cache_names __initdata cache_names[] = {
70174 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70175 #include <linux/kmalloc_sizes.h>
70176- {NULL,}
70177+ {NULL}
70178 #undef CACHE
70179 };
70180
70181@@ -1571,7 +1571,7 @@ void __init kmem_cache_init(void)
70182 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70183 sizes[INDEX_AC].cs_size,
70184 ARCH_KMALLOC_MINALIGN,
70185- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70186+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70187 NULL);
70188
70189 if (INDEX_AC != INDEX_L3) {
70190@@ -1579,7 +1579,7 @@ void __init kmem_cache_init(void)
70191 kmem_cache_create(names[INDEX_L3].name,
70192 sizes[INDEX_L3].cs_size,
70193 ARCH_KMALLOC_MINALIGN,
70194- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70195+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70196 NULL);
70197 }
70198
70199@@ -1597,7 +1597,7 @@ void __init kmem_cache_init(void)
70200 sizes->cs_cachep = kmem_cache_create(names->name,
70201 sizes->cs_size,
70202 ARCH_KMALLOC_MINALIGN,
70203- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70204+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70205 NULL);
70206 }
70207 #ifdef CONFIG_ZONE_DMA
70208@@ -4324,10 +4324,10 @@ static int s_show(struct seq_file *m, vo
70209 }
70210 /* cpu stats */
70211 {
70212- unsigned long allochit = atomic_read(&cachep->allochit);
70213- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70214- unsigned long freehit = atomic_read(&cachep->freehit);
70215- unsigned long freemiss = atomic_read(&cachep->freemiss);
70216+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70217+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70218+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70219+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70220
70221 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70222 allochit, allocmiss, freehit, freemiss);
70223@@ -4584,15 +4584,70 @@ static const struct file_operations proc
70224
70225 static int __init slab_proc_init(void)
70226 {
70227- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
70228+ mode_t gr_mode = S_IRUGO;
70229+
70230+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70231+ gr_mode = S_IRUSR;
70232+#endif
70233+
70234+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70235 #ifdef CONFIG_DEBUG_SLAB_LEAK
70236- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70237+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70238 #endif
70239 return 0;
70240 }
70241 module_init(slab_proc_init);
70242 #endif
70243
70244+void check_object_size(const void *ptr, unsigned long n, bool to)
70245+{
70246+
70247+#ifdef CONFIG_PAX_USERCOPY
70248+ struct page *page;
70249+ struct kmem_cache *cachep = NULL;
70250+ struct slab *slabp;
70251+ unsigned int objnr;
70252+ unsigned long offset;
70253+ const char *type;
70254+
70255+ if (!n)
70256+ return;
70257+
70258+ type = "<null>";
70259+ if (ZERO_OR_NULL_PTR(ptr))
70260+ goto report;
70261+
70262+ if (!virt_addr_valid(ptr))
70263+ return;
70264+
70265+ page = virt_to_head_page(ptr);
70266+
70267+ type = "<process stack>";
70268+ if (!PageSlab(page)) {
70269+ if (object_is_on_stack(ptr, n) == -1)
70270+ goto report;
70271+ return;
70272+ }
70273+
70274+ cachep = page_get_cache(page);
70275+ type = cachep->name;
70276+ if (!(cachep->flags & SLAB_USERCOPY))
70277+ goto report;
70278+
70279+ slabp = page_get_slab(page);
70280+ objnr = obj_to_index(cachep, slabp, ptr);
70281+ BUG_ON(objnr >= cachep->num);
70282+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70283+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70284+ return;
70285+
70286+report:
70287+ pax_report_usercopy(ptr, n, to, type);
70288+#endif
70289+
70290+}
70291+EXPORT_SYMBOL(check_object_size);
70292+
70293 /**
70294 * ksize - get the actual amount of memory allocated for a given object
70295 * @objp: Pointer to the object
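The core of the check_object_size() addition above is a simple bounds test; a hedged sketch with hypothetical names (the pointer's offset inside its slab object, the object size, and the requested copy length):

/* a copy of n bytes starting at offset inside an object of obj_size
   bytes is allowed only if it stays within that object */
static int usercopy_fits(unsigned long offset, unsigned long n,
			 unsigned long obj_size)
{
	return offset <= obj_size && n <= obj_size - offset;
}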
70296diff -urNp linux-3.1.1/mm/slob.c linux-3.1.1/mm/slob.c
70297--- linux-3.1.1/mm/slob.c 2011-11-11 15:19:27.000000000 -0500
70298+++ linux-3.1.1/mm/slob.c 2011-11-16 18:39:08.000000000 -0500
70299@@ -29,7 +29,7 @@
70300 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70301 * alloc_pages() directly, allocating compound pages so the page order
70302 * does not have to be separately tracked, and also stores the exact
70303- * allocation size in page->private so that it can be used to accurately
70304+ * allocation size in slob_page->size so that it can be used to accurately
70305 * provide ksize(). These objects are detected in kfree() because slob_page()
70306 * is false for them.
70307 *
70308@@ -58,6 +58,7 @@
70309 */
70310
70311 #include <linux/kernel.h>
70312+#include <linux/sched.h>
70313 #include <linux/slab.h>
70314 #include <linux/mm.h>
70315 #include <linux/swap.h> /* struct reclaim_state */
70316@@ -102,7 +103,8 @@ struct slob_page {
70317 unsigned long flags; /* mandatory */
70318 atomic_t _count; /* mandatory */
70319 slobidx_t units; /* free units left in page */
70320- unsigned long pad[2];
70321+ unsigned long pad[1];
70322+ unsigned long size; /* size when >=PAGE_SIZE */
70323 slob_t *free; /* first free slob_t in page */
70324 struct list_head list; /* linked list of free pages */
70325 };
70326@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70327 */
70328 static inline int is_slob_page(struct slob_page *sp)
70329 {
70330- return PageSlab((struct page *)sp);
70331+ return PageSlab((struct page *)sp) && !sp->size;
70332 }
70333
70334 static inline void set_slob_page(struct slob_page *sp)
70335@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
70336
70337 static inline struct slob_page *slob_page(const void *addr)
70338 {
70339- return (struct slob_page *)virt_to_page(addr);
70340+ return (struct slob_page *)virt_to_head_page(addr);
70341 }
70342
70343 /*
70344@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
70345 /*
70346 * Return the size of a slob block.
70347 */
70348-static slobidx_t slob_units(slob_t *s)
70349+static slobidx_t slob_units(const slob_t *s)
70350 {
70351 if (s->units > 0)
70352 return s->units;
70353@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70354 /*
70355 * Return the next free slob block pointer after this one.
70356 */
70357-static slob_t *slob_next(slob_t *s)
70358+static slob_t *slob_next(const slob_t *s)
70359 {
70360 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70361 slobidx_t next;
70362@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70363 /*
70364 * Returns true if s is the last free block in its page.
70365 */
70366-static int slob_last(slob_t *s)
70367+static int slob_last(const slob_t *s)
70368 {
70369 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70370 }
70371@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
70372 if (!page)
70373 return NULL;
70374
70375+ set_slob_page(page);
70376 return page_address(page);
70377 }
70378
70379@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
70380 if (!b)
70381 return NULL;
70382 sp = slob_page(b);
70383- set_slob_page(sp);
70384
70385 spin_lock_irqsave(&slob_lock, flags);
70386 sp->units = SLOB_UNITS(PAGE_SIZE);
70387 sp->free = b;
70388+ sp->size = 0;
70389 INIT_LIST_HEAD(&sp->list);
70390 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70391 set_slob_page_free(sp, slob_list);
70392@@ -476,10 +479,9 @@ out:
70393 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70394 */
70395
70396-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70397+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70398 {
70399- unsigned int *m;
70400- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70401+ slob_t *m;
70402 void *ret;
70403
70404 gfp &= gfp_allowed_mask;
70405@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t
70406
70407 if (!m)
70408 return NULL;
70409- *m = size;
70410+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70411+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70412+ m[0].units = size;
70413+ m[1].units = align;
70414 ret = (void *)m + align;
70415
70416 trace_kmalloc_node(_RET_IP_, ret,
70417@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t
70418 gfp |= __GFP_COMP;
70419 ret = slob_new_pages(gfp, order, node);
70420 if (ret) {
70421- struct page *page;
70422- page = virt_to_page(ret);
70423- page->private = size;
70424+ struct slob_page *sp;
70425+ sp = slob_page(ret);
70426+ sp->size = size;
70427 }
70428
70429 trace_kmalloc_node(_RET_IP_, ret,
70430 size, PAGE_SIZE << order, gfp, node);
70431 }
70432
70433- kmemleak_alloc(ret, size, 1, gfp);
70434+ return ret;
70435+}
70436+
70437+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70438+{
70439+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70440+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70441+
70442+ if (!ZERO_OR_NULL_PTR(ret))
70443+ kmemleak_alloc(ret, size, 1, gfp);
70444 return ret;
70445 }
70446 EXPORT_SYMBOL(__kmalloc_node);
70447@@ -533,13 +547,92 @@ void kfree(const void *block)
70448 sp = slob_page(block);
70449 if (is_slob_page(sp)) {
70450 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70451- unsigned int *m = (unsigned int *)(block - align);
70452- slob_free(m, *m + align);
70453- } else
70454+ slob_t *m = (slob_t *)(block - align);
70455+ slob_free(m, m[0].units + align);
70456+ } else {
70457+ clear_slob_page(sp);
70458+ free_slob_page(sp);
70459+ sp->size = 0;
70460 put_page(&sp->page);
70461+ }
70462 }
70463 EXPORT_SYMBOL(kfree);
70464
70465+void check_object_size(const void *ptr, unsigned long n, bool to)
70466+{
70467+
70468+#ifdef CONFIG_PAX_USERCOPY
70469+ struct slob_page *sp;
70470+ const slob_t *free;
70471+ const void *base;
70472+ unsigned long flags;
70473+ const char *type;
70474+
70475+ if (!n)
70476+ return;
70477+
70478+ type = "<null>";
70479+ if (ZERO_OR_NULL_PTR(ptr))
70480+ goto report;
70481+
70482+ if (!virt_addr_valid(ptr))
70483+ return;
70484+
70485+ type = "<process stack>";
70486+ sp = slob_page(ptr);
70487+ if (!PageSlab((struct page*)sp)) {
70488+ if (object_is_on_stack(ptr, n) == -1)
70489+ goto report;
70490+ return;
70491+ }
70492+
70493+ type = "<slob>";
70494+ if (sp->size) {
70495+ base = page_address(&sp->page);
70496+ if (base <= ptr && n <= sp->size - (ptr - base))
70497+ return;
70498+ goto report;
70499+ }
70500+
70501+	/* walk the free list up to ptr, then the allocated chunks after it, to find the chunk holding ptr */
70502+ spin_lock_irqsave(&slob_lock, flags);
70503+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70504+ free = sp->free;
70505+
70506+ while (!slob_last(free) && (void *)free <= ptr) {
70507+ base = free + slob_units(free);
70508+ free = slob_next(free);
70509+ }
70510+
70511+ while (base < (void *)free) {
70512+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70513+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70514+ int offset;
70515+
70516+ if (ptr < base + align)
70517+ break;
70518+
70519+ offset = ptr - base - align;
70520+ if (offset >= m) {
70521+ base += size;
70522+ continue;
70523+ }
70524+
70525+ if (n > m - offset)
70526+ break;
70527+
70528+ spin_unlock_irqrestore(&slob_lock, flags);
70529+ return;
70530+ }
70531+
70532+ spin_unlock_irqrestore(&slob_lock, flags);
70533+report:
70534+ pax_report_usercopy(ptr, n, to, type);
70535+#endif
70536+
70537+}
70538+EXPORT_SYMBOL(check_object_size);
70539+
70540 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70541 size_t ksize(const void *block)
70542 {
70543@@ -552,10 +645,10 @@ size_t ksize(const void *block)
70544 sp = slob_page(block);
70545 if (is_slob_page(sp)) {
70546 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70547- unsigned int *m = (unsigned int *)(block - align);
70548- return SLOB_UNITS(*m) * SLOB_UNIT;
70549+ slob_t *m = (slob_t *)(block - align);
70550+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70551 } else
70552- return sp->page.private;
70553+ return sp->size;
70554 }
70555 EXPORT_SYMBOL(ksize);
70556
70557@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(con
70558 {
70559 struct kmem_cache *c;
70560
70561+#ifdef CONFIG_PAX_USERCOPY
70562+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70563+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70564+#else
70565 c = slob_alloc(sizeof(struct kmem_cache),
70566 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70567+#endif
70568
70569 if (c) {
70570 c->name = name;
70571@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_
70572
70573 lockdep_trace_alloc(flags);
70574
70575+#ifdef CONFIG_PAX_USERCOPY
70576+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70577+#else
70578 if (c->size < PAGE_SIZE) {
70579 b = slob_alloc(c->size, flags, c->align, node);
70580 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70581 SLOB_UNITS(c->size) * SLOB_UNIT,
70582 flags, node);
70583 } else {
70584+ struct slob_page *sp;
70585+
70586 b = slob_new_pages(flags, get_order(c->size), node);
70587+ sp = slob_page(b);
70588+ sp->size = c->size;
70589 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70590 PAGE_SIZE << get_order(c->size),
70591 flags, node);
70592 }
70593+#endif
70594
70595 if (c->ctor)
70596 c->ctor(b);
70597@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70598
70599 static void __kmem_cache_free(void *b, int size)
70600 {
70601- if (size < PAGE_SIZE)
70602+ struct slob_page *sp = slob_page(b);
70603+
70604+ if (is_slob_page(sp))
70605 slob_free(b, size);
70606- else
70607+ else {
70608+ clear_slob_page(sp);
70609+ free_slob_page(sp);
70610+ sp->size = 0;
70611 slob_free_pages(b, get_order(size));
70612+ }
70613 }
70614
70615 static void kmem_rcu_free(struct rcu_head *head)
70616@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_hea
70617
70618 void kmem_cache_free(struct kmem_cache *c, void *b)
70619 {
70620+ int size = c->size;
70621+
70622+#ifdef CONFIG_PAX_USERCOPY
70623+ if (size + c->align < PAGE_SIZE) {
70624+ size += c->align;
70625+ b -= c->align;
70626+ }
70627+#endif
70628+
70629 kmemleak_free_recursive(b, c->flags);
70630 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70631 struct slob_rcu *slob_rcu;
70632- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70633- slob_rcu->size = c->size;
70634+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70635+ slob_rcu->size = size;
70636 call_rcu(&slob_rcu->head, kmem_rcu_free);
70637 } else {
70638- __kmem_cache_free(b, c->size);
70639+ __kmem_cache_free(b, size);
70640 }
70641
70642+#ifdef CONFIG_PAX_USERCOPY
70643+ trace_kfree(_RET_IP_, b);
70644+#else
70645 trace_kmem_cache_free(_RET_IP_, b);
70646+#endif
70647+
70648 }
70649 EXPORT_SYMBOL(kmem_cache_free);
70650
70651diff -urNp linux-3.1.1/mm/slub.c linux-3.1.1/mm/slub.c
70652--- linux-3.1.1/mm/slub.c 2011-11-11 15:19:27.000000000 -0500
70653+++ linux-3.1.1/mm/slub.c 2011-11-16 19:27:25.000000000 -0500
70654@@ -208,7 +208,7 @@ struct track {
70655
70656 enum track_item { TRACK_ALLOC, TRACK_FREE };
70657
70658-#ifdef CONFIG_SYSFS
70659+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70660 static int sysfs_slab_add(struct kmem_cache *);
70661 static int sysfs_slab_alias(struct kmem_cache *, const char *);
70662 static void sysfs_slab_remove(struct kmem_cache *);
70663@@ -556,7 +556,7 @@ static void print_track(const char *s, s
70664 if (!t->addr)
70665 return;
70666
70667- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70668+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70669 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70670 #ifdef CONFIG_STACKTRACE
70671 {
70672@@ -2456,6 +2456,8 @@ void kmem_cache_free(struct kmem_cache *
70673
70674 page = virt_to_head_page(x);
70675
70676+ BUG_ON(!PageSlab(page));
70677+
70678 slab_free(s, page, x, _RET_IP_);
70679
70680 trace_kmem_cache_free(_RET_IP_, x);
70681@@ -2489,7 +2491,7 @@ static int slub_min_objects;
70682 * Merge control. If this is set then no merging of slab caches will occur.
70683 * (Could be removed. This was introduced to pacify the merge skeptics.)
70684 */
70685-static int slub_nomerge;
70686+static int slub_nomerge = 1;
70687
70688 /*
70689 * Calculate the order of allocation given an slab object size.
70690@@ -2912,7 +2914,7 @@ static int kmem_cache_open(struct kmem_c
70691 * list to avoid pounding the page allocator excessively.
70692 */
70693 set_min_partial(s, ilog2(s->size));
70694- s->refcount = 1;
70695+ atomic_set(&s->refcount, 1);
70696 #ifdef CONFIG_NUMA
70697 s->remote_node_defrag_ratio = 1000;
70698 #endif
70699@@ -3017,8 +3019,7 @@ static inline int kmem_cache_close(struc
70700 void kmem_cache_destroy(struct kmem_cache *s)
70701 {
70702 down_write(&slub_lock);
70703- s->refcount--;
70704- if (!s->refcount) {
70705+ if (atomic_dec_and_test(&s->refcount)) {
70706 list_del(&s->list);
70707 if (kmem_cache_close(s)) {
70708 printk(KERN_ERR "SLUB %s: %s called for cache that "
70709@@ -3228,6 +3229,50 @@ void *__kmalloc_node(size_t size, gfp_t
70710 EXPORT_SYMBOL(__kmalloc_node);
70711 #endif
70712
70713+void check_object_size(const void *ptr, unsigned long n, bool to)
70714+{
70715+
70716+#ifdef CONFIG_PAX_USERCOPY
70717+ struct page *page;
70718+ struct kmem_cache *s = NULL;
70719+ unsigned long offset;
70720+ const char *type;
70721+
70722+ if (!n)
70723+ return;
70724+
70725+ type = "<null>";
70726+ if (ZERO_OR_NULL_PTR(ptr))
70727+ goto report;
70728+
70729+ if (!virt_addr_valid(ptr))
70730+ return;
70731+
70732+ page = virt_to_head_page(ptr);
70733+
70734+ type = "<process stack>";
70735+ if (!PageSlab(page)) {
70736+ if (object_is_on_stack(ptr, n) == -1)
70737+ goto report;
70738+ return;
70739+ }
70740+
70741+ s = page->slab;
70742+ type = s->name;
70743+ if (!(s->flags & SLAB_USERCOPY))
70744+ goto report;
70745+
70746+ offset = (ptr - page_address(page)) % s->size;
70747+ if (offset <= s->objsize && n <= s->objsize - offset)
70748+ return;
70749+
70750+report:
70751+ pax_report_usercopy(ptr, n, to, type);
70752+#endif
70753+
70754+}
70755+EXPORT_SYMBOL(check_object_size);
70756+
70757 size_t ksize(const void *object)
70758 {
70759 struct page *page;
70760@@ -3502,7 +3547,7 @@ static void __init kmem_cache_bootstrap_
70761 int node;
70762
70763 list_add(&s->list, &slab_caches);
70764- s->refcount = -1;
70765+ atomic_set(&s->refcount, -1);
70766
70767 for_each_node_state(node, N_NORMAL_MEMORY) {
70768 struct kmem_cache_node *n = get_node(s, node);
70769@@ -3619,17 +3664,17 @@ void __init kmem_cache_init(void)
70770
70771 /* Caches that are not of the two-to-the-power-of size */
70772 if (KMALLOC_MIN_SIZE <= 32) {
70773- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
70774+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
70775 caches++;
70776 }
70777
70778 if (KMALLOC_MIN_SIZE <= 64) {
70779- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
70780+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
70781 caches++;
70782 }
70783
70784 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70785- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
70786+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
70787 caches++;
70788 }
70789
70790@@ -3697,7 +3742,7 @@ static int slab_unmergeable(struct kmem_
70791 /*
70792 * We may have set a slab to be unmergeable during bootstrap.
70793 */
70794- if (s->refcount < 0)
70795+ if (atomic_read(&s->refcount) < 0)
70796 return 1;
70797
70798 return 0;
70799@@ -3756,7 +3801,7 @@ struct kmem_cache *kmem_cache_create(con
70800 down_write(&slub_lock);
70801 s = find_mergeable(size, align, flags, name, ctor);
70802 if (s) {
70803- s->refcount++;
70804+ atomic_inc(&s->refcount);
70805 /*
70806 * Adjust the object sizes so that we clear
70807 * the complete object on kzalloc.
70808@@ -3765,7 +3810,7 @@ struct kmem_cache *kmem_cache_create(con
70809 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
70810
70811 if (sysfs_slab_alias(s, name)) {
70812- s->refcount--;
70813+ atomic_dec(&s->refcount);
70814 goto err;
70815 }
70816 up_write(&slub_lock);
70817@@ -3893,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t
70818 }
70819 #endif
70820
70821-#ifdef CONFIG_SYSFS
70822+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70823 static int count_inuse(struct page *page)
70824 {
70825 return page->inuse;
70826@@ -4280,12 +4325,12 @@ static void resiliency_test(void)
70827 validate_slab_cache(kmalloc_caches[9]);
70828 }
70829 #else
70830-#ifdef CONFIG_SYSFS
70831+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70832 static void resiliency_test(void) {};
70833 #endif
70834 #endif
70835
70836-#ifdef CONFIG_SYSFS
70837+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70838 enum slab_stat_type {
70839 SL_ALL, /* All slabs */
70840 SL_PARTIAL, /* Only partially allocated slabs */
70841@@ -4495,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
70842
70843 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70844 {
70845- return sprintf(buf, "%d\n", s->refcount - 1);
70846+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70847 }
70848 SLAB_ATTR_RO(aliases);
70849
70850@@ -5025,6 +5070,7 @@ static char *create_unique_id(struct kme
70851 return name;
70852 }
70853
70854+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70855 static int sysfs_slab_add(struct kmem_cache *s)
70856 {
70857 int err;
70858@@ -5087,6 +5133,7 @@ static void sysfs_slab_remove(struct kme
70859 kobject_del(&s->kobj);
70860 kobject_put(&s->kobj);
70861 }
70862+#endif
70863
70864 /*
70865 * Need to buffer aliases during bootup until sysfs becomes
70866@@ -5100,6 +5147,7 @@ struct saved_alias {
70867
70868 static struct saved_alias *alias_list;
70869
70870+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70871 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
70872 {
70873 struct saved_alias *al;
70874@@ -5122,6 +5170,7 @@ static int sysfs_slab_alias(struct kmem_
70875 alias_list = al;
70876 return 0;
70877 }
70878+#endif
70879
70880 static int __init slab_sysfs_init(void)
70881 {
70882@@ -5257,7 +5306,13 @@ static const struct file_operations proc
70883
70884 static int __init slab_proc_init(void)
70885 {
70886- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70887+ mode_t gr_mode = S_IRUGO;
70888+
70889+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70890+ gr_mode = S_IRUSR;
70891+#endif
70892+
70893+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70894 return 0;
70895 }
70896 module_init(slab_proc_init);
70897diff -urNp linux-3.1.1/mm/swap.c linux-3.1.1/mm/swap.c
70898--- linux-3.1.1/mm/swap.c 2011-11-11 15:19:27.000000000 -0500
70899+++ linux-3.1.1/mm/swap.c 2011-11-16 18:39:08.000000000 -0500
70900@@ -31,6 +31,7 @@
70901 #include <linux/backing-dev.h>
70902 #include <linux/memcontrol.h>
70903 #include <linux/gfp.h>
70904+#include <linux/hugetlb.h>
70905
70906 #include "internal.h"
70907
70908@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
70909
70910 __page_cache_release(page);
70911 dtor = get_compound_page_dtor(page);
70912+ if (!PageHuge(page))
70913+ BUG_ON(dtor != free_compound_page);
70914 (*dtor)(page);
70915 }
70916
70917diff -urNp linux-3.1.1/mm/swapfile.c linux-3.1.1/mm/swapfile.c
70918--- linux-3.1.1/mm/swapfile.c 2011-11-11 15:19:27.000000000 -0500
70919+++ linux-3.1.1/mm/swapfile.c 2011-11-16 18:39:08.000000000 -0500
70920@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
70921
70922 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
70923 /* Activity counter to indicate that a swapon or swapoff has occurred */
70924-static atomic_t proc_poll_event = ATOMIC_INIT(0);
70925+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
70926
70927 static inline unsigned char swap_count(unsigned char ent)
70928 {
70929@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
70930 }
70931 filp_close(swap_file, NULL);
70932 err = 0;
70933- atomic_inc(&proc_poll_event);
70934+ atomic_inc_unchecked(&proc_poll_event);
70935 wake_up_interruptible(&proc_poll_wait);
70936
70937 out_dput:
70938@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *
70939
70940 poll_wait(file, &proc_poll_wait, wait);
70941
70942- if (seq->poll_event != atomic_read(&proc_poll_event)) {
70943- seq->poll_event = atomic_read(&proc_poll_event);
70944+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
70945+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
70946 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
70947 }
70948
70949@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inod
70950 return ret;
70951
70952 seq = file->private_data;
70953- seq->poll_event = atomic_read(&proc_poll_event);
70954+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
70955 return 0;
70956 }
70957
70958@@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __use
70959 (p->flags & SWP_DISCARDABLE) ? "D" : "");
70960
70961 mutex_unlock(&swapon_mutex);
70962- atomic_inc(&proc_poll_event);
70963+ atomic_inc_unchecked(&proc_poll_event);
70964 wake_up_interruptible(&proc_poll_wait);
70965
70966 if (S_ISREG(inode->i_mode))
70967diff -urNp linux-3.1.1/mm/util.c linux-3.1.1/mm/util.c
70968--- linux-3.1.1/mm/util.c 2011-11-11 15:19:27.000000000 -0500
70969+++ linux-3.1.1/mm/util.c 2011-11-16 18:39:08.000000000 -0500
70970@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
70971 * allocated buffer. Use this if you don't want to free the buffer immediately
70972 * like, for example, with RCU.
70973 */
70974+#undef __krealloc
70975 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
70976 {
70977 void *ret;
70978@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
70979 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
70980 * %NULL pointer, the object pointed to is freed.
70981 */
70982+#undef krealloc
70983 void *krealloc(const void *p, size_t new_size, gfp_t flags)
70984 {
70985 void *ret;
70986@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
70987 void arch_pick_mmap_layout(struct mm_struct *mm)
70988 {
70989 mm->mmap_base = TASK_UNMAPPED_BASE;
70990+
70991+#ifdef CONFIG_PAX_RANDMMAP
70992+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70993+ mm->mmap_base += mm->delta_mmap;
70994+#endif
70995+
70996 mm->get_unmapped_area = arch_get_unmapped_area;
70997 mm->unmap_area = arch_unmap_area;
70998 }
70999diff -urNp linux-3.1.1/mm/vmalloc.c linux-3.1.1/mm/vmalloc.c
71000--- linux-3.1.1/mm/vmalloc.c 2011-11-11 15:19:27.000000000 -0500
71001+++ linux-3.1.1/mm/vmalloc.c 2011-11-16 18:40:44.000000000 -0500
71002@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
71003
71004 pte = pte_offset_kernel(pmd, addr);
71005 do {
71006- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71007- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71008+
71009+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71010+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71011+ BUG_ON(!pte_exec(*pte));
71012+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71013+ continue;
71014+ }
71015+#endif
71016+
71017+ {
71018+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71019+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71020+ }
71021 } while (pte++, addr += PAGE_SIZE, addr != end);
71022 }
71023
71024@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
71025 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71026 {
71027 pte_t *pte;
71028+ int ret = -ENOMEM;
71029
71030 /*
71031 * nr is a running index into the array which helps higher level
71032@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
71033 pte = pte_alloc_kernel(pmd, addr);
71034 if (!pte)
71035 return -ENOMEM;
71036+
71037+ pax_open_kernel();
71038 do {
71039 struct page *page = pages[*nr];
71040
71041- if (WARN_ON(!pte_none(*pte)))
71042- return -EBUSY;
71043- if (WARN_ON(!page))
71044- return -ENOMEM;
71045+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71046+ if (pgprot_val(prot) & _PAGE_NX)
71047+#endif
71048+
71049+ if (WARN_ON(!pte_none(*pte))) {
71050+ ret = -EBUSY;
71051+ goto out;
71052+ }
71053+ if (WARN_ON(!page)) {
71054+ ret = -ENOMEM;
71055+ goto out;
71056+ }
71057 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71058 (*nr)++;
71059 } while (pte++, addr += PAGE_SIZE, addr != end);
71060- return 0;
71061+ ret = 0;
71062+out:
71063+ pax_close_kernel();
71064+ return ret;
71065 }
71066
71067 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71068@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
71069 * and fall back on vmalloc() if that fails. Others
71070 * just put it in the vmalloc space.
71071 */
71072-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71073+#ifdef CONFIG_MODULES
71074+#ifdef MODULES_VADDR
71075 unsigned long addr = (unsigned long)x;
71076 if (addr >= MODULES_VADDR && addr < MODULES_END)
71077 return 1;
71078 #endif
71079+
71080+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71081+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71082+ return 1;
71083+#endif
71084+
71085+#endif
71086+
71087 return is_vmalloc_addr(x);
71088 }
71089
71090@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
71091
71092 if (!pgd_none(*pgd)) {
71093 pud_t *pud = pud_offset(pgd, addr);
71094+#ifdef CONFIG_X86
71095+ if (!pud_large(*pud))
71096+#endif
71097 if (!pud_none(*pud)) {
71098 pmd_t *pmd = pmd_offset(pud, addr);
71099+#ifdef CONFIG_X86
71100+ if (!pmd_large(*pmd))
71101+#endif
71102 if (!pmd_none(*pmd)) {
71103 pte_t *ptep, pte;
71104
71105@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_n
71106 struct vm_struct *area;
71107
71108 BUG_ON(in_interrupt());
71109+
71110+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71111+ if (flags & VM_KERNEXEC) {
71112+ if (start != VMALLOC_START || end != VMALLOC_END)
71113+ return NULL;
71114+ start = (unsigned long)MODULES_EXEC_VADDR;
71115+ end = (unsigned long)MODULES_EXEC_END;
71116+ }
71117+#endif
71118+
71119 if (flags & VM_IOREMAP) {
71120 int bit = fls(size);
71121
71122@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned
71123 if (count > totalram_pages)
71124 return NULL;
71125
71126+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71127+ if (!(pgprot_val(prot) & _PAGE_NX))
71128+ flags |= VM_KERNEXEC;
71129+#endif
71130+
71131 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71132 __builtin_return_address(0));
71133 if (!area)
71134@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long
71135 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71136 return NULL;
71137
71138+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71139+ if (!(pgprot_val(prot) & _PAGE_NX))
71140+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71141+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71142+ else
71143+#endif
71144+
71145 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71146 start, end, node, gfp_mask, caller);
71147
71148@@ -1672,6 +1734,7 @@ static void *__vmalloc_node(unsigned lon
71149 gfp_mask, prot, node, caller);
71150 }
71151
71152+#undef __vmalloc
71153 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71154 {
71155 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71156@@ -1695,6 +1758,7 @@ static inline void *__vmalloc_node_flags
71157 * For tight control over page level allocator and protection flags
71158 * use __vmalloc() instead.
71159 */
71160+#undef vmalloc
71161 void *vmalloc(unsigned long size)
71162 {
71163 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71164@@ -1711,6 +1775,7 @@ EXPORT_SYMBOL(vmalloc);
71165 * For tight control over page level allocator and protection flags
71166 * use __vmalloc() instead.
71167 */
71168+#undef vzalloc
71169 void *vzalloc(unsigned long size)
71170 {
71171 return __vmalloc_node_flags(size, -1,
71172@@ -1725,6 +1790,7 @@ EXPORT_SYMBOL(vzalloc);
71173 * The resulting memory area is zeroed so it can be mapped to userspace
71174 * without leaking data.
71175 */
71176+#undef vmalloc_user
71177 void *vmalloc_user(unsigned long size)
71178 {
71179 struct vm_struct *area;
71180@@ -1752,6 +1818,7 @@ EXPORT_SYMBOL(vmalloc_user);
71181 * For tight control over page level allocator and protection flags
71182 * use __vmalloc() instead.
71183 */
71184+#undef vmalloc_node
71185 void *vmalloc_node(unsigned long size, int node)
71186 {
71187 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71188@@ -1771,6 +1838,7 @@ EXPORT_SYMBOL(vmalloc_node);
71189 * For tight control over page level allocator and protection flags
71190 * use __vmalloc_node() instead.
71191 */
71192+#undef vzalloc_node
71193 void *vzalloc_node(unsigned long size, int node)
71194 {
71195 return __vmalloc_node_flags(size, node,
71196@@ -1793,10 +1861,10 @@ EXPORT_SYMBOL(vzalloc_node);
71197 * For tight control over page level allocator and protection flags
71198 * use __vmalloc() instead.
71199 */
71200-
71201+#undef vmalloc_exec
71202 void *vmalloc_exec(unsigned long size)
71203 {
71204- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71205+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71206 -1, __builtin_return_address(0));
71207 }
71208
71209@@ -1815,6 +1883,7 @@ void *vmalloc_exec(unsigned long size)
71210 * Allocate enough 32bit PA addressable pages to cover @size from the
71211 * page level allocator and map them into contiguous kernel virtual space.
71212 */
71213+#undef vmalloc_32
71214 void *vmalloc_32(unsigned long size)
71215 {
71216 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71217@@ -1829,6 +1898,7 @@ EXPORT_SYMBOL(vmalloc_32);
71218 * The resulting memory area is 32bit addressable and zeroed so it can be
71219 * mapped to userspace without leaking data.
71220 */
71221+#undef vmalloc_32_user
71222 void *vmalloc_32_user(unsigned long size)
71223 {
71224 struct vm_struct *area;
71225@@ -2091,6 +2161,8 @@ int remap_vmalloc_range(struct vm_area_s
71226 unsigned long uaddr = vma->vm_start;
71227 unsigned long usize = vma->vm_end - vma->vm_start;
71228
71229+ BUG_ON(vma->vm_mirror);
71230+
71231 if ((PAGE_SIZE-1) & (unsigned long)addr)
71232 return -EINVAL;
71233
71234diff -urNp linux-3.1.1/mm/vmstat.c linux-3.1.1/mm/vmstat.c
71235--- linux-3.1.1/mm/vmstat.c 2011-11-11 15:19:27.000000000 -0500
71236+++ linux-3.1.1/mm/vmstat.c 2011-11-16 18:40:44.000000000 -0500
71237@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71238 *
71239 * vm_stat contains the global counters
71240 */
71241-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71242+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71243 EXPORT_SYMBOL(vm_stat);
71244
71245 #ifdef CONFIG_SMP
71246@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71247 v = p->vm_stat_diff[i];
71248 p->vm_stat_diff[i] = 0;
71249 local_irq_restore(flags);
71250- atomic_long_add(v, &zone->vm_stat[i]);
71251+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71252 global_diff[i] += v;
71253 #ifdef CONFIG_NUMA
71254 /* 3 seconds idle till flush */
71255@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71256
71257 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71258 if (global_diff[i])
71259- atomic_long_add(global_diff[i], &vm_stat[i]);
71260+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71261 }
71262
71263 #endif
71264@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
71265 start_cpu_timer(cpu);
71266 #endif
71267 #ifdef CONFIG_PROC_FS
71268- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71269- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71270- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71271- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71272+ {
71273+ mode_t gr_mode = S_IRUGO;
71274+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71275+ gr_mode = S_IRUSR;
71276+#endif
71277+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71278+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71279+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71280+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71281+#else
71282+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71283+#endif
71284+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71285+ }
71286 #endif
71287 return 0;
71288 }
71289diff -urNp linux-3.1.1/net/8021q/vlan.c linux-3.1.1/net/8021q/vlan.c
71290--- linux-3.1.1/net/8021q/vlan.c 2011-11-11 15:19:27.000000000 -0500
71291+++ linux-3.1.1/net/8021q/vlan.c 2011-11-16 18:39:08.000000000 -0500
71292@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net
71293 err = -EPERM;
71294 if (!capable(CAP_NET_ADMIN))
71295 break;
71296- if ((args.u.name_type >= 0) &&
71297- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71298+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71299 struct vlan_net *vn;
71300
71301 vn = net_generic(net, vlan_net_id);
71302diff -urNp linux-3.1.1/net/9p/trans_fd.c linux-3.1.1/net/9p/trans_fd.c
71303--- linux-3.1.1/net/9p/trans_fd.c 2011-11-11 15:19:27.000000000 -0500
71304+++ linux-3.1.1/net/9p/trans_fd.c 2011-11-16 18:39:08.000000000 -0500
71305@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
71306 oldfs = get_fs();
71307 set_fs(get_ds());
71308 /* The cast to a user pointer is valid due to the set_fs() */
71309- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71310+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71311 set_fs(oldfs);
71312
71313 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71314diff -urNp linux-3.1.1/net/9p/trans_virtio.c linux-3.1.1/net/9p/trans_virtio.c
71315--- linux-3.1.1/net/9p/trans_virtio.c 2011-11-11 15:19:27.000000000 -0500
71316+++ linux-3.1.1/net/9p/trans_virtio.c 2011-11-16 18:39:08.000000000 -0500
71317@@ -327,7 +327,7 @@ req_retry_pinned:
71318 } else {
71319 char *pbuf;
71320 if (req->tc->pubuf)
71321- pbuf = (__force char *) req->tc->pubuf;
71322+ pbuf = (char __force_kernel *) req->tc->pubuf;
71323 else
71324 pbuf = req->tc->pkbuf;
71325 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
71326@@ -357,7 +357,7 @@ req_retry_pinned:
71327 } else {
71328 char *pbuf;
71329 if (req->tc->pubuf)
71330- pbuf = (__force char *) req->tc->pubuf;
71331+ pbuf = (char __force_kernel *) req->tc->pubuf;
71332 else
71333 pbuf = req->tc->pkbuf;
71334
71335diff -urNp linux-3.1.1/net/atm/atm_misc.c linux-3.1.1/net/atm/atm_misc.c
71336--- linux-3.1.1/net/atm/atm_misc.c 2011-11-11 15:19:27.000000000 -0500
71337+++ linux-3.1.1/net/atm/atm_misc.c 2011-11-16 18:39:08.000000000 -0500
71338@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
71339 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71340 return 1;
71341 atm_return(vcc, truesize);
71342- atomic_inc(&vcc->stats->rx_drop);
71343+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71344 return 0;
71345 }
71346 EXPORT_SYMBOL(atm_charge);
71347@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
71348 }
71349 }
71350 atm_return(vcc, guess);
71351- atomic_inc(&vcc->stats->rx_drop);
71352+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71353 return NULL;
71354 }
71355 EXPORT_SYMBOL(atm_alloc_charge);
71356@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71357
71358 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71359 {
71360-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71361+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71362 __SONET_ITEMS
71363 #undef __HANDLE_ITEM
71364 }
71365@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71366
71367 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71368 {
71369-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71370+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71371 __SONET_ITEMS
71372 #undef __HANDLE_ITEM
71373 }
71374diff -urNp linux-3.1.1/net/atm/lec.h linux-3.1.1/net/atm/lec.h
71375--- linux-3.1.1/net/atm/lec.h 2011-11-11 15:19:27.000000000 -0500
71376+++ linux-3.1.1/net/atm/lec.h 2011-11-16 18:39:08.000000000 -0500
71377@@ -48,7 +48,7 @@ struct lane2_ops {
71378 const u8 *tlvs, u32 sizeoftlvs);
71379 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71380 const u8 *tlvs, u32 sizeoftlvs);
71381-};
71382+} __no_const;
71383
71384 /*
71385 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71386diff -urNp linux-3.1.1/net/atm/mpc.h linux-3.1.1/net/atm/mpc.h
71387--- linux-3.1.1/net/atm/mpc.h 2011-11-11 15:19:27.000000000 -0500
71388+++ linux-3.1.1/net/atm/mpc.h 2011-11-16 18:39:08.000000000 -0500
71389@@ -33,7 +33,7 @@ struct mpoa_client {
71390 struct mpc_parameters parameters; /* parameters for this client */
71391
71392 const struct net_device_ops *old_ops;
71393- struct net_device_ops new_ops;
71394+ net_device_ops_no_const new_ops;
71395 };
71396
71397
71398diff -urNp linux-3.1.1/net/atm/mpoa_caches.c linux-3.1.1/net/atm/mpoa_caches.c
71399--- linux-3.1.1/net/atm/mpoa_caches.c 2011-11-11 15:19:27.000000000 -0500
71400+++ linux-3.1.1/net/atm/mpoa_caches.c 2011-11-16 18:40:44.000000000 -0500
71401@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
71402 struct timeval now;
71403 struct k_message msg;
71404
71405+ pax_track_stack();
71406+
71407 do_gettimeofday(&now);
71408
71409 read_lock_bh(&client->ingress_lock);
71410diff -urNp linux-3.1.1/net/atm/proc.c linux-3.1.1/net/atm/proc.c
71411--- linux-3.1.1/net/atm/proc.c 2011-11-11 15:19:27.000000000 -0500
71412+++ linux-3.1.1/net/atm/proc.c 2011-11-16 18:39:08.000000000 -0500
71413@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
71414 const struct k_atm_aal_stats *stats)
71415 {
71416 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71417- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71418- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71419- atomic_read(&stats->rx_drop));
71420+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71421+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71422+ atomic_read_unchecked(&stats->rx_drop));
71423 }
71424
71425 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71426diff -urNp linux-3.1.1/net/atm/resources.c linux-3.1.1/net/atm/resources.c
71427--- linux-3.1.1/net/atm/resources.c 2011-11-11 15:19:27.000000000 -0500
71428+++ linux-3.1.1/net/atm/resources.c 2011-11-16 18:39:08.000000000 -0500
71429@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71430 static void copy_aal_stats(struct k_atm_aal_stats *from,
71431 struct atm_aal_stats *to)
71432 {
71433-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71434+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71435 __AAL_STAT_ITEMS
71436 #undef __HANDLE_ITEM
71437 }
71438@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
71439 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71440 struct atm_aal_stats *to)
71441 {
71442-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71443+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71444 __AAL_STAT_ITEMS
71445 #undef __HANDLE_ITEM
71446 }
71447diff -urNp linux-3.1.1/net/batman-adv/hard-interface.c linux-3.1.1/net/batman-adv/hard-interface.c
71448--- linux-3.1.1/net/batman-adv/hard-interface.c 2011-11-11 15:19:27.000000000 -0500
71449+++ linux-3.1.1/net/batman-adv/hard-interface.c 2011-11-16 18:39:08.000000000 -0500
71450@@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_
71451 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71452 dev_add_pack(&hard_iface->batman_adv_ptype);
71453
71454- atomic_set(&hard_iface->seqno, 1);
71455- atomic_set(&hard_iface->frag_seqno, 1);
71456+ atomic_set_unchecked(&hard_iface->seqno, 1);
71457+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71458 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71459 hard_iface->net_dev->name);
71460
71461diff -urNp linux-3.1.1/net/batman-adv/routing.c linux-3.1.1/net/batman-adv/routing.c
71462--- linux-3.1.1/net/batman-adv/routing.c 2011-11-11 15:19:27.000000000 -0500
71463+++ linux-3.1.1/net/batman-adv/routing.c 2011-11-16 18:39:08.000000000 -0500
71464@@ -656,7 +656,7 @@ void receive_bat_packet(const struct eth
71465 return;
71466
71467 /* could be changed by schedule_own_packet() */
71468- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71469+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71470
71471 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
71472
71473diff -urNp linux-3.1.1/net/batman-adv/send.c linux-3.1.1/net/batman-adv/send.c
71474--- linux-3.1.1/net/batman-adv/send.c 2011-11-11 15:19:27.000000000 -0500
71475+++ linux-3.1.1/net/batman-adv/send.c 2011-11-16 18:39:08.000000000 -0500
71476@@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_ifa
71477
71478 /* change sequence number to network order */
71479 batman_packet->seqno =
71480- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71481+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71482
71483 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
71484 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
71485@@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_ifa
71486 else
71487 batman_packet->gw_flags = NO_FLAGS;
71488
71489- atomic_inc(&hard_iface->seqno);
71490+ atomic_inc_unchecked(&hard_iface->seqno);
71491
71492 slide_own_bcast_window(hard_iface);
71493 send_time = own_send_time(bat_priv);
71494diff -urNp linux-3.1.1/net/batman-adv/soft-interface.c linux-3.1.1/net/batman-adv/soft-interface.c
71495--- linux-3.1.1/net/batman-adv/soft-interface.c 2011-11-11 15:19:27.000000000 -0500
71496+++ linux-3.1.1/net/batman-adv/soft-interface.c 2011-11-16 18:39:08.000000000 -0500
71497@@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *
71498
71499 /* set broadcast sequence number */
71500 bcast_packet->seqno =
71501- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71502+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71503
71504 add_bcast_packet_to_list(bat_priv, skb, 1);
71505
71506@@ -824,7 +824,7 @@ struct net_device *softif_create(const c
71507 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71508
71509 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71510- atomic_set(&bat_priv->bcast_seqno, 1);
71511+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71512 atomic_set(&bat_priv->ttvn, 0);
71513 atomic_set(&bat_priv->tt_local_changes, 0);
71514 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
71515diff -urNp linux-3.1.1/net/batman-adv/types.h linux-3.1.1/net/batman-adv/types.h
71516--- linux-3.1.1/net/batman-adv/types.h 2011-11-11 15:19:27.000000000 -0500
71517+++ linux-3.1.1/net/batman-adv/types.h 2011-11-16 18:39:08.000000000 -0500
71518@@ -38,8 +38,8 @@ struct hard_iface {
71519 int16_t if_num;
71520 char if_status;
71521 struct net_device *net_dev;
71522- atomic_t seqno;
71523- atomic_t frag_seqno;
71524+ atomic_unchecked_t seqno;
71525+ atomic_unchecked_t frag_seqno;
71526 unsigned char *packet_buff;
71527 int packet_len;
71528 struct kobject *hardif_obj;
71529@@ -153,7 +153,7 @@ struct bat_priv {
71530 atomic_t orig_interval; /* uint */
71531 atomic_t hop_penalty; /* uint */
71532 atomic_t log_level; /* uint */
71533- atomic_t bcast_seqno;
71534+ atomic_unchecked_t bcast_seqno;
71535 atomic_t bcast_queue_left;
71536 atomic_t batman_queue_left;
71537 atomic_t ttvn; /* tranlation table version number */
71538diff -urNp linux-3.1.1/net/batman-adv/unicast.c linux-3.1.1/net/batman-adv/unicast.c
71539--- linux-3.1.1/net/batman-adv/unicast.c 2011-11-11 15:19:27.000000000 -0500
71540+++ linux-3.1.1/net/batman-adv/unicast.c 2011-11-16 18:39:08.000000000 -0500
71541@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, s
71542 frag1->flags = UNI_FRAG_HEAD | large_tail;
71543 frag2->flags = large_tail;
71544
71545- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71546+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71547 frag1->seqno = htons(seqno - 1);
71548 frag2->seqno = htons(seqno);
71549
71550diff -urNp linux-3.1.1/net/bluetooth/hci_conn.c linux-3.1.1/net/bluetooth/hci_conn.c
71551--- linux-3.1.1/net/bluetooth/hci_conn.c 2011-11-11 15:19:27.000000000 -0500
71552+++ linux-3.1.1/net/bluetooth/hci_conn.c 2011-11-16 18:39:08.000000000 -0500
71553@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *c
71554 cp.handle = cpu_to_le16(conn->handle);
71555 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71556 cp.ediv = ediv;
71557- memcpy(cp.rand, rand, sizeof(rand));
71558+ memcpy(cp.rand, rand, sizeof(cp.rand));
71559
71560 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
71561 }
71562@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *c
71563 memset(&cp, 0, sizeof(cp));
71564
71565 cp.handle = cpu_to_le16(conn->handle);
71566- memcpy(cp.ltk, ltk, sizeof(ltk));
71567+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71568
71569 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
71570 }
71571diff -urNp linux-3.1.1/net/bridge/br_multicast.c linux-3.1.1/net/bridge/br_multicast.c
71572--- linux-3.1.1/net/bridge/br_multicast.c 2011-11-11 15:19:27.000000000 -0500
71573+++ linux-3.1.1/net/bridge/br_multicast.c 2011-11-16 18:39:08.000000000 -0500
71574@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
71575 nexthdr = ip6h->nexthdr;
71576 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71577
71578- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71579+ if (nexthdr != IPPROTO_ICMPV6)
71580 return 0;
71581
71582 /* Okay, we found ICMPv6 header */
71583diff -urNp linux-3.1.1/net/bridge/netfilter/ebtables.c linux-3.1.1/net/bridge/netfilter/ebtables.c
71584--- linux-3.1.1/net/bridge/netfilter/ebtables.c 2011-11-11 15:19:27.000000000 -0500
71585+++ linux-3.1.1/net/bridge/netfilter/ebtables.c 2011-11-16 18:40:44.000000000 -0500
71586@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *s
71587 tmp.valid_hooks = t->table->valid_hooks;
71588 }
71589 mutex_unlock(&ebt_mutex);
71590- if (copy_to_user(user, &tmp, *len) != 0){
71591+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71592 BUGPRINT("c2u Didn't work\n");
71593 ret = -EFAULT;
71594 break;
71595@@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_use
71596 int ret;
71597 void __user *pos;
71598
71599+ pax_track_stack();
71600+
71601 memset(&tinfo, 0, sizeof(tinfo));
71602
71603 if (cmd == EBT_SO_GET_ENTRIES) {
71604diff -urNp linux-3.1.1/net/caif/caif_socket.c linux-3.1.1/net/caif/caif_socket.c
71605--- linux-3.1.1/net/caif/caif_socket.c 2011-11-11 15:19:27.000000000 -0500
71606+++ linux-3.1.1/net/caif/caif_socket.c 2011-11-16 18:39:08.000000000 -0500
71607@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71608 #ifdef CONFIG_DEBUG_FS
71609 struct debug_fs_counter {
71610 atomic_t caif_nr_socks;
71611- atomic_t caif_sock_create;
71612- atomic_t num_connect_req;
71613- atomic_t num_connect_resp;
71614- atomic_t num_connect_fail_resp;
71615- atomic_t num_disconnect;
71616- atomic_t num_remote_shutdown_ind;
71617- atomic_t num_tx_flow_off_ind;
71618- atomic_t num_tx_flow_on_ind;
71619- atomic_t num_rx_flow_off;
71620- atomic_t num_rx_flow_on;
71621+ atomic_unchecked_t caif_sock_create;
71622+ atomic_unchecked_t num_connect_req;
71623+ atomic_unchecked_t num_connect_resp;
71624+ atomic_unchecked_t num_connect_fail_resp;
71625+ atomic_unchecked_t num_disconnect;
71626+ atomic_unchecked_t num_remote_shutdown_ind;
71627+ atomic_unchecked_t num_tx_flow_off_ind;
71628+ atomic_unchecked_t num_tx_flow_on_ind;
71629+ atomic_unchecked_t num_rx_flow_off;
71630+ atomic_unchecked_t num_rx_flow_on;
71631 };
71632 static struct debug_fs_counter cnt;
71633 #define dbfs_atomic_inc(v) atomic_inc_return(v)
71634+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71635 #define dbfs_atomic_dec(v) atomic_dec_return(v)
71636 #else
71637 #define dbfs_atomic_inc(v) 0
71638@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
71639 atomic_read(&cf_sk->sk.sk_rmem_alloc),
71640 sk_rcvbuf_lowwater(cf_sk));
71641 set_rx_flow_off(cf_sk);
71642- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71643+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71644 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71645 }
71646
71647@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
71648 set_rx_flow_off(cf_sk);
71649 if (net_ratelimit())
71650 pr_debug("sending flow OFF due to rmem_schedule\n");
71651- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71652+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71653 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71654 }
71655 skb->dev = NULL;
71656@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
71657 switch (flow) {
71658 case CAIF_CTRLCMD_FLOW_ON_IND:
71659 /* OK from modem to start sending again */
71660- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
71661+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
71662 set_tx_flow_on(cf_sk);
71663 cf_sk->sk.sk_state_change(&cf_sk->sk);
71664 break;
71665
71666 case CAIF_CTRLCMD_FLOW_OFF_IND:
71667 /* Modem asks us to shut up */
71668- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
71669+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
71670 set_tx_flow_off(cf_sk);
71671 cf_sk->sk.sk_state_change(&cf_sk->sk);
71672 break;
71673@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
71674 /* We're now connected */
71675 caif_client_register_refcnt(&cf_sk->layer,
71676 cfsk_hold, cfsk_put);
71677- dbfs_atomic_inc(&cnt.num_connect_resp);
71678+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
71679 cf_sk->sk.sk_state = CAIF_CONNECTED;
71680 set_tx_flow_on(cf_sk);
71681 cf_sk->sk.sk_state_change(&cf_sk->sk);
71682@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
71683
71684 case CAIF_CTRLCMD_INIT_FAIL_RSP:
71685 /* Connect request failed */
71686- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
71687+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
71688 cf_sk->sk.sk_err = ECONNREFUSED;
71689 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
71690 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71691@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
71692
71693 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
71694 /* Modem has closed this connection, or device is down. */
71695- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
71696+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
71697 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71698 cf_sk->sk.sk_err = ECONNRESET;
71699 set_rx_flow_on(cf_sk);
71700@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
71701 return;
71702
71703 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
71704- dbfs_atomic_inc(&cnt.num_rx_flow_on);
71705+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
71706 set_rx_flow_on(cf_sk);
71707 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
71708 }
71709@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
71710 /*ifindex = id of the interface.*/
71711 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
71712
71713- dbfs_atomic_inc(&cnt.num_connect_req);
71714+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
71715 cf_sk->layer.receive = caif_sktrecv_cb;
71716
71717 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
71718@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
71719 spin_unlock_bh(&sk->sk_receive_queue.lock);
71720 sock->sk = NULL;
71721
71722- dbfs_atomic_inc(&cnt.num_disconnect);
71723+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
71724
71725 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
71726 if (cf_sk->debugfs_socket_dir != NULL)
71727@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
71728 cf_sk->conn_req.protocol = protocol;
71729 /* Increase the number of sockets created. */
71730 dbfs_atomic_inc(&cnt.caif_nr_socks);
71731- num = dbfs_atomic_inc(&cnt.caif_sock_create);
71732+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
71733 #ifdef CONFIG_DEBUG_FS
71734 if (!IS_ERR(debugfsdir)) {
71735
71736diff -urNp linux-3.1.1/net/caif/cfctrl.c linux-3.1.1/net/caif/cfctrl.c
71737--- linux-3.1.1/net/caif/cfctrl.c 2011-11-11 15:19:27.000000000 -0500
71738+++ linux-3.1.1/net/caif/cfctrl.c 2011-11-16 18:40:44.000000000 -0500
71739@@ -9,6 +9,7 @@
71740 #include <linux/stddef.h>
71741 #include <linux/spinlock.h>
71742 #include <linux/slab.h>
71743+#include <linux/sched.h>
71744 #include <net/caif/caif_layer.h>
71745 #include <net/caif/cfpkt.h>
71746 #include <net/caif/cfctrl.h>
71747@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
71748 dev_info.id = 0xff;
71749 memset(this, 0, sizeof(*this));
71750 cfsrvl_init(&this->serv, 0, &dev_info, false);
71751- atomic_set(&this->req_seq_no, 1);
71752- atomic_set(&this->rsp_seq_no, 1);
71753+ atomic_set_unchecked(&this->req_seq_no, 1);
71754+ atomic_set_unchecked(&this->rsp_seq_no, 1);
71755 this->serv.layer.receive = cfctrl_recv;
71756 sprintf(this->serv.layer.name, "ctrl");
71757 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
71758@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
71759 struct cfctrl_request_info *req)
71760 {
71761 spin_lock_bh(&ctrl->info_list_lock);
71762- atomic_inc(&ctrl->req_seq_no);
71763- req->sequence_no = atomic_read(&ctrl->req_seq_no);
71764+ atomic_inc_unchecked(&ctrl->req_seq_no);
71765+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
71766 list_add_tail(&req->list, &ctrl->list);
71767 spin_unlock_bh(&ctrl->info_list_lock);
71768 }
71769@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
71770 if (p != first)
71771 pr_warn("Requests are not received in order\n");
71772
71773- atomic_set(&ctrl->rsp_seq_no,
71774+ atomic_set_unchecked(&ctrl->rsp_seq_no,
71775 p->sequence_no);
71776 list_del(&p->list);
71777 goto out;
71778@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
71779 struct cfctrl *cfctrl = container_obj(layer);
71780 struct cfctrl_request_info rsp, *req;
71781
71782+ pax_track_stack();
71783
71784 cfpkt_extr_head(pkt, &cmdrsp, 1);
71785 cmd = cmdrsp & CFCTRL_CMD_MASK;
71786diff -urNp linux-3.1.1/net/compat.c linux-3.1.1/net/compat.c
71787--- linux-3.1.1/net/compat.c 2011-11-11 15:19:27.000000000 -0500
71788+++ linux-3.1.1/net/compat.c 2011-11-16 18:39:08.000000000 -0500
71789@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
71790 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
71791 __get_user(kmsg->msg_flags, &umsg->msg_flags))
71792 return -EFAULT;
71793- kmsg->msg_name = compat_ptr(tmp1);
71794- kmsg->msg_iov = compat_ptr(tmp2);
71795- kmsg->msg_control = compat_ptr(tmp3);
71796+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
71797+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
71798+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
71799 return 0;
71800 }
71801
71802@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
71803
71804 if (kern_msg->msg_namelen) {
71805 if (mode == VERIFY_READ) {
71806- int err = move_addr_to_kernel(kern_msg->msg_name,
71807+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
71808 kern_msg->msg_namelen,
71809 kern_address);
71810 if (err < 0)
71811@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
71812 kern_msg->msg_name = NULL;
71813
71814 tot_len = iov_from_user_compat_to_kern(kern_iov,
71815- (struct compat_iovec __user *)kern_msg->msg_iov,
71816+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
71817 kern_msg->msg_iovlen);
71818 if (tot_len >= 0)
71819 kern_msg->msg_iov = kern_iov;
71820@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
71821
71822 #define CMSG_COMPAT_FIRSTHDR(msg) \
71823 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
71824- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
71825+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
71826 (struct compat_cmsghdr __user *)NULL)
71827
71828 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
71829 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
71830 (ucmlen) <= (unsigned long) \
71831 ((mhdr)->msg_controllen - \
71832- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
71833+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
71834
71835 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
71836 struct compat_cmsghdr __user *cmsg, int cmsg_len)
71837 {
71838 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
71839- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
71840+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
71841 msg->msg_controllen)
71842 return NULL;
71843 return (struct compat_cmsghdr __user *)ptr;
71844@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71845 {
71846 struct compat_timeval ctv;
71847 struct compat_timespec cts[3];
71848- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71849+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71850 struct compat_cmsghdr cmhdr;
71851 int cmlen;
71852
71853@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71854
71855 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
71856 {
71857- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71858+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71859 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
71860 int fdnum = scm->fp->count;
71861 struct file **fp = scm->fp->fp;
71862@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
71863 return -EFAULT;
71864 old_fs = get_fs();
71865 set_fs(KERNEL_DS);
71866- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
71867+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
71868 set_fs(old_fs);
71869
71870 return err;
71871@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
71872 len = sizeof(ktime);
71873 old_fs = get_fs();
71874 set_fs(KERNEL_DS);
71875- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
71876+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
71877 set_fs(old_fs);
71878
71879 if (!err) {
71880@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
71881 case MCAST_JOIN_GROUP:
71882 case MCAST_LEAVE_GROUP:
71883 {
71884- struct compat_group_req __user *gr32 = (void *)optval;
71885+ struct compat_group_req __user *gr32 = (void __user *)optval;
71886 struct group_req __user *kgr =
71887 compat_alloc_user_space(sizeof(struct group_req));
71888 u32 interface;
71889@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
71890 case MCAST_BLOCK_SOURCE:
71891 case MCAST_UNBLOCK_SOURCE:
71892 {
71893- struct compat_group_source_req __user *gsr32 = (void *)optval;
71894+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
71895 struct group_source_req __user *kgsr = compat_alloc_user_space(
71896 sizeof(struct group_source_req));
71897 u32 interface;
71898@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
71899 }
71900 case MCAST_MSFILTER:
71901 {
71902- struct compat_group_filter __user *gf32 = (void *)optval;
71903+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71904 struct group_filter __user *kgf;
71905 u32 interface, fmode, numsrc;
71906
71907@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
71908 char __user *optval, int __user *optlen,
71909 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
71910 {
71911- struct compat_group_filter __user *gf32 = (void *)optval;
71912+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71913 struct group_filter __user *kgf;
71914 int __user *koptlen;
71915 u32 interface, fmode, numsrc;
71916diff -urNp linux-3.1.1/net/core/datagram.c linux-3.1.1/net/core/datagram.c
71917--- linux-3.1.1/net/core/datagram.c 2011-11-11 15:19:27.000000000 -0500
71918+++ linux-3.1.1/net/core/datagram.c 2011-11-16 18:39:08.000000000 -0500
71919@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
71920 }
71921
71922 kfree_skb(skb);
71923- atomic_inc(&sk->sk_drops);
71924+ atomic_inc_unchecked(&sk->sk_drops);
71925 sk_mem_reclaim_partial(sk);
71926
71927 return err;
71928diff -urNp linux-3.1.1/net/core/dev.c linux-3.1.1/net/core/dev.c
71929--- linux-3.1.1/net/core/dev.c 2011-11-11 15:19:27.000000000 -0500
71930+++ linux-3.1.1/net/core/dev.c 2011-11-16 18:40:44.000000000 -0500
71931@@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const cha
71932 if (no_module && capable(CAP_NET_ADMIN))
71933 no_module = request_module("netdev-%s", name);
71934 if (no_module && capable(CAP_SYS_MODULE)) {
71935+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71936+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71937+#else
71938 if (!request_module("%s", name))
71939 pr_err("Loading kernel module for a network device "
71940 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71941 "instead\n", name);
71942+#endif
71943 }
71944 }
71945 EXPORT_SYMBOL(dev_load);
71946@@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_de
71947
71948 struct dev_gso_cb {
71949 void (*destructor)(struct sk_buff *skb);
71950-};
71951+} __no_const;
71952
71953 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71954
71955@@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb)
71956 }
71957 EXPORT_SYMBOL(netif_rx_ni);
71958
71959-static void net_tx_action(struct softirq_action *h)
71960+static void net_tx_action(void)
71961 {
71962 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71963
71964@@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *
71965 }
71966 EXPORT_SYMBOL(netif_napi_del);
71967
71968-static void net_rx_action(struct softirq_action *h)
71969+static void net_rx_action(void)
71970 {
71971 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71972 unsigned long time_limit = jiffies + 2;
71973diff -urNp linux-3.1.1/net/core/flow.c linux-3.1.1/net/core/flow.c
71974--- linux-3.1.1/net/core/flow.c 2011-11-11 15:19:27.000000000 -0500
71975+++ linux-3.1.1/net/core/flow.c 2011-11-16 18:39:08.000000000 -0500
71976@@ -61,7 +61,7 @@ struct flow_cache {
71977 struct timer_list rnd_timer;
71978 };
71979
71980-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71981+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71982 EXPORT_SYMBOL(flow_cache_genid);
71983 static struct flow_cache flow_cache_global;
71984 static struct kmem_cache *flow_cachep __read_mostly;
71985@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsig
71986
71987 static int flow_entry_valid(struct flow_cache_entry *fle)
71988 {
71989- if (atomic_read(&flow_cache_genid) != fle->genid)
71990+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
71991 return 0;
71992 if (fle->object && !fle->object->ops->check(fle->object))
71993 return 0;
71994@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const
71995 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
71996 fcp->hash_count++;
71997 }
71998- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
71999+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72000 flo = fle->object;
72001 if (!flo)
72002 goto ret_object;
72003@@ -280,7 +280,7 @@ nocache:
72004 }
72005 flo = resolver(net, key, family, dir, flo, ctx);
72006 if (fle) {
72007- fle->genid = atomic_read(&flow_cache_genid);
72008+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
72009 if (!IS_ERR(flo))
72010 fle->object = flo;
72011 else
72012diff -urNp linux-3.1.1/net/core/iovec.c linux-3.1.1/net/core/iovec.c
72013--- linux-3.1.1/net/core/iovec.c 2011-11-11 15:19:27.000000000 -0500
72014+++ linux-3.1.1/net/core/iovec.c 2011-11-16 18:39:08.000000000 -0500
72015@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
72016 if (m->msg_namelen) {
72017 if (mode == VERIFY_READ) {
72018 void __user *namep;
72019- namep = (void __user __force *) m->msg_name;
72020+ namep = (void __force_user *) m->msg_name;
72021 err = move_addr_to_kernel(namep, m->msg_namelen,
72022 address);
72023 if (err < 0)
72024@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
72025 }
72026
72027 size = m->msg_iovlen * sizeof(struct iovec);
72028- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72029+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72030 return -EFAULT;
72031
72032 m->msg_iov = iov;
72033diff -urNp linux-3.1.1/net/core/rtnetlink.c linux-3.1.1/net/core/rtnetlink.c
72034--- linux-3.1.1/net/core/rtnetlink.c 2011-11-11 15:19:27.000000000 -0500
72035+++ linux-3.1.1/net/core/rtnetlink.c 2011-11-16 18:39:08.000000000 -0500
72036@@ -57,7 +57,7 @@ struct rtnl_link {
72037 rtnl_doit_func doit;
72038 rtnl_dumpit_func dumpit;
72039 rtnl_calcit_func calcit;
72040-};
72041+} __no_const;
72042
72043 static DEFINE_MUTEX(rtnl_mutex);
72044 static u16 min_ifinfo_dump_size;
72045diff -urNp linux-3.1.1/net/core/scm.c linux-3.1.1/net/core/scm.c
72046--- linux-3.1.1/net/core/scm.c 2011-11-11 15:19:27.000000000 -0500
72047+++ linux-3.1.1/net/core/scm.c 2011-11-16 18:39:08.000000000 -0500
72048@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
72049 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72050 {
72051 struct cmsghdr __user *cm
72052- = (__force struct cmsghdr __user *)msg->msg_control;
72053+ = (struct cmsghdr __force_user *)msg->msg_control;
72054 struct cmsghdr cmhdr;
72055 int cmlen = CMSG_LEN(len);
72056 int err;
72057@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
72058 err = -EFAULT;
72059 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72060 goto out;
72061- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72062+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72063 goto out;
72064 cmlen = CMSG_SPACE(len);
72065 if (msg->msg_controllen < cmlen)
72066@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
72067 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72068 {
72069 struct cmsghdr __user *cm
72070- = (__force struct cmsghdr __user*)msg->msg_control;
72071+ = (struct cmsghdr __force_user *)msg->msg_control;
72072
72073 int fdmax = 0;
72074 int fdnum = scm->fp->count;
72075@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
72076 if (fdnum < fdmax)
72077 fdmax = fdnum;
72078
72079- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72080+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72081 i++, cmfptr++)
72082 {
72083 int new_fd;
72084diff -urNp linux-3.1.1/net/core/skbuff.c linux-3.1.1/net/core/skbuff.c
72085--- linux-3.1.1/net/core/skbuff.c 2011-11-11 15:19:27.000000000 -0500
72086+++ linux-3.1.1/net/core/skbuff.c 2011-11-16 18:40:44.000000000 -0500
72087@@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb,
72088 struct sock *sk = skb->sk;
72089 int ret = 0;
72090
72091+ pax_track_stack();
72092+
72093 if (splice_grow_spd(pipe, &spd))
72094 return -ENOMEM;
72095
72096diff -urNp linux-3.1.1/net/core/sock.c linux-3.1.1/net/core/sock.c
72097--- linux-3.1.1/net/core/sock.c 2011-11-11 15:19:27.000000000 -0500
72098+++ linux-3.1.1/net/core/sock.c 2011-11-16 18:40:44.000000000 -0500
72099@@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72100 */
72101 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
72102 (unsigned)sk->sk_rcvbuf) {
72103- atomic_inc(&sk->sk_drops);
72104+ atomic_inc_unchecked(&sk->sk_drops);
72105 trace_sock_rcvqueue_full(sk, skb);
72106 return -ENOMEM;
72107 }
72108@@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72109 return err;
72110
72111 if (!sk_rmem_schedule(sk, skb->truesize)) {
72112- atomic_inc(&sk->sk_drops);
72113+ atomic_inc_unchecked(&sk->sk_drops);
72114 return -ENOBUFS;
72115 }
72116
72117@@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72118 skb_dst_force(skb);
72119
72120 spin_lock_irqsave(&list->lock, flags);
72121- skb->dropcount = atomic_read(&sk->sk_drops);
72122+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72123 __skb_queue_tail(list, skb);
72124 spin_unlock_irqrestore(&list->lock, flags);
72125
72126@@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, stru
72127 skb->dev = NULL;
72128
72129 if (sk_rcvqueues_full(sk, skb)) {
72130- atomic_inc(&sk->sk_drops);
72131+ atomic_inc_unchecked(&sk->sk_drops);
72132 goto discard_and_relse;
72133 }
72134 if (nested)
72135@@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, stru
72136 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72137 } else if (sk_add_backlog(sk, skb)) {
72138 bh_unlock_sock(sk);
72139- atomic_inc(&sk->sk_drops);
72140+ atomic_inc_unchecked(&sk->sk_drops);
72141 goto discard_and_relse;
72142 }
72143
72144@@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock,
72145 if (len > sizeof(peercred))
72146 len = sizeof(peercred);
72147 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72148- if (copy_to_user(optval, &peercred, len))
72149+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72150 return -EFAULT;
72151 goto lenout;
72152 }
72153@@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock,
72154 return -ENOTCONN;
72155 if (lv < len)
72156 return -EINVAL;
72157- if (copy_to_user(optval, address, len))
72158+ if (len > sizeof(address) || copy_to_user(optval, address, len))
72159 return -EFAULT;
72160 goto lenout;
72161 }
72162@@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock,
72163
72164 if (len > lv)
72165 len = lv;
72166- if (copy_to_user(optval, &v, len))
72167+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
72168 return -EFAULT;
72169 lenout:
72170 if (put_user(len, optlen))
72171@@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock,
72172 */
72173 smp_wmb();
72174 atomic_set(&sk->sk_refcnt, 1);
72175- atomic_set(&sk->sk_drops, 0);
72176+ atomic_set_unchecked(&sk->sk_drops, 0);
72177 }
72178 EXPORT_SYMBOL(sock_init_data);
72179
72180diff -urNp linux-3.1.1/net/decnet/sysctl_net_decnet.c linux-3.1.1/net/decnet/sysctl_net_decnet.c
72181--- linux-3.1.1/net/decnet/sysctl_net_decnet.c 2011-11-11 15:19:27.000000000 -0500
72182+++ linux-3.1.1/net/decnet/sysctl_net_decnet.c 2011-11-16 18:39:08.000000000 -0500
72183@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_t
72184
72185 if (len > *lenp) len = *lenp;
72186
72187- if (copy_to_user(buffer, addr, len))
72188+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
72189 return -EFAULT;
72190
72191 *lenp = len;
72192@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table
72193
72194 if (len > *lenp) len = *lenp;
72195
72196- if (copy_to_user(buffer, devname, len))
72197+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
72198 return -EFAULT;
72199
72200 *lenp = len;
72201diff -urNp linux-3.1.1/net/econet/Kconfig linux-3.1.1/net/econet/Kconfig
72202--- linux-3.1.1/net/econet/Kconfig 2011-11-11 15:19:27.000000000 -0500
72203+++ linux-3.1.1/net/econet/Kconfig 2011-11-16 18:40:44.000000000 -0500
72204@@ -4,7 +4,7 @@
72205
72206 config ECONET
72207 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72208- depends on EXPERIMENTAL && INET
72209+ depends on EXPERIMENTAL && INET && BROKEN
72210 ---help---
72211 Econet is a fairly old and slow networking protocol mainly used by
72212 Acorn computers to access file and print servers. It uses native
72213diff -urNp linux-3.1.1/net/ipv4/fib_frontend.c linux-3.1.1/net/ipv4/fib_frontend.c
72214--- linux-3.1.1/net/ipv4/fib_frontend.c 2011-11-11 15:19:27.000000000 -0500
72215+++ linux-3.1.1/net/ipv4/fib_frontend.c 2011-11-16 18:39:08.000000000 -0500
72216@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
72217 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72218 fib_sync_up(dev);
72219 #endif
72220- atomic_inc(&net->ipv4.dev_addr_genid);
72221+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72222 rt_cache_flush(dev_net(dev), -1);
72223 break;
72224 case NETDEV_DOWN:
72225 fib_del_ifaddr(ifa, NULL);
72226- atomic_inc(&net->ipv4.dev_addr_genid);
72227+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72228 if (ifa->ifa_dev->ifa_list == NULL) {
72229 /* Last address was deleted from this interface.
72230 * Disable IP.
72231@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
72232 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72233 fib_sync_up(dev);
72234 #endif
72235- atomic_inc(&net->ipv4.dev_addr_genid);
72236+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72237 rt_cache_flush(dev_net(dev), -1);
72238 break;
72239 case NETDEV_DOWN:
72240diff -urNp linux-3.1.1/net/ipv4/fib_semantics.c linux-3.1.1/net/ipv4/fib_semantics.c
72241--- linux-3.1.1/net/ipv4/fib_semantics.c 2011-11-11 15:19:27.000000000 -0500
72242+++ linux-3.1.1/net/ipv4/fib_semantics.c 2011-11-16 18:39:08.000000000 -0500
72243@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct n
72244 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72245 nh->nh_gw,
72246 nh->nh_parent->fib_scope);
72247- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72248+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72249
72250 return nh->nh_saddr;
72251 }
72252diff -urNp linux-3.1.1/net/ipv4/inet_diag.c linux-3.1.1/net/ipv4/inet_diag.c
72253--- linux-3.1.1/net/ipv4/inet_diag.c 2011-11-11 15:19:27.000000000 -0500
72254+++ linux-3.1.1/net/ipv4/inet_diag.c 2011-11-16 18:40:44.000000000 -0500
72255@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
72256 r->idiag_retrans = 0;
72257
72258 r->id.idiag_if = sk->sk_bound_dev_if;
72259+
72260+#ifdef CONFIG_GRKERNSEC_HIDESYM
72261+ r->id.idiag_cookie[0] = 0;
72262+ r->id.idiag_cookie[1] = 0;
72263+#else
72264 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72265 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72266+#endif
72267
72268 r->id.idiag_sport = inet->inet_sport;
72269 r->id.idiag_dport = inet->inet_dport;
72270@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
72271 r->idiag_family = tw->tw_family;
72272 r->idiag_retrans = 0;
72273 r->id.idiag_if = tw->tw_bound_dev_if;
72274+
72275+#ifdef CONFIG_GRKERNSEC_HIDESYM
72276+ r->id.idiag_cookie[0] = 0;
72277+ r->id.idiag_cookie[1] = 0;
72278+#else
72279 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72280 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72281+#endif
72282+
72283 r->id.idiag_sport = tw->tw_sport;
72284 r->id.idiag_dport = tw->tw_dport;
72285 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72286@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
72287 if (sk == NULL)
72288 goto unlock;
72289
72290+#ifndef CONFIG_GRKERNSEC_HIDESYM
72291 err = -ESTALE;
72292 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72293 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72294 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72295 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72296 goto out;
72297+#endif
72298
72299 err = -ENOMEM;
72300 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72301@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
72302 r->idiag_retrans = req->retrans;
72303
72304 r->id.idiag_if = sk->sk_bound_dev_if;
72305+
72306+#ifdef CONFIG_GRKERNSEC_HIDESYM
72307+ r->id.idiag_cookie[0] = 0;
72308+ r->id.idiag_cookie[1] = 0;
72309+#else
72310 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72311 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72312+#endif
72313
72314 tmo = req->expires - jiffies;
72315 if (tmo < 0)
72316diff -urNp linux-3.1.1/net/ipv4/inet_hashtables.c linux-3.1.1/net/ipv4/inet_hashtables.c
72317--- linux-3.1.1/net/ipv4/inet_hashtables.c 2011-11-11 15:19:27.000000000 -0500
72318+++ linux-3.1.1/net/ipv4/inet_hashtables.c 2011-11-16 18:40:44.000000000 -0500
72319@@ -18,12 +18,15 @@
72320 #include <linux/sched.h>
72321 #include <linux/slab.h>
72322 #include <linux/wait.h>
72323+#include <linux/security.h>
72324
72325 #include <net/inet_connection_sock.h>
72326 #include <net/inet_hashtables.h>
72327 #include <net/secure_seq.h>
72328 #include <net/ip.h>
72329
72330+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72331+
72332 /*
72333 * Allocate and initialize a new local port bind bucket.
72334 * The bindhash mutex for snum's hash chain must be held here.
72335@@ -530,6 +533,8 @@ ok:
72336 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72337 spin_unlock(&head->lock);
72338
72339+ gr_update_task_in_ip_table(current, inet_sk(sk));
72340+
72341 if (tw) {
72342 inet_twsk_deschedule(tw, death_row);
72343 while (twrefcnt) {
72344diff -urNp linux-3.1.1/net/ipv4/inetpeer.c linux-3.1.1/net/ipv4/inetpeer.c
72345--- linux-3.1.1/net/ipv4/inetpeer.c 2011-11-11 15:19:27.000000000 -0500
72346+++ linux-3.1.1/net/ipv4/inetpeer.c 2011-11-16 19:18:22.000000000 -0500
72347@@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const str
72348 unsigned int sequence;
72349 int invalidated, gccnt = 0;
72350
72351+ pax_track_stack();
72352+
72353 /* Attempt a lockless lookup first.
72354 * Because of a concurrent writer, we might not find an existing entry.
72355 */
72356@@ -436,8 +438,8 @@ relookup:
72357 if (p) {
72358 p->daddr = *daddr;
72359 atomic_set(&p->refcnt, 1);
72360- atomic_set(&p->rid, 0);
72361- atomic_set(&p->ip_id_count,
72362+ atomic_set_unchecked(&p->rid, 0);
72363+ atomic_set_unchecked(&p->ip_id_count,
72364 (daddr->family == AF_INET) ?
72365 secure_ip_id(daddr->addr.a4) :
72366 secure_ipv6_id(daddr->addr.a6));
72367diff -urNp linux-3.1.1/net/ipv4/ipconfig.c linux-3.1.1/net/ipv4/ipconfig.c
72368--- linux-3.1.1/net/ipv4/ipconfig.c 2011-11-11 15:19:27.000000000 -0500
72369+++ linux-3.1.1/net/ipv4/ipconfig.c 2011-11-16 18:39:08.000000000 -0500
72370@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
72371
72372 mm_segment_t oldfs = get_fs();
72373 set_fs(get_ds());
72374- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72375+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72376 set_fs(oldfs);
72377 return res;
72378 }
72379@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
72380
72381 mm_segment_t oldfs = get_fs();
72382 set_fs(get_ds());
72383- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72384+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72385 set_fs(oldfs);
72386 return res;
72387 }
72388@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
72389
72390 mm_segment_t oldfs = get_fs();
72391 set_fs(get_ds());
72392- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72393+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72394 set_fs(oldfs);
72395 return res;
72396 }
72397diff -urNp linux-3.1.1/net/ipv4/ip_fragment.c linux-3.1.1/net/ipv4/ip_fragment.c
72398--- linux-3.1.1/net/ipv4/ip_fragment.c 2011-11-11 15:19:27.000000000 -0500
72399+++ linux-3.1.1/net/ipv4/ip_fragment.c 2011-11-16 18:39:08.000000000 -0500
72400@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct
72401 return 0;
72402
72403 start = qp->rid;
72404- end = atomic_inc_return(&peer->rid);
72405+ end = atomic_inc_return_unchecked(&peer->rid);
72406 qp->rid = end;
72407
72408 rc = qp->q.fragments && (end - start) > max;
72409diff -urNp linux-3.1.1/net/ipv4/ip_sockglue.c linux-3.1.1/net/ipv4/ip_sockglue.c
72410--- linux-3.1.1/net/ipv4/ip_sockglue.c 2011-11-11 15:19:27.000000000 -0500
72411+++ linux-3.1.1/net/ipv4/ip_sockglue.c 2011-11-16 18:40:44.000000000 -0500
72412@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
72413 int val;
72414 int len;
72415
72416+ pax_track_stack();
72417+
72418 if (level != SOL_IP)
72419 return -EOPNOTSUPP;
72420
72421@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
72422 len = min_t(unsigned int, len, opt->optlen);
72423 if (put_user(len, optlen))
72424 return -EFAULT;
72425- if (copy_to_user(optval, opt->__data, len))
72426+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72427+ copy_to_user(optval, opt->__data, len))
72428 return -EFAULT;
72429 return 0;
72430 }
72431@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
72432 if (sk->sk_type != SOCK_STREAM)
72433 return -ENOPROTOOPT;
72434
72435- msg.msg_control = optval;
72436+ msg.msg_control = (void __force_kernel *)optval;
72437 msg.msg_controllen = len;
72438 msg.msg_flags = flags;
72439
72440diff -urNp linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c
72441--- linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-11 15:19:27.000000000 -0500
72442+++ linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-16 18:39:08.000000000 -0500
72443@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
72444
72445 *len = 0;
72446
72447- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72448+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72449 if (*octets == NULL) {
72450 if (net_ratelimit())
72451 pr_notice("OOM in bsalg (%d)\n", __LINE__);
72452diff -urNp linux-3.1.1/net/ipv4/ping.c linux-3.1.1/net/ipv4/ping.c
72453--- linux-3.1.1/net/ipv4/ping.c 2011-11-11 15:19:27.000000000 -0500
72454+++ linux-3.1.1/net/ipv4/ping.c 2011-11-16 18:39:08.000000000 -0500
72455@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
72456 sk_rmem_alloc_get(sp),
72457 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72458 atomic_read(&sp->sk_refcnt), sp,
72459- atomic_read(&sp->sk_drops), len);
72460+ atomic_read_unchecked(&sp->sk_drops), len);
72461 }
72462
72463 static int ping_seq_show(struct seq_file *seq, void *v)
72464diff -urNp linux-3.1.1/net/ipv4/raw.c linux-3.1.1/net/ipv4/raw.c
72465--- linux-3.1.1/net/ipv4/raw.c 2011-11-11 15:19:27.000000000 -0500
72466+++ linux-3.1.1/net/ipv4/raw.c 2011-11-17 18:58:40.000000000 -0500
72467@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
72468 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72469 {
72470 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72471- atomic_inc(&sk->sk_drops);
72472+ atomic_inc_unchecked(&sk->sk_drops);
72473 kfree_skb(skb);
72474 return NET_RX_DROP;
72475 }
72476@@ -737,16 +737,20 @@ static int raw_init(struct sock *sk)
72477
72478 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72479 {
72480+ struct icmp_filter filter;
72481+
72482 if (optlen > sizeof(struct icmp_filter))
72483 optlen = sizeof(struct icmp_filter);
72484- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72485+ if (copy_from_user(&filter, optval, optlen))
72486 return -EFAULT;
72487+ raw_sk(sk)->filter = filter;
72488 return 0;
72489 }
72490
72491 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72492 {
72493 int len, ret = -EFAULT;
72494+ struct icmp_filter filter;
72495
72496 if (get_user(len, optlen))
72497 goto out;
72498@@ -756,8 +760,8 @@ static int raw_geticmpfilter(struct sock
72499 if (len > sizeof(struct icmp_filter))
72500 len = sizeof(struct icmp_filter);
72501 ret = -EFAULT;
72502- if (put_user(len, optlen) ||
72503- copy_to_user(optval, &raw_sk(sk)->filter, len))
72504+ filter = raw_sk(sk)->filter;
72505+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
72506 goto out;
72507 ret = 0;
72508 out: return ret;
72509@@ -985,7 +989,13 @@ static void raw_sock_seq_show(struct seq
72510 sk_wmem_alloc_get(sp),
72511 sk_rmem_alloc_get(sp),
72512 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72513- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72514+ atomic_read(&sp->sk_refcnt),
72515+#ifdef CONFIG_GRKERNSEC_HIDESYM
72516+ NULL,
72517+#else
72518+ sp,
72519+#endif
72520+ atomic_read_unchecked(&sp->sk_drops));
72521 }
72522
72523 static int raw_seq_show(struct seq_file *seq, void *v)
72524diff -urNp linux-3.1.1/net/ipv4/route.c linux-3.1.1/net/ipv4/route.c
72525--- linux-3.1.1/net/ipv4/route.c 2011-11-11 15:19:27.000000000 -0500
72526+++ linux-3.1.1/net/ipv4/route.c 2011-11-16 18:39:08.000000000 -0500
72527@@ -308,7 +308,7 @@ static inline unsigned int rt_hash(__be3
72528
72529 static inline int rt_genid(struct net *net)
72530 {
72531- return atomic_read(&net->ipv4.rt_genid);
72532+ return atomic_read_unchecked(&net->ipv4.rt_genid);
72533 }
72534
72535 #ifdef CONFIG_PROC_FS
72536@@ -837,7 +837,7 @@ static void rt_cache_invalidate(struct n
72537 unsigned char shuffle;
72538
72539 get_random_bytes(&shuffle, sizeof(shuffle));
72540- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72541+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72542 }
72543
72544 /*
72545@@ -2872,7 +2872,7 @@ static int rt_fill_info(struct net *net,
72546 error = rt->dst.error;
72547 if (peer) {
72548 inet_peer_refcheck(rt->peer);
72549- id = atomic_read(&peer->ip_id_count) & 0xffff;
72550+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72551 if (peer->tcp_ts_stamp) {
72552 ts = peer->tcp_ts;
72553 tsage = get_seconds() - peer->tcp_ts_stamp;
72554diff -urNp linux-3.1.1/net/ipv4/tcp.c linux-3.1.1/net/ipv4/tcp.c
72555--- linux-3.1.1/net/ipv4/tcp.c 2011-11-11 15:19:27.000000000 -0500
72556+++ linux-3.1.1/net/ipv4/tcp.c 2011-11-16 18:40:44.000000000 -0500
72557@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
72558 int val;
72559 int err = 0;
72560
72561+ pax_track_stack();
72562+
72563 /* These are data/string values, all the others are ints */
72564 switch (optname) {
72565 case TCP_CONGESTION: {
72566@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
72567 struct tcp_sock *tp = tcp_sk(sk);
72568 int val, len;
72569
72570+ pax_track_stack();
72571+
72572 if (get_user(len, optlen))
72573 return -EFAULT;
72574
72575diff -urNp linux-3.1.1/net/ipv4/tcp_ipv4.c linux-3.1.1/net/ipv4/tcp_ipv4.c
72576--- linux-3.1.1/net/ipv4/tcp_ipv4.c 2011-11-11 15:19:27.000000000 -0500
72577+++ linux-3.1.1/net/ipv4/tcp_ipv4.c 2011-11-16 18:40:44.000000000 -0500
72578@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72579 int sysctl_tcp_low_latency __read_mostly;
72580 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72581
72582+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72583+extern int grsec_enable_blackhole;
72584+#endif
72585
72586 #ifdef CONFIG_TCP_MD5SIG
72587 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72588@@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72589 return 0;
72590
72591 reset:
72592+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72593+ if (!grsec_enable_blackhole)
72594+#endif
72595 tcp_v4_send_reset(rsk, skb);
72596 discard:
72597 kfree_skb(skb);
72598@@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72599 TCP_SKB_CB(skb)->sacked = 0;
72600
72601 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72602- if (!sk)
72603+ if (!sk) {
72604+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72605+ ret = 1;
72606+#endif
72607 goto no_tcp_socket;
72608-
72609+ }
72610 process:
72611- if (sk->sk_state == TCP_TIME_WAIT)
72612+ if (sk->sk_state == TCP_TIME_WAIT) {
72613+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72614+ ret = 2;
72615+#endif
72616 goto do_time_wait;
72617+ }
72618
72619 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
72620 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72621@@ -1739,6 +1752,10 @@ no_tcp_socket:
72622 bad_packet:
72623 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72624 } else {
72625+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72626+ if (!grsec_enable_blackhole || (ret == 1 &&
72627+ (skb->dev->flags & IFF_LOOPBACK)))
72628+#endif
72629 tcp_v4_send_reset(NULL, skb);
72630 }
72631
72632@@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk
72633 0, /* non standard timer */
72634 0, /* open_requests have no inode */
72635 atomic_read(&sk->sk_refcnt),
72636+#ifdef CONFIG_GRKERNSEC_HIDESYM
72637+ NULL,
72638+#else
72639 req,
72640+#endif
72641 len);
72642 }
72643
72644@@ -2453,7 +2474,12 @@ static void get_tcp4_sock(struct sock *s
72645 sock_i_uid(sk),
72646 icsk->icsk_probes_out,
72647 sock_i_ino(sk),
72648- atomic_read(&sk->sk_refcnt), sk,
72649+ atomic_read(&sk->sk_refcnt),
72650+#ifdef CONFIG_GRKERNSEC_HIDESYM
72651+ NULL,
72652+#else
72653+ sk,
72654+#endif
72655 jiffies_to_clock_t(icsk->icsk_rto),
72656 jiffies_to_clock_t(icsk->icsk_ack.ato),
72657 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72658@@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct in
72659 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
72660 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72661 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72662- atomic_read(&tw->tw_refcnt), tw, len);
72663+ atomic_read(&tw->tw_refcnt),
72664+#ifdef CONFIG_GRKERNSEC_HIDESYM
72665+ NULL,
72666+#else
72667+ tw,
72668+#endif
72669+ len);
72670 }
72671
72672 #define TMPSZ 150
72673diff -urNp linux-3.1.1/net/ipv4/tcp_minisocks.c linux-3.1.1/net/ipv4/tcp_minisocks.c
72674--- linux-3.1.1/net/ipv4/tcp_minisocks.c 2011-11-11 15:19:27.000000000 -0500
72675+++ linux-3.1.1/net/ipv4/tcp_minisocks.c 2011-11-16 18:40:44.000000000 -0500
72676@@ -27,6 +27,10 @@
72677 #include <net/inet_common.h>
72678 #include <net/xfrm.h>
72679
72680+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72681+extern int grsec_enable_blackhole;
72682+#endif
72683+
72684 int sysctl_tcp_syncookies __read_mostly = 1;
72685 EXPORT_SYMBOL(sysctl_tcp_syncookies);
72686
72687@@ -750,6 +754,10 @@ listen_overflow:
72688
72689 embryonic_reset:
72690 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72691+
72692+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72693+ if (!grsec_enable_blackhole)
72694+#endif
72695 if (!(flg & TCP_FLAG_RST))
72696 req->rsk_ops->send_reset(sk, skb);
72697
72698diff -urNp linux-3.1.1/net/ipv4/tcp_output.c linux-3.1.1/net/ipv4/tcp_output.c
72699--- linux-3.1.1/net/ipv4/tcp_output.c 2011-11-11 15:19:27.000000000 -0500
72700+++ linux-3.1.1/net/ipv4/tcp_output.c 2011-11-16 18:40:44.000000000 -0500
72701@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
72702 int mss;
72703 int s_data_desired = 0;
72704
72705+ pax_track_stack();
72706+
72707 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
72708 s_data_desired = cvp->s_data_desired;
72709 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
72710diff -urNp linux-3.1.1/net/ipv4/tcp_probe.c linux-3.1.1/net/ipv4/tcp_probe.c
72711--- linux-3.1.1/net/ipv4/tcp_probe.c 2011-11-11 15:19:27.000000000 -0500
72712+++ linux-3.1.1/net/ipv4/tcp_probe.c 2011-11-16 18:39:08.000000000 -0500
72713@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
72714 if (cnt + width >= len)
72715 break;
72716
72717- if (copy_to_user(buf + cnt, tbuf, width))
72718+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72719 return -EFAULT;
72720 cnt += width;
72721 }
72722diff -urNp linux-3.1.1/net/ipv4/tcp_timer.c linux-3.1.1/net/ipv4/tcp_timer.c
72723--- linux-3.1.1/net/ipv4/tcp_timer.c 2011-11-11 15:19:27.000000000 -0500
72724+++ linux-3.1.1/net/ipv4/tcp_timer.c 2011-11-16 18:40:44.000000000 -0500
72725@@ -22,6 +22,10 @@
72726 #include <linux/gfp.h>
72727 #include <net/tcp.h>
72728
72729+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72730+extern int grsec_lastack_retries;
72731+#endif
72732+
72733 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72734 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72735 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72736@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
72737 }
72738 }
72739
72740+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72741+ if ((sk->sk_state == TCP_LAST_ACK) &&
72742+ (grsec_lastack_retries > 0) &&
72743+ (grsec_lastack_retries < retry_until))
72744+ retry_until = grsec_lastack_retries;
72745+#endif
72746+
72747 if (retransmits_timed_out(sk, retry_until,
72748 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
72749 /* Has it gone just too far? */
72750diff -urNp linux-3.1.1/net/ipv4/udp.c linux-3.1.1/net/ipv4/udp.c
72751--- linux-3.1.1/net/ipv4/udp.c 2011-11-11 15:19:27.000000000 -0500
72752+++ linux-3.1.1/net/ipv4/udp.c 2011-11-16 19:17:54.000000000 -0500
72753@@ -86,6 +86,7 @@
72754 #include <linux/types.h>
72755 #include <linux/fcntl.h>
72756 #include <linux/module.h>
72757+#include <linux/security.h>
72758 #include <linux/socket.h>
72759 #include <linux/sockios.h>
72760 #include <linux/igmp.h>
72761@@ -108,6 +109,10 @@
72762 #include <trace/events/udp.h>
72763 #include "udp_impl.h"
72764
72765+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72766+extern int grsec_enable_blackhole;
72767+#endif
72768+
72769 struct udp_table udp_table __read_mostly;
72770 EXPORT_SYMBOL(udp_table);
72771
72772@@ -565,6 +570,9 @@ found:
72773 return s;
72774 }
72775
72776+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72777+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72778+
72779 /*
72780 * This routine is called by the ICMP module when it gets some
72781 * sort of error condition. If err < 0 then the socket should
72782@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72783 dport = usin->sin_port;
72784 if (dport == 0)
72785 return -EINVAL;
72786+
72787+ err = gr_search_udp_sendmsg(sk, usin);
72788+ if (err)
72789+ return err;
72790 } else {
72791 if (sk->sk_state != TCP_ESTABLISHED)
72792 return -EDESTADDRREQ;
72793+
72794+ err = gr_search_udp_sendmsg(sk, NULL);
72795+ if (err)
72796+ return err;
72797+
72798 daddr = inet->inet_daddr;
72799 dport = inet->inet_dport;
72800 /* Open fast path for connected socket.
72801@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(
72802 udp_lib_checksum_complete(skb)) {
72803 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72804 IS_UDPLITE(sk));
72805- atomic_inc(&sk->sk_drops);
72806+ atomic_inc_unchecked(&sk->sk_drops);
72807 __skb_unlink(skb, rcvq);
72808 __skb_queue_tail(&list_kill, skb);
72809 }
72810@@ -1185,6 +1202,10 @@ try_again:
72811 if (!skb)
72812 goto out;
72813
72814+ err = gr_search_udp_recvmsg(sk, skb);
72815+ if (err)
72816+ goto out_free;
72817+
72818 ulen = skb->len - sizeof(struct udphdr);
72819 if (len > ulen)
72820 len = ulen;
72821@@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
72822
72823 drop:
72824 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72825- atomic_inc(&sk->sk_drops);
72826+ atomic_inc_unchecked(&sk->sk_drops);
72827 kfree_skb(skb);
72828 return -1;
72829 }
72830@@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **st
72831 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
72832
72833 if (!skb1) {
72834- atomic_inc(&sk->sk_drops);
72835+ atomic_inc_unchecked(&sk->sk_drops);
72836 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72837 IS_UDPLITE(sk));
72838 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72839@@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72840 goto csum_error;
72841
72842 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72843+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72844+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72845+#endif
72846 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72847
72848 /*
72849@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock
72850 sk_wmem_alloc_get(sp),
72851 sk_rmem_alloc_get(sp),
72852 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72853- atomic_read(&sp->sk_refcnt), sp,
72854- atomic_read(&sp->sk_drops), len);
72855+ atomic_read(&sp->sk_refcnt),
72856+#ifdef CONFIG_GRKERNSEC_HIDESYM
72857+ NULL,
72858+#else
72859+ sp,
72860+#endif
72861+ atomic_read_unchecked(&sp->sk_drops), len);
72862 }
72863
72864 int udp4_seq_show(struct seq_file *seq, void *v)
72865diff -urNp linux-3.1.1/net/ipv6/addrconf.c linux-3.1.1/net/ipv6/addrconf.c
72866--- linux-3.1.1/net/ipv6/addrconf.c 2011-11-11 15:19:27.000000000 -0500
72867+++ linux-3.1.1/net/ipv6/addrconf.c 2011-11-16 18:39:08.000000000 -0500
72868@@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net
72869 p.iph.ihl = 5;
72870 p.iph.protocol = IPPROTO_IPV6;
72871 p.iph.ttl = 64;
72872- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
72873+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
72874
72875 if (ops->ndo_do_ioctl) {
72876 mm_segment_t oldfs = get_fs();
72877diff -urNp linux-3.1.1/net/ipv6/inet6_connection_sock.c linux-3.1.1/net/ipv6/inet6_connection_sock.c
72878--- linux-3.1.1/net/ipv6/inet6_connection_sock.c 2011-11-11 15:19:27.000000000 -0500
72879+++ linux-3.1.1/net/ipv6/inet6_connection_sock.c 2011-11-16 18:39:08.000000000 -0500
72880@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
72881 #ifdef CONFIG_XFRM
72882 {
72883 struct rt6_info *rt = (struct rt6_info *)dst;
72884- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72885+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72886 }
72887 #endif
72888 }
72889@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
72890 #ifdef CONFIG_XFRM
72891 if (dst) {
72892 struct rt6_info *rt = (struct rt6_info *)dst;
72893- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72894+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72895 __sk_dst_reset(sk);
72896 dst = NULL;
72897 }
72898diff -urNp linux-3.1.1/net/ipv6/ipv6_sockglue.c linux-3.1.1/net/ipv6/ipv6_sockglue.c
72899--- linux-3.1.1/net/ipv6/ipv6_sockglue.c 2011-11-11 15:19:27.000000000 -0500
72900+++ linux-3.1.1/net/ipv6/ipv6_sockglue.c 2011-11-16 18:40:44.000000000 -0500
72901@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
72902 int val, valbool;
72903 int retv = -ENOPROTOOPT;
72904
72905+ pax_track_stack();
72906+
72907 if (optval == NULL)
72908 val=0;
72909 else {
72910@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
72911 int len;
72912 int val;
72913
72914+ pax_track_stack();
72915+
72916 if (ip6_mroute_opt(optname))
72917 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72918
72919@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
72920 if (sk->sk_type != SOCK_STREAM)
72921 return -ENOPROTOOPT;
72922
72923- msg.msg_control = optval;
72924+ msg.msg_control = (void __force_kernel *)optval;
72925 msg.msg_controllen = len;
72926 msg.msg_flags = flags;
72927
72928diff -urNp linux-3.1.1/net/ipv6/raw.c linux-3.1.1/net/ipv6/raw.c
72929--- linux-3.1.1/net/ipv6/raw.c 2011-11-11 15:19:27.000000000 -0500
72930+++ linux-3.1.1/net/ipv6/raw.c 2011-11-16 18:40:44.000000000 -0500
72931@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
72932 {
72933 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
72934 skb_checksum_complete(skb)) {
72935- atomic_inc(&sk->sk_drops);
72936+ atomic_inc_unchecked(&sk->sk_drops);
72937 kfree_skb(skb);
72938 return NET_RX_DROP;
72939 }
72940@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72941 struct raw6_sock *rp = raw6_sk(sk);
72942
72943 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72944- atomic_inc(&sk->sk_drops);
72945+ atomic_inc_unchecked(&sk->sk_drops);
72946 kfree_skb(skb);
72947 return NET_RX_DROP;
72948 }
72949@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72950
72951 if (inet->hdrincl) {
72952 if (skb_checksum_complete(skb)) {
72953- atomic_inc(&sk->sk_drops);
72954+ atomic_inc_unchecked(&sk->sk_drops);
72955 kfree_skb(skb);
72956 return NET_RX_DROP;
72957 }
72958@@ -601,7 +601,7 @@ out:
72959 return err;
72960 }
72961
72962-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72963+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72964 struct flowi6 *fl6, struct dst_entry **dstp,
72965 unsigned int flags)
72966 {
72967@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
72968 u16 proto;
72969 int err;
72970
72971+ pax_track_stack();
72972+
72973 /* Rough check on arithmetic overflow,
72974 better check is made in ip6_append_data().
72975 */
72976@@ -909,12 +911,15 @@ do_confirm:
72977 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72978 char __user *optval, int optlen)
72979 {
72980+ struct icmp6_filter filter;
72981+
72982 switch (optname) {
72983 case ICMPV6_FILTER:
72984 if (optlen > sizeof(struct icmp6_filter))
72985 optlen = sizeof(struct icmp6_filter);
72986- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72987+ if (copy_from_user(&filter, optval, optlen))
72988 return -EFAULT;
72989+ raw6_sk(sk)->filter = filter;
72990 return 0;
72991 default:
72992 return -ENOPROTOOPT;
72993@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
72994 char __user *optval, int __user *optlen)
72995 {
72996 int len;
72997+ struct icmp6_filter filter;
72998
72999 switch (optname) {
73000 case ICMPV6_FILTER:
73001@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
73002 len = sizeof(struct icmp6_filter);
73003 if (put_user(len, optlen))
73004 return -EFAULT;
73005- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73006+ filter = raw6_sk(sk)->filter;
73007+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
73008 return -EFAULT;
73009 return 0;
73010 default:
73011@@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct se
73012 0, 0L, 0,
73013 sock_i_uid(sp), 0,
73014 sock_i_ino(sp),
73015- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73016+ atomic_read(&sp->sk_refcnt),
73017+#ifdef CONFIG_GRKERNSEC_HIDESYM
73018+ NULL,
73019+#else
73020+ sp,
73021+#endif
73022+ atomic_read_unchecked(&sp->sk_drops));
73023 }
73024
73025 static int raw6_seq_show(struct seq_file *seq, void *v)
73026diff -urNp linux-3.1.1/net/ipv6/tcp_ipv6.c linux-3.1.1/net/ipv6/tcp_ipv6.c
73027--- linux-3.1.1/net/ipv6/tcp_ipv6.c 2011-11-11 15:19:27.000000000 -0500
73028+++ linux-3.1.1/net/ipv6/tcp_ipv6.c 2011-11-16 18:40:44.000000000 -0500
73029@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
73030 }
73031 #endif
73032
73033+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73034+extern int grsec_enable_blackhole;
73035+#endif
73036+
73037 static void tcp_v6_hash(struct sock *sk)
73038 {
73039 if (sk->sk_state != TCP_CLOSE) {
73040@@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk
73041 return 0;
73042
73043 reset:
73044+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73045+ if (!grsec_enable_blackhole)
73046+#endif
73047 tcp_v6_send_reset(sk, skb);
73048 discard:
73049 if (opt_skb)
73050@@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
73051 TCP_SKB_CB(skb)->sacked = 0;
73052
73053 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73054- if (!sk)
73055+ if (!sk) {
73056+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73057+ ret = 1;
73058+#endif
73059 goto no_tcp_socket;
73060+ }
73061
73062 process:
73063- if (sk->sk_state == TCP_TIME_WAIT)
73064+ if (sk->sk_state == TCP_TIME_WAIT) {
73065+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73066+ ret = 2;
73067+#endif
73068 goto do_time_wait;
73069+ }
73070
73071 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73072 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73073@@ -1779,6 +1794,10 @@ no_tcp_socket:
73074 bad_packet:
73075 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73076 } else {
73077+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73078+ if (!grsec_enable_blackhole || (ret == 1 &&
73079+ (skb->dev->flags & IFF_LOOPBACK)))
73080+#endif
73081 tcp_v6_send_reset(NULL, skb);
73082 }
73083
73084@@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file
73085 uid,
73086 0, /* non standard timer */
73087 0, /* open_requests have no inode */
73088- 0, req);
73089+ 0,
73090+#ifdef CONFIG_GRKERNSEC_HIDESYM
73091+ NULL
73092+#else
73093+ req
73094+#endif
73095+ );
73096 }
73097
73098 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73099@@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_fil
73100 sock_i_uid(sp),
73101 icsk->icsk_probes_out,
73102 sock_i_ino(sp),
73103- atomic_read(&sp->sk_refcnt), sp,
73104+ atomic_read(&sp->sk_refcnt),
73105+#ifdef CONFIG_GRKERNSEC_HIDESYM
73106+ NULL,
73107+#else
73108+ sp,
73109+#endif
73110 jiffies_to_clock_t(icsk->icsk_rto),
73111 jiffies_to_clock_t(icsk->icsk_ack.ato),
73112 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73113@@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct se
73114 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73115 tw->tw_substate, 0, 0,
73116 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73117- atomic_read(&tw->tw_refcnt), tw);
73118+ atomic_read(&tw->tw_refcnt),
73119+#ifdef CONFIG_GRKERNSEC_HIDESYM
73120+ NULL
73121+#else
73122+ tw
73123+#endif
73124+ );
73125 }
73126
73127 static int tcp6_seq_show(struct seq_file *seq, void *v)
73128diff -urNp linux-3.1.1/net/ipv6/udp.c linux-3.1.1/net/ipv6/udp.c
73129--- linux-3.1.1/net/ipv6/udp.c 2011-11-11 15:19:27.000000000 -0500
73130+++ linux-3.1.1/net/ipv6/udp.c 2011-11-16 18:40:44.000000000 -0500
73131@@ -50,6 +50,10 @@
73132 #include <linux/seq_file.h>
73133 #include "udp_impl.h"
73134
73135+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73136+extern int grsec_enable_blackhole;
73137+#endif
73138+
73139 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73140 {
73141 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73142@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
73143
73144 return 0;
73145 drop:
73146- atomic_inc(&sk->sk_drops);
73147+ atomic_inc_unchecked(&sk->sk_drops);
73148 drop_no_sk_drops_inc:
73149 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73150 kfree_skb(skb);
73151@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
73152 continue;
73153 }
73154 drop:
73155- atomic_inc(&sk->sk_drops);
73156+ atomic_inc_unchecked(&sk->sk_drops);
73157 UDP6_INC_STATS_BH(sock_net(sk),
73158 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73159 UDP6_INC_STATS_BH(sock_net(sk),
73160@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73161 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73162 proto == IPPROTO_UDPLITE);
73163
73164+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73165+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73166+#endif
73167 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73168
73169 kfree_skb(skb);
73170@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73171 if (!sock_owned_by_user(sk))
73172 udpv6_queue_rcv_skb(sk, skb);
73173 else if (sk_add_backlog(sk, skb)) {
73174- atomic_inc(&sk->sk_drops);
73175+ atomic_inc_unchecked(&sk->sk_drops);
73176 bh_unlock_sock(sk);
73177 sock_put(sk);
73178 goto discard;
73179@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
73180 0, 0L, 0,
73181 sock_i_uid(sp), 0,
73182 sock_i_ino(sp),
73183- atomic_read(&sp->sk_refcnt), sp,
73184- atomic_read(&sp->sk_drops));
73185+ atomic_read(&sp->sk_refcnt),
73186+#ifdef CONFIG_GRKERNSEC_HIDESYM
73187+ NULL,
73188+#else
73189+ sp,
73190+#endif
73191+ atomic_read_unchecked(&sp->sk_drops));
73192 }
73193
73194 int udp6_seq_show(struct seq_file *seq, void *v)
73195diff -urNp linux-3.1.1/net/irda/ircomm/ircomm_tty.c linux-3.1.1/net/irda/ircomm/ircomm_tty.c
73196--- linux-3.1.1/net/irda/ircomm/ircomm_tty.c 2011-11-11 15:19:27.000000000 -0500
73197+++ linux-3.1.1/net/irda/ircomm/ircomm_tty.c 2011-11-16 18:39:08.000000000 -0500
73198@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
73199 add_wait_queue(&self->open_wait, &wait);
73200
73201 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73202- __FILE__,__LINE__, tty->driver->name, self->open_count );
73203+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73204
73205 /* As far as I can see, we protect open_count - Jean II */
73206 spin_lock_irqsave(&self->spinlock, flags);
73207 if (!tty_hung_up_p(filp)) {
73208 extra_count = 1;
73209- self->open_count--;
73210+ local_dec(&self->open_count);
73211 }
73212 spin_unlock_irqrestore(&self->spinlock, flags);
73213- self->blocked_open++;
73214+ local_inc(&self->blocked_open);
73215
73216 while (1) {
73217 if (tty->termios->c_cflag & CBAUD) {
73218@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
73219 }
73220
73221 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73222- __FILE__,__LINE__, tty->driver->name, self->open_count );
73223+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73224
73225 schedule();
73226 }
73227@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
73228 if (extra_count) {
73229 /* ++ is not atomic, so this should be protected - Jean II */
73230 spin_lock_irqsave(&self->spinlock, flags);
73231- self->open_count++;
73232+ local_inc(&self->open_count);
73233 spin_unlock_irqrestore(&self->spinlock, flags);
73234 }
73235- self->blocked_open--;
73236+ local_dec(&self->blocked_open);
73237
73238 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73239- __FILE__,__LINE__, tty->driver->name, self->open_count);
73240+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73241
73242 if (!retval)
73243 self->flags |= ASYNC_NORMAL_ACTIVE;
73244@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
73245 }
73246 /* ++ is not atomic, so this should be protected - Jean II */
73247 spin_lock_irqsave(&self->spinlock, flags);
73248- self->open_count++;
73249+ local_inc(&self->open_count);
73250
73251 tty->driver_data = self;
73252 self->tty = tty;
73253 spin_unlock_irqrestore(&self->spinlock, flags);
73254
73255 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73256- self->line, self->open_count);
73257+ self->line, local_read(&self->open_count));
73258
73259 /* Not really used by us, but lets do it anyway */
73260 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73261@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
73262 return;
73263 }
73264
73265- if ((tty->count == 1) && (self->open_count != 1)) {
73266+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73267 /*
73268 * Uh, oh. tty->count is 1, which means that the tty
73269 * structure will be freed. state->count should always
73270@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
73271 */
73272 IRDA_DEBUG(0, "%s(), bad serial port count; "
73273 "tty->count is 1, state->count is %d\n", __func__ ,
73274- self->open_count);
73275- self->open_count = 1;
73276+ local_read(&self->open_count));
73277+ local_set(&self->open_count, 1);
73278 }
73279
73280- if (--self->open_count < 0) {
73281+ if (local_dec_return(&self->open_count) < 0) {
73282 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73283- __func__, self->line, self->open_count);
73284- self->open_count = 0;
73285+ __func__, self->line, local_read(&self->open_count));
73286+ local_set(&self->open_count, 0);
73287 }
73288- if (self->open_count) {
73289+ if (local_read(&self->open_count)) {
73290 spin_unlock_irqrestore(&self->spinlock, flags);
73291
73292 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73293@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
73294 tty->closing = 0;
73295 self->tty = NULL;
73296
73297- if (self->blocked_open) {
73298+ if (local_read(&self->blocked_open)) {
73299 if (self->close_delay)
73300 schedule_timeout_interruptible(self->close_delay);
73301 wake_up_interruptible(&self->open_wait);
73302@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
73303 spin_lock_irqsave(&self->spinlock, flags);
73304 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73305 self->tty = NULL;
73306- self->open_count = 0;
73307+ local_set(&self->open_count, 0);
73308 spin_unlock_irqrestore(&self->spinlock, flags);
73309
73310 wake_up_interruptible(&self->open_wait);
73311@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
73312 seq_putc(m, '\n');
73313
73314 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73315- seq_printf(m, "Open count: %d\n", self->open_count);
73316+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73317 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73318 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73319
73320diff -urNp linux-3.1.1/net/iucv/af_iucv.c linux-3.1.1/net/iucv/af_iucv.c
73321--- linux-3.1.1/net/iucv/af_iucv.c 2011-11-11 15:19:27.000000000 -0500
73322+++ linux-3.1.1/net/iucv/af_iucv.c 2011-11-16 18:39:08.000000000 -0500
73323@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
73324
73325 write_lock_bh(&iucv_sk_list.lock);
73326
73327- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73328+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73329 while (__iucv_get_sock_by_name(name)) {
73330 sprintf(name, "%08x",
73331- atomic_inc_return(&iucv_sk_list.autobind_name));
73332+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73333 }
73334
73335 write_unlock_bh(&iucv_sk_list.lock);
73336diff -urNp linux-3.1.1/net/key/af_key.c linux-3.1.1/net/key/af_key.c
73337--- linux-3.1.1/net/key/af_key.c 2011-11-11 15:19:27.000000000 -0500
73338+++ linux-3.1.1/net/key/af_key.c 2011-11-16 18:40:44.000000000 -0500
73339@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
73340 struct xfrm_migrate m[XFRM_MAX_DEPTH];
73341 struct xfrm_kmaddress k;
73342
73343+ pax_track_stack();
73344+
73345 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
73346 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
73347 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
73348@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
73349 static u32 get_acqseq(void)
73350 {
73351 u32 res;
73352- static atomic_t acqseq;
73353+ static atomic_unchecked_t acqseq;
73354
73355 do {
73356- res = atomic_inc_return(&acqseq);
73357+ res = atomic_inc_return_unchecked(&acqseq);
73358 } while (!res);
73359 return res;
73360 }
73361diff -urNp linux-3.1.1/net/lapb/lapb_iface.c linux-3.1.1/net/lapb/lapb_iface.c
73362--- linux-3.1.1/net/lapb/lapb_iface.c 2011-11-11 15:19:27.000000000 -0500
73363+++ linux-3.1.1/net/lapb/lapb_iface.c 2011-11-16 18:39:08.000000000 -0500
73364@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
73365 goto out;
73366
73367 lapb->dev = dev;
73368- lapb->callbacks = *callbacks;
73369+ lapb->callbacks = callbacks;
73370
73371 __lapb_insert_cb(lapb);
73372
73373@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
73374
73375 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
73376 {
73377- if (lapb->callbacks.connect_confirmation)
73378- lapb->callbacks.connect_confirmation(lapb->dev, reason);
73379+ if (lapb->callbacks->connect_confirmation)
73380+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
73381 }
73382
73383 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
73384 {
73385- if (lapb->callbacks.connect_indication)
73386- lapb->callbacks.connect_indication(lapb->dev, reason);
73387+ if (lapb->callbacks->connect_indication)
73388+ lapb->callbacks->connect_indication(lapb->dev, reason);
73389 }
73390
73391 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
73392 {
73393- if (lapb->callbacks.disconnect_confirmation)
73394- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73395+ if (lapb->callbacks->disconnect_confirmation)
73396+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73397 }
73398
73399 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73400 {
73401- if (lapb->callbacks.disconnect_indication)
73402- lapb->callbacks.disconnect_indication(lapb->dev, reason);
73403+ if (lapb->callbacks->disconnect_indication)
73404+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
73405 }
73406
73407 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73408 {
73409- if (lapb->callbacks.data_indication)
73410- return lapb->callbacks.data_indication(lapb->dev, skb);
73411+ if (lapb->callbacks->data_indication)
73412+ return lapb->callbacks->data_indication(lapb->dev, skb);
73413
73414 kfree_skb(skb);
73415 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73416@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
73417 {
73418 int used = 0;
73419
73420- if (lapb->callbacks.data_transmit) {
73421- lapb->callbacks.data_transmit(lapb->dev, skb);
73422+ if (lapb->callbacks->data_transmit) {
73423+ lapb->callbacks->data_transmit(lapb->dev, skb);
73424 used = 1;
73425 }
73426
73427diff -urNp linux-3.1.1/net/mac80211/debugfs_sta.c linux-3.1.1/net/mac80211/debugfs_sta.c
73428--- linux-3.1.1/net/mac80211/debugfs_sta.c 2011-11-11 15:19:27.000000000 -0500
73429+++ linux-3.1.1/net/mac80211/debugfs_sta.c 2011-11-16 18:40:44.000000000 -0500
73430@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
73431 struct tid_ampdu_rx *tid_rx;
73432 struct tid_ampdu_tx *tid_tx;
73433
73434+ pax_track_stack();
73435+
73436 rcu_read_lock();
73437
73438 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
73439@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
73440 struct sta_info *sta = file->private_data;
73441 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
73442
73443+ pax_track_stack();
73444+
73445 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
73446 htc->ht_supported ? "" : "not ");
73447 if (htc->ht_supported) {
73448diff -urNp linux-3.1.1/net/mac80211/ieee80211_i.h linux-3.1.1/net/mac80211/ieee80211_i.h
73449--- linux-3.1.1/net/mac80211/ieee80211_i.h 2011-11-11 15:19:27.000000000 -0500
73450+++ linux-3.1.1/net/mac80211/ieee80211_i.h 2011-11-16 18:39:08.000000000 -0500
73451@@ -27,6 +27,7 @@
73452 #include <net/ieee80211_radiotap.h>
73453 #include <net/cfg80211.h>
73454 #include <net/mac80211.h>
73455+#include <asm/local.h>
73456 #include "key.h"
73457 #include "sta_info.h"
73458
73459@@ -754,7 +755,7 @@ struct ieee80211_local {
73460 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73461 spinlock_t queue_stop_reason_lock;
73462
73463- int open_count;
73464+ local_t open_count;
73465 int monitors, cooked_mntrs;
73466 /* number of interfaces with corresponding FIF_ flags */
73467 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73468diff -urNp linux-3.1.1/net/mac80211/iface.c linux-3.1.1/net/mac80211/iface.c
73469--- linux-3.1.1/net/mac80211/iface.c 2011-11-11 15:19:27.000000000 -0500
73470+++ linux-3.1.1/net/mac80211/iface.c 2011-11-16 18:39:08.000000000 -0500
73471@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
73472 break;
73473 }
73474
73475- if (local->open_count == 0) {
73476+ if (local_read(&local->open_count) == 0) {
73477 res = drv_start(local);
73478 if (res)
73479 goto err_del_bss;
73480@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
73481 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73482
73483 if (!is_valid_ether_addr(dev->dev_addr)) {
73484- if (!local->open_count)
73485+ if (!local_read(&local->open_count))
73486 drv_stop(local);
73487 return -EADDRNOTAVAIL;
73488 }
73489@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
73490 mutex_unlock(&local->mtx);
73491
73492 if (coming_up)
73493- local->open_count++;
73494+ local_inc(&local->open_count);
73495
73496 if (hw_reconf_flags) {
73497 ieee80211_hw_config(local, hw_reconf_flags);
73498@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
73499 err_del_interface:
73500 drv_remove_interface(local, &sdata->vif);
73501 err_stop:
73502- if (!local->open_count)
73503+ if (!local_read(&local->open_count))
73504 drv_stop(local);
73505 err_del_bss:
73506 sdata->bss = NULL;
73507@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
73508 }
73509
73510 if (going_down)
73511- local->open_count--;
73512+ local_dec(&local->open_count);
73513
73514 switch (sdata->vif.type) {
73515 case NL80211_IFTYPE_AP_VLAN:
73516@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
73517
73518 ieee80211_recalc_ps(local, -1);
73519
73520- if (local->open_count == 0) {
73521+ if (local_read(&local->open_count) == 0) {
73522 if (local->ops->napi_poll)
73523 napi_disable(&local->napi);
73524 ieee80211_clear_tx_pending(local);
73525diff -urNp linux-3.1.1/net/mac80211/main.c linux-3.1.1/net/mac80211/main.c
73526--- linux-3.1.1/net/mac80211/main.c 2011-11-11 15:19:27.000000000 -0500
73527+++ linux-3.1.1/net/mac80211/main.c 2011-11-16 18:39:08.000000000 -0500
73528@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
73529 local->hw.conf.power_level = power;
73530 }
73531
73532- if (changed && local->open_count) {
73533+ if (changed && local_read(&local->open_count)) {
73534 ret = drv_config(local, changed);
73535 /*
73536 * Goal:
73537diff -urNp linux-3.1.1/net/mac80211/mlme.c linux-3.1.1/net/mac80211/mlme.c
73538--- linux-3.1.1/net/mac80211/mlme.c 2011-11-11 15:19:27.000000000 -0500
73539+++ linux-3.1.1/net/mac80211/mlme.c 2011-11-16 18:40:44.000000000 -0500
73540@@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(stru
73541 bool have_higher_than_11mbit = false;
73542 u16 ap_ht_cap_flags;
73543
73544+ pax_track_stack();
73545+
73546 /* AssocResp and ReassocResp have identical structure */
73547
73548 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
73549diff -urNp linux-3.1.1/net/mac80211/pm.c linux-3.1.1/net/mac80211/pm.c
73550--- linux-3.1.1/net/mac80211/pm.c 2011-11-11 15:19:27.000000000 -0500
73551+++ linux-3.1.1/net/mac80211/pm.c 2011-11-16 18:39:08.000000000 -0500
73552@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211
73553 struct ieee80211_sub_if_data *sdata;
73554 struct sta_info *sta;
73555
73556- if (!local->open_count)
73557+ if (!local_read(&local->open_count))
73558 goto suspend;
73559
73560 ieee80211_scan_cancel(local);
73561@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211
73562 cancel_work_sync(&local->dynamic_ps_enable_work);
73563 del_timer_sync(&local->dynamic_ps_timer);
73564
73565- local->wowlan = wowlan && local->open_count;
73566+ local->wowlan = wowlan && local_read(&local->open_count);
73567 if (local->wowlan) {
73568 int err = drv_suspend(local, wowlan);
73569 if (err < 0) {
73570@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211
73571 }
73572
73573 /* stop hardware - this must stop RX */
73574- if (local->open_count)
73575+ if (local_read(&local->open_count))
73576 ieee80211_stop_device(local);
73577
73578 suspend:
73579diff -urNp linux-3.1.1/net/mac80211/rate.c linux-3.1.1/net/mac80211/rate.c
73580--- linux-3.1.1/net/mac80211/rate.c 2011-11-11 15:19:27.000000000 -0500
73581+++ linux-3.1.1/net/mac80211/rate.c 2011-11-16 18:39:08.000000000 -0500
73582@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73583
73584 ASSERT_RTNL();
73585
73586- if (local->open_count)
73587+ if (local_read(&local->open_count))
73588 return -EBUSY;
73589
73590 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73591diff -urNp linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c
73592--- linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c 2011-11-11 15:19:27.000000000 -0500
73593+++ linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c 2011-11-16 18:39:08.000000000 -0500
73594@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
73595
73596 spin_unlock_irqrestore(&events->lock, status);
73597
73598- if (copy_to_user(buf, pb, p))
73599+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73600 return -EFAULT;
73601
73602 return p;
73603diff -urNp linux-3.1.1/net/mac80211/util.c linux-3.1.1/net/mac80211/util.c
73604--- linux-3.1.1/net/mac80211/util.c 2011-11-11 15:19:27.000000000 -0500
73605+++ linux-3.1.1/net/mac80211/util.c 2011-11-16 18:39:08.000000000 -0500
73606@@ -1166,7 +1166,7 @@ int ieee80211_reconfig(struct ieee80211_
73607 drv_set_coverage_class(local, hw->wiphy->coverage_class);
73608
73609 /* everything else happens only if HW was up & running */
73610- if (!local->open_count)
73611+ if (!local_read(&local->open_count))
73612 goto wake_up;
73613
73614 /*
73615diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c
73616--- linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c 2011-11-11 15:19:27.000000000 -0500
73617+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c 2011-11-16 18:39:08.000000000 -0500
73618@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73619 /* Increase the refcnt counter of the dest */
73620 atomic_inc(&dest->refcnt);
73621
73622- conn_flags = atomic_read(&dest->conn_flags);
73623+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
73624 if (cp->protocol != IPPROTO_UDP)
73625 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73626 /* Bind with the destination and its corresponding transmitter */
73627@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
73628 atomic_set(&cp->refcnt, 1);
73629
73630 atomic_set(&cp->n_control, 0);
73631- atomic_set(&cp->in_pkts, 0);
73632+ atomic_set_unchecked(&cp->in_pkts, 0);
73633
73634 atomic_inc(&ipvs->conn_count);
73635 if (flags & IP_VS_CONN_F_NO_CPORT)
73636@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
73637
73638 /* Don't drop the entry if its number of incoming packets is not
73639 located in [0, 8] */
73640- i = atomic_read(&cp->in_pkts);
73641+ i = atomic_read_unchecked(&cp->in_pkts);
73642 if (i > 8 || i < 0) return 0;
73643
73644 if (!todrop_rate[i]) return 0;
73645diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c
73646--- linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c 2011-11-11 15:19:27.000000000 -0500
73647+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c 2011-11-16 18:39:08.000000000 -0500
73648@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73649 ret = cp->packet_xmit(skb, cp, pd->pp);
73650 /* do not touch skb anymore */
73651
73652- atomic_inc(&cp->in_pkts);
73653+ atomic_inc_unchecked(&cp->in_pkts);
73654 ip_vs_conn_put(cp);
73655 return ret;
73656 }
73657@@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73658 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73659 pkts = sysctl_sync_threshold(ipvs);
73660 else
73661- pkts = atomic_add_return(1, &cp->in_pkts);
73662+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73663
73664 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73665 cp->protocol == IPPROTO_SCTP) {
73666diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c
73667--- linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-11 15:19:27.000000000 -0500
73668+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-16 19:13:12.000000000 -0500
73669@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
73670 ip_vs_rs_hash(ipvs, dest);
73671 write_unlock_bh(&ipvs->rs_lock);
73672 }
73673- atomic_set(&dest->conn_flags, conn_flags);
73674+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73675
73676 /* bind the service */
73677 if (!dest->svc) {
73678@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
73679 " %-7s %-6d %-10d %-10d\n",
73680 &dest->addr.in6,
73681 ntohs(dest->port),
73682- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73683+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73684 atomic_read(&dest->weight),
73685 atomic_read(&dest->activeconns),
73686 atomic_read(&dest->inactconns));
73687@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
73688 "%-7s %-6d %-10d %-10d\n",
73689 ntohl(dest->addr.ip),
73690 ntohs(dest->port),
73691- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73692+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73693 atomic_read(&dest->weight),
73694 atomic_read(&dest->activeconns),
73695 atomic_read(&dest->inactconns));
73696@@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
73697 struct ip_vs_dest_user_kern udest;
73698 struct netns_ipvs *ipvs = net_ipvs(net);
73699
73700+ pax_track_stack();
73701+
73702 if (!capable(CAP_NET_ADMIN))
73703 return -EPERM;
73704
73705@@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net
73706
73707 entry.addr = dest->addr.ip;
73708 entry.port = dest->port;
73709- entry.conn_flags = atomic_read(&dest->conn_flags);
73710+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73711 entry.weight = atomic_read(&dest->weight);
73712 entry.u_threshold = dest->u_threshold;
73713 entry.l_threshold = dest->l_threshold;
73714@@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct s
73715 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73716
73717 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73718- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73719+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73720 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73721 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73722 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73723diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c
73724--- linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c 2011-11-11 15:19:27.000000000 -0500
73725+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c 2011-11-16 18:39:08.000000000 -0500
73726@@ -649,7 +649,7 @@ control:
73727 * i.e only increment in_pkts for Templates.
73728 */
73729 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
73730- int pkts = atomic_add_return(1, &cp->in_pkts);
73731+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73732
73733 if (pkts % sysctl_sync_period(ipvs) != 1)
73734 return;
73735@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *
73736
73737 if (opt)
73738 memcpy(&cp->in_seq, opt, sizeof(*opt));
73739- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73740+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73741 cp->state = state;
73742 cp->old_state = cp->state;
73743 /*
73744diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c
73745--- linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-11 15:19:27.000000000 -0500
73746+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-16 18:39:08.000000000 -0500
73747@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73748 else
73749 rc = NF_ACCEPT;
73750 /* do not touch skb anymore */
73751- atomic_inc(&cp->in_pkts);
73752+ atomic_inc_unchecked(&cp->in_pkts);
73753 goto out;
73754 }
73755
73756@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73757 else
73758 rc = NF_ACCEPT;
73759 /* do not touch skb anymore */
73760- atomic_inc(&cp->in_pkts);
73761+ atomic_inc_unchecked(&cp->in_pkts);
73762 goto out;
73763 }
73764
73765diff -urNp linux-3.1.1/net/netfilter/Kconfig linux-3.1.1/net/netfilter/Kconfig
73766--- linux-3.1.1/net/netfilter/Kconfig 2011-11-11 15:19:27.000000000 -0500
73767+++ linux-3.1.1/net/netfilter/Kconfig 2011-11-16 18:40:44.000000000 -0500
73768@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
73769
73770 To compile it as a module, choose M here. If unsure, say N.
73771
73772+config NETFILTER_XT_MATCH_GRADM
73773+ tristate '"gradm" match support'
73774+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73775+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73776+ ---help---
73777+ The gradm match allows to match on grsecurity RBAC being enabled.
73778+ It is useful when iptables rules are applied early on bootup to
73779+ prevent connections to the machine (except from a trusted host)
73780+ while the RBAC system is disabled.
73781+
73782 config NETFILTER_XT_MATCH_HASHLIMIT
73783 tristate '"hashlimit" match support'
73784 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73785diff -urNp linux-3.1.1/net/netfilter/Makefile linux-3.1.1/net/netfilter/Makefile
73786--- linux-3.1.1/net/netfilter/Makefile 2011-11-11 15:19:27.000000000 -0500
73787+++ linux-3.1.1/net/netfilter/Makefile 2011-11-16 18:40:44.000000000 -0500
73788@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
73789 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73790 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73791 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73792+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73793 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73794 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73795 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73796diff -urNp linux-3.1.1/net/netfilter/nfnetlink_log.c linux-3.1.1/net/netfilter/nfnetlink_log.c
73797--- linux-3.1.1/net/netfilter/nfnetlink_log.c 2011-11-11 15:19:27.000000000 -0500
73798+++ linux-3.1.1/net/netfilter/nfnetlink_log.c 2011-11-16 18:39:08.000000000 -0500
73799@@ -70,7 +70,7 @@ struct nfulnl_instance {
73800 };
73801
73802 static DEFINE_SPINLOCK(instances_lock);
73803-static atomic_t global_seq;
73804+static atomic_unchecked_t global_seq;
73805
73806 #define INSTANCE_BUCKETS 16
73807 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73808@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
73809 /* global sequence number */
73810 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73811 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73812- htonl(atomic_inc_return(&global_seq)));
73813+ htonl(atomic_inc_return_unchecked(&global_seq)));
73814
73815 if (data_len) {
73816 struct nlattr *nla;
73817diff -urNp linux-3.1.1/net/netfilter/xt_gradm.c linux-3.1.1/net/netfilter/xt_gradm.c
73818--- linux-3.1.1/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73819+++ linux-3.1.1/net/netfilter/xt_gradm.c 2011-11-16 18:40:44.000000000 -0500
73820@@ -0,0 +1,51 @@
73821+/*
73822+ * gradm match for netfilter
73823